]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-2.6.32.59-201203212033.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.59-201203212033.patch
CommitLineData
6f12eece
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..4e87324 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,20 @@
6 *.a
7 *.aux
8 *.bin
9+*.c.[012].*
10+*.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 *.eps
18 *.fw
19+*.gcno
20 *.gen.S
21 *.gif
22+*.gmo
23 *.grep
24 *.grp
25 *.gz
26@@ -38,8 +43,10 @@
27 *.tab.h
28 *.tex
29 *.ver
30+*.vim
31 *.xml
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 *.9
37@@ -49,11 +56,16 @@
38 53c700_d.h
39 CVS
40 ChangeSet
41+GPATH
42+GRTAGS
43+GSYMS
44+GTAGS
45 Image
46 Kerntypes
47 Module.markers
48 Module.symvers
49 PENDING
50+PERF*
51 SCCS
52 System.map*
53 TAGS
54@@ -76,7 +88,11 @@ btfixupprep
55 build
56 bvmlinux
57 bzImage*
58+capability_names.h
59+capflags.c
60 classlist.h*
61+clut_vga16.c
62+common-cmds.h
63 comp*.log
64 compile.h*
65 conf
66@@ -84,6 +100,8 @@ config
67 config-*
68 config_data.h*
69 config_data.gz*
70+config.c
71+config.tmp
72 conmakehash
73 consolemap_deftbl.c*
74 cpustr.h
75@@ -97,19 +115,23 @@ elfconfig.h*
76 fixdep
77 fore200e_mkfirm
78 fore200e_pca_fw.c*
79+gate.lds
80 gconf
81 gen-devlist
82 gen_crc32table
83 gen_init_cpio
84 genksyms
85 *_gray256.c
86+hash
87+hid-example
88 ihex2fw
89 ikconfig.h*
90 initramfs_data.cpio
91+initramfs_data.cpio.bz2
92 initramfs_data.cpio.gz
93 initramfs_list
94 kallsyms
95-kconfig
96+kern_constants.h
97 keywords.c
98 ksym.c*
99 ksym.h*
100@@ -117,6 +139,7 @@ kxgettext
101 lkc_defs.h
102 lex.c
103 lex.*.c
104+lib1funcs.S
105 logo_*.c
106 logo_*_clut224.c
107 logo_*_mono.c
108@@ -127,13 +150,16 @@ machtypes.h
109 map
110 maui_boot.h
111 mconf
112+mdp
113 miboot*
114 mk_elfconfig
115 mkboot
116 mkbugboot
117 mkcpustr
118 mkdep
119+mkpiggy
120 mkprep
121+mkregtable
122 mktables
123 mktree
124 modpost
125@@ -149,6 +175,7 @@ patches*
126 pca200e.bin
127 pca200e_ecd.bin2
128 piggy.gz
129+piggy.S
130 piggyback
131 pnmtologo
132 ppc_defs.h*
133@@ -157,12 +184,15 @@ qconf
134 raid6altivec*.c
135 raid6int*.c
136 raid6tables.c
137+regdb.c
138 relocs
139+rlim_names.h
140 series
141 setup
142 setup.bin
143 setup.elf
144 sImage
145+slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149@@ -171,6 +201,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153+user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157@@ -186,14 +217,20 @@ version.h*
158 vmlinux
159 vmlinux-*
160 vmlinux.aout
161+vmlinux.bin.all
162+vmlinux.bin.bz2
163 vmlinux.lds
164+vmlinux.relocs
165+voffset.h
166 vsyscall.lds
167 vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171+utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177+zoffset.h
178diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
179index c840e7d..f4c451c 100644
180--- a/Documentation/kernel-parameters.txt
181+++ b/Documentation/kernel-parameters.txt
182@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
183 the specified number of seconds. This is to be used if
184 your oopses keep scrolling off the screen.
185
186+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
187+ virtualization environments that don't cope well with the
188+ expand down segment used by UDEREF on X86-32 or the frequent
189+ page table updates on X86-64.
190+
191+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
192+
193 pcbit= [HW,ISDN]
194
195 pcd. [PARIDE]
196diff --git a/MAINTAINERS b/MAINTAINERS
197index 613da5d..4fe3eda 100644
198--- a/MAINTAINERS
199+++ b/MAINTAINERS
200@@ -5725,6 +5725,14 @@ L: netdev@vger.kernel.org
201 S: Maintained
202 F: drivers/net/vmxnet3/
203
204+VMware PVSCSI driver
205+M: Alok Kataria <akataria@vmware.com>
206+M: VMware PV-Drivers <pv-drivers@vmware.com>
207+L: linux-scsi@vger.kernel.org
208+S: Maintained
209+F: drivers/scsi/vmw_pvscsi.c
210+F: drivers/scsi/vmw_pvscsi.h
211+
212 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
213 M: Liam Girdwood <lrg@slimlogic.co.uk>
214 M: Mark Brown <broonie@opensource.wolfsonmicro.com>
215diff --git a/Makefile b/Makefile
216index 3a9a721..e5a22f7 100644
217--- a/Makefile
218+++ b/Makefile
219@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
220
221 HOSTCC = gcc
222 HOSTCXX = g++
223-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
224-HOSTCXXFLAGS = -O2
225+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
226+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
227+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
228
229 # Decide whether to build built-in, modular, or both.
230 # Normally, just do built-in.
231@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
232 # Rules shared between *config targets and build targets
233
234 # Basic helpers built in scripts/
235-PHONY += scripts_basic
236-scripts_basic:
237+PHONY += scripts_basic gcc-plugins
238+scripts_basic: gcc-plugins
239 $(Q)$(MAKE) $(build)=scripts/basic
240
241 # To avoid any implicit rule to kick in, define an empty command.
242@@ -403,7 +404,7 @@ endif
243 # of make so .config is not included in this case either (for *config).
244
245 no-dot-config-targets := clean mrproper distclean \
246- cscope TAGS tags help %docs check% \
247+ cscope gtags TAGS tags help %docs check% \
248 include/linux/version.h headers_% \
249 kernelrelease kernelversion
250
251@@ -526,6 +527,53 @@ else
252 KBUILD_CFLAGS += -O2
253 endif
254
255+ifndef DISABLE_PAX_PLUGINS
256+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
257+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
258+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
259+endif
260+ifdef CONFIG_PAX_MEMORY_STACKLEAK
261+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
262+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
263+endif
264+ifdef CONFIG_KALLOCSTAT_PLUGIN
265+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
266+endif
267+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
268+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
269+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
270+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
271+endif
272+ifdef CONFIG_CHECKER_PLUGIN
273+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
274+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
275+endif
276+endif
277+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
278+ifdef CONFIG_PAX_SIZE_OVERFLOW
279+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
280+endif
281+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
282+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
283+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
284+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
285+ifeq ($(KBUILD_EXTMOD),)
286+gcc-plugins:
287+ $(Q)$(MAKE) $(build)=tools/gcc
288+else
289+gcc-plugins: ;
290+endif
291+else
292+gcc-plugins:
293+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
294+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
295+else
296+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
297+endif
298+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
299+endif
300+endif
301+
302 include $(srctree)/arch/$(SRCARCH)/Makefile
303
304 ifneq ($(CONFIG_FRAME_WARN),0)
305@@ -647,7 +695,7 @@ export mod_strip_cmd
306
307
308 ifeq ($(KBUILD_EXTMOD),)
309-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
310+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
311
312 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
313 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
314@@ -868,6 +916,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
315
316 # The actual objects are generated when descending,
317 # make sure no implicit rule kicks in
318+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
319+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
320 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
321
322 # Handle descending into subdirectories listed in $(vmlinux-dirs)
323@@ -877,7 +927,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
324 # Error messages still appears in the original language
325
326 PHONY += $(vmlinux-dirs)
327-$(vmlinux-dirs): prepare scripts
328+$(vmlinux-dirs): gcc-plugins prepare scripts
329 $(Q)$(MAKE) $(build)=$@
330
331 # Build the kernel release string
332@@ -986,6 +1036,7 @@ prepare0: archprepare FORCE
333 $(Q)$(MAKE) $(build)=. missing-syscalls
334
335 # All the preparing..
336+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
337 prepare: prepare0
338
339 # The asm symlink changes when $(ARCH) changes.
340@@ -1127,6 +1178,8 @@ all: modules
341 # using awk while concatenating to the final file.
342
343 PHONY += modules
344+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
345+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
346 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
347 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
348 @$(kecho) ' Building modules, stage 2.';
349@@ -1136,7 +1189,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
350
351 # Target to prepare building external modules
352 PHONY += modules_prepare
353-modules_prepare: prepare scripts
354+modules_prepare: gcc-plugins prepare scripts
355
356 # Target to install modules
357 PHONY += modules_install
358@@ -1201,7 +1254,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
359 include/linux/autoconf.h include/linux/version.h \
360 include/linux/utsrelease.h \
361 include/linux/bounds.h include/asm*/asm-offsets.h \
362- Module.symvers Module.markers tags TAGS cscope*
363+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
364
365 # clean - Delete most, but leave enough to build external modules
366 #
367@@ -1245,7 +1298,7 @@ distclean: mrproper
368 @find $(srctree) $(RCS_FIND_IGNORE) \
369 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
370 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
371- -o -name '.*.rej' -o -size 0 \
372+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
373 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
374 -type f -print | xargs rm -f
375
376@@ -1292,6 +1345,7 @@ help:
377 @echo ' modules_prepare - Set up for building external modules'
378 @echo ' tags/TAGS - Generate tags file for editors'
379 @echo ' cscope - Generate cscope index'
380+ @echo ' gtags - Generate GNU GLOBAL index'
381 @echo ' kernelrelease - Output the release version string'
382 @echo ' kernelversion - Output the version stored in Makefile'
383 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
384@@ -1393,6 +1447,8 @@ PHONY += $(module-dirs) modules
385 $(module-dirs): crmodverdir $(objtree)/Module.symvers
386 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
387
388+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390 modules: $(module-dirs)
391 @$(kecho) ' Building modules, stage 2.';
392 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
393@@ -1448,7 +1504,7 @@ endif # KBUILD_EXTMOD
394 quiet_cmd_tags = GEN $@
395 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
396
397-tags TAGS cscope: FORCE
398+tags TAGS cscope gtags: FORCE
399 $(call cmd,tags)
400
401 # Scripts to check various things for consistency
402@@ -1513,17 +1569,21 @@ else
403 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
404 endif
405
406-%.s: %.c prepare scripts FORCE
407+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409+%.s: %.c gcc-plugins prepare scripts FORCE
410 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
411 %.i: %.c prepare scripts FORCE
412 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
413-%.o: %.c prepare scripts FORCE
414+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
415+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
416+%.o: %.c gcc-plugins prepare scripts FORCE
417 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
418 %.lst: %.c prepare scripts FORCE
419 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
420-%.s: %.S prepare scripts FORCE
421+%.s: %.S gcc-plugins prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423-%.o: %.S prepare scripts FORCE
424+%.o: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426 %.symtypes: %.c prepare scripts FORCE
427 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
428@@ -1533,11 +1593,15 @@ endif
429 $(cmd_crmodverdir)
430 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
431 $(build)=$(build-dir)
432-%/: prepare scripts FORCE
433+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
434+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
435+%/: gcc-plugins prepare scripts FORCE
436 $(cmd_crmodverdir)
437 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
438 $(build)=$(build-dir)
439-%.ko: prepare scripts FORCE
440+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
441+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
442+%.ko: gcc-plugins prepare scripts FORCE
443 $(cmd_crmodverdir)
444 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
445 $(build)=$(build-dir) $(@:.ko=.o)
446diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
447index 610dff4..f396854 100644
448--- a/arch/alpha/include/asm/atomic.h
449+++ b/arch/alpha/include/asm/atomic.h
450@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
451 #define atomic_dec(v) atomic_sub(1,(v))
452 #define atomic64_dec(v) atomic64_sub(1,(v))
453
454+#define atomic64_read_unchecked(v) atomic64_read(v)
455+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
456+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
457+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
458+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
459+#define atomic64_inc_unchecked(v) atomic64_inc(v)
460+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
461+#define atomic64_dec_unchecked(v) atomic64_dec(v)
462+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
463+
464 #define smp_mb__before_atomic_dec() smp_mb()
465 #define smp_mb__after_atomic_dec() smp_mb()
466 #define smp_mb__before_atomic_inc() smp_mb()
467diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
468index f199e69..af005f5 100644
469--- a/arch/alpha/include/asm/cache.h
470+++ b/arch/alpha/include/asm/cache.h
471@@ -4,19 +4,20 @@
472 #ifndef __ARCH_ALPHA_CACHE_H
473 #define __ARCH_ALPHA_CACHE_H
474
475+#include <linux/const.h>
476
477 /* Bytes per L1 (data) cache line. */
478 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
479-# define L1_CACHE_BYTES 64
480 # define L1_CACHE_SHIFT 6
481 #else
482 /* Both EV4 and EV5 are write-through, read-allocate,
483 direct-mapped, physical.
484 */
485-# define L1_CACHE_BYTES 32
486 # define L1_CACHE_SHIFT 5
487 #endif
488
489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
490+
491 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
492 #define SMP_CACHE_BYTES L1_CACHE_BYTES
493
494diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
495index 5c75c1b..c82f878 100644
496--- a/arch/alpha/include/asm/elf.h
497+++ b/arch/alpha/include/asm/elf.h
498@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
499
500 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
501
502+#ifdef CONFIG_PAX_ASLR
503+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
504+
505+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
506+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
507+#endif
508+
509 /* $0 is set by ld.so to a pointer to a function which might be
510 registered using atexit. This provides a mean for the dynamic
511 linker to call DT_FINI functions for shared libraries that have
512diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
513index 3f0c59f..cf1e100 100644
514--- a/arch/alpha/include/asm/pgtable.h
515+++ b/arch/alpha/include/asm/pgtable.h
516@@ -101,6 +101,17 @@ struct vm_area_struct;
517 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
518 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
519 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
520+
521+#ifdef CONFIG_PAX_PAGEEXEC
522+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
523+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
524+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
525+#else
526+# define PAGE_SHARED_NOEXEC PAGE_SHARED
527+# define PAGE_COPY_NOEXEC PAGE_COPY
528+# define PAGE_READONLY_NOEXEC PAGE_READONLY
529+#endif
530+
531 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
532
533 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
534diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
535index ebc3c89..20cfa63 100644
536--- a/arch/alpha/kernel/module.c
537+++ b/arch/alpha/kernel/module.c
538@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
539
540 /* The small sections were sorted to the end of the segment.
541 The following should definitely cover them. */
542- gp = (u64)me->module_core + me->core_size - 0x8000;
543+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
544 got = sechdrs[me->arch.gotsecindex].sh_addr;
545
546 for (i = 0; i < n; i++) {
547diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
548index a94e49c..d71dd44 100644
549--- a/arch/alpha/kernel/osf_sys.c
550+++ b/arch/alpha/kernel/osf_sys.c
551@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
552 /* At this point: (!vma || addr < vma->vm_end). */
553 if (limit - len < addr)
554 return -ENOMEM;
555- if (!vma || addr + len <= vma->vm_start)
556+ if (check_heap_stack_gap(vma, addr, len))
557 return addr;
558 addr = vma->vm_end;
559 vma = vma->vm_next;
560@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
561 merely specific addresses, but regions of memory -- perhaps
562 this feature should be incorporated into all ports? */
563
564+#ifdef CONFIG_PAX_RANDMMAP
565+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
566+#endif
567+
568 if (addr) {
569 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
570 if (addr != (unsigned long) -ENOMEM)
571@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
572 }
573
574 /* Next, try allocating at TASK_UNMAPPED_BASE. */
575- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
576- len, limit);
577+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
578+
579 if (addr != (unsigned long) -ENOMEM)
580 return addr;
581
582diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
583index 00a31de..2ded0f2 100644
584--- a/arch/alpha/mm/fault.c
585+++ b/arch/alpha/mm/fault.c
586@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
587 __reload_thread(pcb);
588 }
589
590+#ifdef CONFIG_PAX_PAGEEXEC
591+/*
592+ * PaX: decide what to do with offenders (regs->pc = fault address)
593+ *
594+ * returns 1 when task should be killed
595+ * 2 when patched PLT trampoline was detected
596+ * 3 when unpatched PLT trampoline was detected
597+ */
598+static int pax_handle_fetch_fault(struct pt_regs *regs)
599+{
600+
601+#ifdef CONFIG_PAX_EMUPLT
602+ int err;
603+
604+ do { /* PaX: patched PLT emulation #1 */
605+ unsigned int ldah, ldq, jmp;
606+
607+ err = get_user(ldah, (unsigned int *)regs->pc);
608+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
609+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
610+
611+ if (err)
612+ break;
613+
614+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
615+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
616+ jmp == 0x6BFB0000U)
617+ {
618+ unsigned long r27, addr;
619+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
620+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
621+
622+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
623+ err = get_user(r27, (unsigned long *)addr);
624+ if (err)
625+ break;
626+
627+ regs->r27 = r27;
628+ regs->pc = r27;
629+ return 2;
630+ }
631+ } while (0);
632+
633+ do { /* PaX: patched PLT emulation #2 */
634+ unsigned int ldah, lda, br;
635+
636+ err = get_user(ldah, (unsigned int *)regs->pc);
637+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
638+ err |= get_user(br, (unsigned int *)(regs->pc+8));
639+
640+ if (err)
641+ break;
642+
643+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
644+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
645+ (br & 0xFFE00000U) == 0xC3E00000U)
646+ {
647+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
648+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
649+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
650+
651+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
652+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
653+ return 2;
654+ }
655+ } while (0);
656+
657+ do { /* PaX: unpatched PLT emulation */
658+ unsigned int br;
659+
660+ err = get_user(br, (unsigned int *)regs->pc);
661+
662+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
663+ unsigned int br2, ldq, nop, jmp;
664+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
665+
666+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
667+ err = get_user(br2, (unsigned int *)addr);
668+ err |= get_user(ldq, (unsigned int *)(addr+4));
669+ err |= get_user(nop, (unsigned int *)(addr+8));
670+ err |= get_user(jmp, (unsigned int *)(addr+12));
671+ err |= get_user(resolver, (unsigned long *)(addr+16));
672+
673+ if (err)
674+ break;
675+
676+ if (br2 == 0xC3600000U &&
677+ ldq == 0xA77B000CU &&
678+ nop == 0x47FF041FU &&
679+ jmp == 0x6B7B0000U)
680+ {
681+ regs->r28 = regs->pc+4;
682+ regs->r27 = addr+16;
683+ regs->pc = resolver;
684+ return 3;
685+ }
686+ }
687+ } while (0);
688+#endif
689+
690+ return 1;
691+}
692+
693+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
694+{
695+ unsigned long i;
696+
697+ printk(KERN_ERR "PAX: bytes at PC: ");
698+ for (i = 0; i < 5; i++) {
699+ unsigned int c;
700+ if (get_user(c, (unsigned int *)pc+i))
701+ printk(KERN_CONT "???????? ");
702+ else
703+ printk(KERN_CONT "%08x ", c);
704+ }
705+ printk("\n");
706+}
707+#endif
708
709 /*
710 * This routine handles page faults. It determines the address,
711@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
712 good_area:
713 si_code = SEGV_ACCERR;
714 if (cause < 0) {
715- if (!(vma->vm_flags & VM_EXEC))
716+ if (!(vma->vm_flags & VM_EXEC)) {
717+
718+#ifdef CONFIG_PAX_PAGEEXEC
719+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
720+ goto bad_area;
721+
722+ up_read(&mm->mmap_sem);
723+ switch (pax_handle_fetch_fault(regs)) {
724+
725+#ifdef CONFIG_PAX_EMUPLT
726+ case 2:
727+ case 3:
728+ return;
729+#endif
730+
731+ }
732+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
733+ do_group_exit(SIGKILL);
734+#else
735 goto bad_area;
736+#endif
737+
738+ }
739 } else if (!cause) {
740 /* Allow reads even for write-only mappings */
741 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
742diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
743index b68faef..6dd1496 100644
744--- a/arch/arm/Kconfig
745+++ b/arch/arm/Kconfig
746@@ -14,6 +14,7 @@ config ARM
747 select SYS_SUPPORTS_APM_EMULATION
748 select HAVE_OPROFILE
749 select HAVE_ARCH_KGDB
750+ select GENERIC_ATOMIC64
751 select HAVE_KPROBES if (!XIP_KERNEL)
752 select HAVE_KRETPROBES if (HAVE_KPROBES)
753 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
754diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
755index d0daeab..ca7e10e 100644
756--- a/arch/arm/include/asm/atomic.h
757+++ b/arch/arm/include/asm/atomic.h
758@@ -15,6 +15,10 @@
759 #include <linux/types.h>
760 #include <asm/system.h>
761
762+#ifdef CONFIG_GENERIC_ATOMIC64
763+#include <asm-generic/atomic64.h>
764+#endif
765+
766 #define ATOMIC_INIT(i) { (i) }
767
768 #ifdef __KERNEL__
769@@ -24,8 +28,16 @@
770 * strex/ldrex monitor on some implementations. The reason we can use it for
771 * atomic_set() is the clrex or dummy strex done on every exception return.
772 */
773-#define atomic_read(v) ((v)->counter)
774+#define atomic_read(v) (*(volatile int *)&(v)->counter)
775+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
776+{
777+ return v->counter;
778+}
779 #define atomic_set(v,i) (((v)->counter) = (i))
780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
781+{
782+ v->counter = i;
783+}
784
785 #if __LINUX_ARM_ARCH__ >= 6
786
787@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
788 int result;
789
790 __asm__ __volatile__("@ atomic_add\n"
791+"1: ldrex %1, [%2]\n"
792+" add %0, %1, %3\n"
793+
794+#ifdef CONFIG_PAX_REFCOUNT
795+" bvc 3f\n"
796+"2: bkpt 0xf103\n"
797+"3:\n"
798+#endif
799+
800+" strex %1, %0, [%2]\n"
801+" teq %1, #0\n"
802+" bne 1b"
803+
804+#ifdef CONFIG_PAX_REFCOUNT
805+"\n4:\n"
806+ _ASM_EXTABLE(2b, 4b)
807+#endif
808+
809+ : "=&r" (result), "=&r" (tmp)
810+ : "r" (&v->counter), "Ir" (i)
811+ : "cc");
812+}
813+
814+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
815+{
816+ unsigned long tmp;
817+ int result;
818+
819+ __asm__ __volatile__("@ atomic_add_unchecked\n"
820 "1: ldrex %0, [%2]\n"
821 " add %0, %0, %3\n"
822 " strex %1, %0, [%2]\n"
823@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
824 smp_mb();
825
826 __asm__ __volatile__("@ atomic_add_return\n"
827+"1: ldrex %1, [%2]\n"
828+" add %0, %1, %3\n"
829+
830+#ifdef CONFIG_PAX_REFCOUNT
831+" bvc 3f\n"
832+" mov %0, %1\n"
833+"2: bkpt 0xf103\n"
834+"3:\n"
835+#endif
836+
837+" strex %1, %0, [%2]\n"
838+" teq %1, #0\n"
839+" bne 1b"
840+
841+#ifdef CONFIG_PAX_REFCOUNT
842+"\n4:\n"
843+ _ASM_EXTABLE(2b, 4b)
844+#endif
845+
846+ : "=&r" (result), "=&r" (tmp)
847+ : "r" (&v->counter), "Ir" (i)
848+ : "cc");
849+
850+ smp_mb();
851+
852+ return result;
853+}
854+
855+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
856+{
857+ unsigned long tmp;
858+ int result;
859+
860+ smp_mb();
861+
862+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
863 "1: ldrex %0, [%2]\n"
864 " add %0, %0, %3\n"
865 " strex %1, %0, [%2]\n"
866@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
867 int result;
868
869 __asm__ __volatile__("@ atomic_sub\n"
870+"1: ldrex %1, [%2]\n"
871+" sub %0, %1, %3\n"
872+
873+#ifdef CONFIG_PAX_REFCOUNT
874+" bvc 3f\n"
875+"2: bkpt 0xf103\n"
876+"3:\n"
877+#endif
878+
879+" strex %1, %0, [%2]\n"
880+" teq %1, #0\n"
881+" bne 1b"
882+
883+#ifdef CONFIG_PAX_REFCOUNT
884+"\n4:\n"
885+ _ASM_EXTABLE(2b, 4b)
886+#endif
887+
888+ : "=&r" (result), "=&r" (tmp)
889+ : "r" (&v->counter), "Ir" (i)
890+ : "cc");
891+}
892+
893+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
894+{
895+ unsigned long tmp;
896+ int result;
897+
898+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
899 "1: ldrex %0, [%2]\n"
900 " sub %0, %0, %3\n"
901 " strex %1, %0, [%2]\n"
902@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
903 smp_mb();
904
905 __asm__ __volatile__("@ atomic_sub_return\n"
906-"1: ldrex %0, [%2]\n"
907-" sub %0, %0, %3\n"
908+"1: ldrex %1, [%2]\n"
909+" sub %0, %1, %3\n"
910+
911+#ifdef CONFIG_PAX_REFCOUNT
912+" bvc 3f\n"
913+" mov %0, %1\n"
914+"2: bkpt 0xf103\n"
915+"3:\n"
916+#endif
917+
918 " strex %1, %0, [%2]\n"
919 " teq %1, #0\n"
920 " bne 1b"
921+
922+#ifdef CONFIG_PAX_REFCOUNT
923+"\n4:\n"
924+ _ASM_EXTABLE(2b, 4b)
925+#endif
926+
927 : "=&r" (result), "=&r" (tmp)
928 : "r" (&v->counter), "Ir" (i)
929 : "cc");
930@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
931 return oldval;
932 }
933
934+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
935+{
936+ unsigned long oldval, res;
937+
938+ smp_mb();
939+
940+ do {
941+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
942+ "ldrex %1, [%2]\n"
943+ "mov %0, #0\n"
944+ "teq %1, %3\n"
945+ "strexeq %0, %4, [%2]\n"
946+ : "=&r" (res), "=&r" (oldval)
947+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
948+ : "cc");
949+ } while (res);
950+
951+ smp_mb();
952+
953+ return oldval;
954+}
955+
956 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
957 {
958 unsigned long tmp, tmp2;
959@@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
960 #endif /* __LINUX_ARM_ARCH__ */
961
962 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
963+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
964+{
965+ return xchg(&v->counter, new);
966+}
967
968 static inline int atomic_add_unless(atomic_t *v, int a, int u)
969 {
970@@ -220,11 +366,27 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
971 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
972
973 #define atomic_inc(v) atomic_add(1, v)
974+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
975+{
976+ atomic_add_unchecked(1, v);
977+}
978 #define atomic_dec(v) atomic_sub(1, v)
979+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
980+{
981+ atomic_sub_unchecked(1, v);
982+}
983
984 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
985+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
986+{
987+ return atomic_add_return_unchecked(1, v) == 0;
988+}
989 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
990 #define atomic_inc_return(v) (atomic_add_return(1, v))
991+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
992+{
993+ return atomic_add_return_unchecked(1, v);
994+}
995 #define atomic_dec_return(v) (atomic_sub_return(1, v))
996 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
997
998diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
999index 66c160b..bca1449 100644
1000--- a/arch/arm/include/asm/cache.h
1001+++ b/arch/arm/include/asm/cache.h
1002@@ -4,8 +4,10 @@
1003 #ifndef __ASMARM_CACHE_H
1004 #define __ASMARM_CACHE_H
1005
1006+#include <linux/const.h>
1007+
1008 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1009-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1010+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1011
1012 /*
1013 * Memory returned by kmalloc() may be used for DMA, so we must make
1014diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1015index 3d0cdd2..19957c5 100644
1016--- a/arch/arm/include/asm/cacheflush.h
1017+++ b/arch/arm/include/asm/cacheflush.h
1018@@ -216,13 +216,13 @@ struct cpu_cache_fns {
1019 void (*dma_inv_range)(const void *, const void *);
1020 void (*dma_clean_range)(const void *, const void *);
1021 void (*dma_flush_range)(const void *, const void *);
1022-};
1023+} __no_const;
1024
1025 struct outer_cache_fns {
1026 void (*inv_range)(unsigned long, unsigned long);
1027 void (*clean_range)(unsigned long, unsigned long);
1028 void (*flush_range)(unsigned long, unsigned long);
1029-};
1030+} __no_const;
1031
1032 /*
1033 * Select the calling method
1034diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1035index 6aac3f5..265536b 100644
1036--- a/arch/arm/include/asm/elf.h
1037+++ b/arch/arm/include/asm/elf.h
1038@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1039 the loader. We need to make sure that it is out of the way of the program
1040 that it will "exec", and that there is sufficient room for the brk. */
1041
1042-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1043+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1044+
1045+#ifdef CONFIG_PAX_ASLR
1046+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1047+
1048+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1049+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1050+#endif
1051
1052 /* When the program starts, a1 contains a pointer to a function to be
1053 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1054diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1055index c019949..388fdd1 100644
1056--- a/arch/arm/include/asm/kmap_types.h
1057+++ b/arch/arm/include/asm/kmap_types.h
1058@@ -19,6 +19,7 @@ enum km_type {
1059 KM_SOFTIRQ0,
1060 KM_SOFTIRQ1,
1061 KM_L2_CACHE,
1062+ KM_CLEARPAGE,
1063 KM_TYPE_NR
1064 };
1065
1066diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1067index 3a32af4..c8def8a 100644
1068--- a/arch/arm/include/asm/page.h
1069+++ b/arch/arm/include/asm/page.h
1070@@ -122,7 +122,7 @@ struct cpu_user_fns {
1071 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1072 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1073 unsigned long vaddr);
1074-};
1075+} __no_const;
1076
1077 #ifdef MULTI_USER
1078 extern struct cpu_user_fns cpu_user;
1079diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1080index d65b2f5..9d87555 100644
1081--- a/arch/arm/include/asm/system.h
1082+++ b/arch/arm/include/asm/system.h
1083@@ -86,6 +86,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
1084
1085 #define xchg(ptr,x) \
1086 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1087+#define xchg_unchecked(ptr,x) \
1088+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1089
1090 extern asmlinkage void __backtrace(void);
1091 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1092@@ -98,7 +100,7 @@ extern int cpu_architecture(void);
1093 extern void cpu_init(void);
1094
1095 void arm_machine_restart(char mode, const char *cmd);
1096-extern void (*arm_pm_restart)(char str, const char *cmd);
1097+extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1098
1099 #define UDBG_UNDEFINED (1 << 0)
1100 #define UDBG_SYSCALL (1 << 1)
1101@@ -505,6 +507,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1102
1103 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1104
1105+#define _ASM_EXTABLE(from, to) \
1106+" .pushsection __ex_table,\"a\"\n"\
1107+" .align 3\n" \
1108+" .long " #from ", " #to"\n" \
1109+" .popsection"
1110+
1111+
1112 #endif /* __ASSEMBLY__ */
1113
1114 #define arch_align_stack(x) (x)
1115diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1116index 1d6bd40..fba0cb9 100644
1117--- a/arch/arm/include/asm/uaccess.h
1118+++ b/arch/arm/include/asm/uaccess.h
1119@@ -22,6 +22,8 @@
1120 #define VERIFY_READ 0
1121 #define VERIFY_WRITE 1
1122
1123+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1124+
1125 /*
1126 * The exception table consists of pairs of addresses: the first is the
1127 * address of an instruction that is allowed to fault, and the second is
1128@@ -387,8 +389,23 @@ do { \
1129
1130
1131 #ifdef CONFIG_MMU
1132-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1133-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1134+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1135+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1136+
1137+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1138+{
1139+ if (!__builtin_constant_p(n))
1140+ check_object_size(to, n, false);
1141+ return ___copy_from_user(to, from, n);
1142+}
1143+
1144+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1145+{
1146+ if (!__builtin_constant_p(n))
1147+ check_object_size(from, n, true);
1148+ return ___copy_to_user(to, from, n);
1149+}
1150+
1151 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1152 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1153 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1154@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1155
1156 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1157 {
1158+ if ((long)n < 0)
1159+ return n;
1160+
1161 if (access_ok(VERIFY_READ, from, n))
1162 n = __copy_from_user(to, from, n);
1163 else /* security hole - plug it */
1164@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1165
1166 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1167 {
1168+ if ((long)n < 0)
1169+ return n;
1170+
1171 if (access_ok(VERIFY_WRITE, to, n))
1172 n = __copy_to_user(to, from, n);
1173 return n;
1174diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1175index 0e62770..e2c2cd6 100644
1176--- a/arch/arm/kernel/armksyms.c
1177+++ b/arch/arm/kernel/armksyms.c
1178@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1179 #ifdef CONFIG_MMU
1180 EXPORT_SYMBOL(copy_page);
1181
1182-EXPORT_SYMBOL(__copy_from_user);
1183-EXPORT_SYMBOL(__copy_to_user);
1184+EXPORT_SYMBOL(___copy_from_user);
1185+EXPORT_SYMBOL(___copy_to_user);
1186 EXPORT_SYMBOL(__clear_user);
1187
1188 EXPORT_SYMBOL(__get_user_1);
1189diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
1190index ba8ccfe..2dc34dc 100644
1191--- a/arch/arm/kernel/kgdb.c
1192+++ b/arch/arm/kernel/kgdb.c
1193@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
1194 * and we handle the normal undef case within the do_undefinstr
1195 * handler.
1196 */
1197-struct kgdb_arch arch_kgdb_ops = {
1198+const struct kgdb_arch arch_kgdb_ops = {
1199 #ifndef __ARMEB__
1200 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
1201 #else /* ! __ARMEB__ */
1202diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1203index 61f90d3..771ab27 100644
1204--- a/arch/arm/kernel/process.c
1205+++ b/arch/arm/kernel/process.c
1206@@ -83,7 +83,7 @@ static int __init hlt_setup(char *__unused)
1207 __setup("nohlt", nohlt_setup);
1208 __setup("hlt", hlt_setup);
1209
1210-void arm_machine_restart(char mode, const char *cmd)
1211+__noreturn void arm_machine_restart(char mode, const char *cmd)
1212 {
1213 /*
1214 * Clean and disable cache, and turn off interrupts
1215@@ -117,7 +117,7 @@ void arm_machine_restart(char mode, const char *cmd)
1216 void (*pm_power_off)(void);
1217 EXPORT_SYMBOL(pm_power_off);
1218
1219-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1220+void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1221 EXPORT_SYMBOL_GPL(arm_pm_restart);
1222
1223
1224@@ -195,6 +195,7 @@ __setup("reboot=", reboot_setup);
1225
1226 void machine_halt(void)
1227 {
1228+ BUG();
1229 }
1230
1231
1232@@ -202,6 +203,7 @@ void machine_power_off(void)
1233 {
1234 if (pm_power_off)
1235 pm_power_off();
1236+ BUG();
1237 }
1238
1239 void machine_restart(char *cmd)
1240diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1241index c6c57b6..0c3b29e 100644
1242--- a/arch/arm/kernel/setup.c
1243+++ b/arch/arm/kernel/setup.c
1244@@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
1245 struct processor processor;
1246 #endif
1247 #ifdef MULTI_TLB
1248-struct cpu_tlb_fns cpu_tlb;
1249+struct cpu_tlb_fns cpu_tlb __read_only;
1250 #endif
1251 #ifdef MULTI_USER
1252-struct cpu_user_fns cpu_user;
1253+struct cpu_user_fns cpu_user __read_only;
1254 #endif
1255 #ifdef MULTI_CACHE
1256-struct cpu_cache_fns cpu_cache;
1257+struct cpu_cache_fns cpu_cache __read_only;
1258 #endif
1259 #ifdef CONFIG_OUTER_CACHE
1260-struct outer_cache_fns outer_cache;
1261+struct outer_cache_fns outer_cache __read_only;
1262 #endif
1263
1264 struct stack {
1265diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1266index 3f361a7..6e806e1 100644
1267--- a/arch/arm/kernel/traps.c
1268+++ b/arch/arm/kernel/traps.c
1269@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
1270
1271 DEFINE_SPINLOCK(die_lock);
1272
1273+extern void gr_handle_kernel_exploit(void);
1274+
1275 /*
1276 * This function is protected against re-entrancy.
1277 */
1278@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
1279 if (panic_on_oops)
1280 panic("Fatal exception");
1281
1282+ gr_handle_kernel_exploit();
1283+
1284 do_exit(SIGSEGV);
1285 }
1286
1287diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
1288index aecf87df..bed731b 100644
1289--- a/arch/arm/kernel/vmlinux.lds.S
1290+++ b/arch/arm/kernel/vmlinux.lds.S
1291@@ -74,14 +74,18 @@ SECTIONS
1292 #ifndef CONFIG_XIP_KERNEL
1293 __init_begin = _stext;
1294 INIT_DATA
1295+ EXIT_TEXT
1296+ EXIT_DATA
1297 . = ALIGN(PAGE_SIZE);
1298 __init_end = .;
1299 #endif
1300 }
1301
1302 /DISCARD/ : { /* Exit code and data */
1303+#ifdef CONFIG_XIP_KERNEL
1304 EXIT_TEXT
1305 EXIT_DATA
1306+#endif
1307 *(.exitcall.exit)
1308 *(.discard)
1309 *(.ARM.exidx.exit.text)
1310diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1311index e4fe124..0fc246b 100644
1312--- a/arch/arm/lib/copy_from_user.S
1313+++ b/arch/arm/lib/copy_from_user.S
1314@@ -16,7 +16,7 @@
1315 /*
1316 * Prototype:
1317 *
1318- * size_t __copy_from_user(void *to, const void *from, size_t n)
1319+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1320 *
1321 * Purpose:
1322 *
1323@@ -84,11 +84,11 @@
1324
1325 .text
1326
1327-ENTRY(__copy_from_user)
1328+ENTRY(___copy_from_user)
1329
1330 #include "copy_template.S"
1331
1332-ENDPROC(__copy_from_user)
1333+ENDPROC(___copy_from_user)
1334
1335 .section .fixup,"ax"
1336 .align 0
1337diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1338index 6ee2f67..d1cce76 100644
1339--- a/arch/arm/lib/copy_page.S
1340+++ b/arch/arm/lib/copy_page.S
1341@@ -10,6 +10,7 @@
1342 * ASM optimised string functions
1343 */
1344 #include <linux/linkage.h>
1345+#include <linux/const.h>
1346 #include <asm/assembler.h>
1347 #include <asm/asm-offsets.h>
1348 #include <asm/cache.h>
1349diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1350index 1a71e15..ac7b258 100644
1351--- a/arch/arm/lib/copy_to_user.S
1352+++ b/arch/arm/lib/copy_to_user.S
1353@@ -16,7 +16,7 @@
1354 /*
1355 * Prototype:
1356 *
1357- * size_t __copy_to_user(void *to, const void *from, size_t n)
1358+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1359 *
1360 * Purpose:
1361 *
1362@@ -88,11 +88,11 @@
1363 .text
1364
1365 ENTRY(__copy_to_user_std)
1366-WEAK(__copy_to_user)
1367+WEAK(___copy_to_user)
1368
1369 #include "copy_template.S"
1370
1371-ENDPROC(__copy_to_user)
1372+ENDPROC(___copy_to_user)
1373
1374 .section .fixup,"ax"
1375 .align 0
1376diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1377index ffdd274..91017b6 100644
1378--- a/arch/arm/lib/uaccess.S
1379+++ b/arch/arm/lib/uaccess.S
1380@@ -19,7 +19,7 @@
1381
1382 #define PAGE_SHIFT 12
1383
1384-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1385+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1386 * Purpose : copy a block to user memory from kernel memory
1387 * Params : to - user memory
1388 * : from - kernel memory
1389@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
1390 sub r2, r2, ip
1391 b .Lc2u_dest_aligned
1392
1393-ENTRY(__copy_to_user)
1394+ENTRY(___copy_to_user)
1395 stmfd sp!, {r2, r4 - r7, lr}
1396 cmp r2, #4
1397 blt .Lc2u_not_enough
1398@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
1399 ldrgtb r3, [r1], #0
1400 USER( strgtbt r3, [r0], #1) @ May fault
1401 b .Lc2u_finished
1402-ENDPROC(__copy_to_user)
1403+ENDPROC(___copy_to_user)
1404
1405 .section .fixup,"ax"
1406 .align 0
1407 9001: ldmfd sp!, {r0, r4 - r7, pc}
1408 .previous
1409
1410-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1411+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1412 * Purpose : copy a block from user memory to kernel memory
1413 * Params : to - kernel memory
1414 * : from - user memory
1415@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
1416 sub r2, r2, ip
1417 b .Lcfu_dest_aligned
1418
1419-ENTRY(__copy_from_user)
1420+ENTRY(___copy_from_user)
1421 stmfd sp!, {r0, r2, r4 - r7, lr}
1422 cmp r2, #4
1423 blt .Lcfu_not_enough
1424@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
1425 USER( ldrgtbt r3, [r1], #1) @ May fault
1426 strgtb r3, [r0], #1
1427 b .Lcfu_finished
1428-ENDPROC(__copy_from_user)
1429+ENDPROC(___copy_from_user)
1430
1431 .section .fixup,"ax"
1432 .align 0
1433diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1434index 6b967ff..67d5b2b 100644
1435--- a/arch/arm/lib/uaccess_with_memcpy.c
1436+++ b/arch/arm/lib/uaccess_with_memcpy.c
1437@@ -97,7 +97,7 @@ out:
1438 }
1439
1440 unsigned long
1441-__copy_to_user(void __user *to, const void *from, unsigned long n)
1442+___copy_to_user(void __user *to, const void *from, unsigned long n)
1443 {
1444 /*
1445 * This test is stubbed out of the main function above to keep
1446diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
1447index 4028724..beec230 100644
1448--- a/arch/arm/mach-at91/pm.c
1449+++ b/arch/arm/mach-at91/pm.c
1450@@ -348,7 +348,7 @@ static void at91_pm_end(void)
1451 }
1452
1453
1454-static struct platform_suspend_ops at91_pm_ops ={
1455+static const struct platform_suspend_ops at91_pm_ops ={
1456 .valid = at91_pm_valid_state,
1457 .begin = at91_pm_begin,
1458 .enter = at91_pm_enter,
1459diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
1460index 5218943..0a34552 100644
1461--- a/arch/arm/mach-omap1/pm.c
1462+++ b/arch/arm/mach-omap1/pm.c
1463@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
1464
1465
1466
1467-static struct platform_suspend_ops omap_pm_ops ={
1468+static const struct platform_suspend_ops omap_pm_ops ={
1469 .prepare = omap_pm_prepare,
1470 .enter = omap_pm_enter,
1471 .finish = omap_pm_finish,
1472diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1473index bff5c4e..d4c649b 100644
1474--- a/arch/arm/mach-omap2/pm24xx.c
1475+++ b/arch/arm/mach-omap2/pm24xx.c
1476@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1477 enable_hlt();
1478 }
1479
1480-static struct platform_suspend_ops omap_pm_ops = {
1481+static const struct platform_suspend_ops omap_pm_ops = {
1482 .prepare = omap2_pm_prepare,
1483 .enter = omap2_pm_enter,
1484 .finish = omap2_pm_finish,
1485diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1486index 8946319..7d3e661 100644
1487--- a/arch/arm/mach-omap2/pm34xx.c
1488+++ b/arch/arm/mach-omap2/pm34xx.c
1489@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1490 return;
1491 }
1492
1493-static struct platform_suspend_ops omap_pm_ops = {
1494+static const struct platform_suspend_ops omap_pm_ops = {
1495 .begin = omap3_pm_begin,
1496 .end = omap3_pm_end,
1497 .prepare = omap3_pm_prepare,
1498diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1499index b3d8d53..6e68ebc 100644
1500--- a/arch/arm/mach-pnx4008/pm.c
1501+++ b/arch/arm/mach-pnx4008/pm.c
1502@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1503 (state == PM_SUSPEND_MEM);
1504 }
1505
1506-static struct platform_suspend_ops pnx4008_pm_ops = {
1507+static const struct platform_suspend_ops pnx4008_pm_ops = {
1508 .enter = pnx4008_pm_enter,
1509 .valid = pnx4008_pm_valid,
1510 };
1511diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1512index 7693355..9beb00a 100644
1513--- a/arch/arm/mach-pxa/pm.c
1514+++ b/arch/arm/mach-pxa/pm.c
1515@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1516 pxa_cpu_pm_fns->finish();
1517 }
1518
1519-static struct platform_suspend_ops pxa_pm_ops = {
1520+static const struct platform_suspend_ops pxa_pm_ops = {
1521 .valid = pxa_pm_valid,
1522 .enter = pxa_pm_enter,
1523 .prepare = pxa_pm_prepare,
1524diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1525index 629e05d..06be589 100644
1526--- a/arch/arm/mach-pxa/sharpsl_pm.c
1527+++ b/arch/arm/mach-pxa/sharpsl_pm.c
1528@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1529 }
1530
1531 #ifdef CONFIG_PM
1532-static struct platform_suspend_ops sharpsl_pm_ops = {
1533+static const struct platform_suspend_ops sharpsl_pm_ops = {
1534 .prepare = pxa_pm_prepare,
1535 .finish = pxa_pm_finish,
1536 .enter = corgi_pxa_pm_enter,
1537diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1538index c83fdc8..ab9fc44 100644
1539--- a/arch/arm/mach-sa1100/pm.c
1540+++ b/arch/arm/mach-sa1100/pm.c
1541@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1542 return virt_to_phys(sp);
1543 }
1544
1545-static struct platform_suspend_ops sa11x0_pm_ops = {
1546+static const struct platform_suspend_ops sa11x0_pm_ops = {
1547 .enter = sa11x0_pm_enter,
1548 .valid = suspend_valid_only_mem,
1549 };
1550diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1551index 3191cd6..c322981 100644
1552--- a/arch/arm/mm/fault.c
1553+++ b/arch/arm/mm/fault.c
1554@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1555 }
1556 #endif
1557
1558+#ifdef CONFIG_PAX_PAGEEXEC
1559+ if (fsr & FSR_LNX_PF) {
1560+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1561+ do_group_exit(SIGKILL);
1562+ }
1563+#endif
1564+
1565 tsk->thread.address = addr;
1566 tsk->thread.error_code = fsr;
1567 tsk->thread.trap_no = 14;
1568@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1569 }
1570 #endif /* CONFIG_MMU */
1571
1572+#ifdef CONFIG_PAX_PAGEEXEC
1573+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1574+{
1575+ long i;
1576+
1577+ printk(KERN_ERR "PAX: bytes at PC: ");
1578+ for (i = 0; i < 20; i++) {
1579+ unsigned char c;
1580+ if (get_user(c, (__force unsigned char __user *)pc+i))
1581+ printk(KERN_CONT "?? ");
1582+ else
1583+ printk(KERN_CONT "%02x ", c);
1584+ }
1585+ printk("\n");
1586+
1587+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1588+ for (i = -1; i < 20; i++) {
1589+ unsigned long c;
1590+ if (get_user(c, (__force unsigned long __user *)sp+i))
1591+ printk(KERN_CONT "???????? ");
1592+ else
1593+ printk(KERN_CONT "%08lx ", c);
1594+ }
1595+ printk("\n");
1596+}
1597+#endif
1598+
1599 /*
1600 * First Level Translation Fault Handler
1601 *
1602@@ -569,6 +603,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1603 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1604 struct siginfo info;
1605
1606+#ifdef CONFIG_PAX_REFCOUNT
1607+ if (fsr_fs(ifsr) == 2) {
1608+ unsigned int bkpt;
1609+
1610+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1611+ current->thread.error_code = ifsr;
1612+ current->thread.trap_no = 0;
1613+ pax_report_refcount_overflow(regs);
1614+ fixup_exception(regs);
1615+ return;
1616+ }
1617+ }
1618+#endif
1619+
1620 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1621 return;
1622
1623diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1624index f5abc51..7ec524c 100644
1625--- a/arch/arm/mm/mmap.c
1626+++ b/arch/arm/mm/mmap.c
1627@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1628 if (len > TASK_SIZE)
1629 return -ENOMEM;
1630
1631+#ifdef CONFIG_PAX_RANDMMAP
1632+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1633+#endif
1634+
1635 if (addr) {
1636 if (do_align)
1637 addr = COLOUR_ALIGN(addr, pgoff);
1638@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1639 addr = PAGE_ALIGN(addr);
1640
1641 vma = find_vma(mm, addr);
1642- if (TASK_SIZE - len >= addr &&
1643- (!vma || addr + len <= vma->vm_start))
1644+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1645 return addr;
1646 }
1647 if (len > mm->cached_hole_size) {
1648- start_addr = addr = mm->free_area_cache;
1649+ start_addr = addr = mm->free_area_cache;
1650 } else {
1651- start_addr = addr = TASK_UNMAPPED_BASE;
1652- mm->cached_hole_size = 0;
1653+ start_addr = addr = mm->mmap_base;
1654+ mm->cached_hole_size = 0;
1655 }
1656
1657 full_search:
1658@@ -94,14 +97,14 @@ full_search:
1659 * Start a new search - just in case we missed
1660 * some holes.
1661 */
1662- if (start_addr != TASK_UNMAPPED_BASE) {
1663- start_addr = addr = TASK_UNMAPPED_BASE;
1664+ if (start_addr != mm->mmap_base) {
1665+ start_addr = addr = mm->mmap_base;
1666 mm->cached_hole_size = 0;
1667 goto full_search;
1668 }
1669 return -ENOMEM;
1670 }
1671- if (!vma || addr + len <= vma->vm_start) {
1672+ if (check_heap_stack_gap(vma, addr, len)) {
1673 /*
1674 * Remember the place where we stopped the search:
1675 */
1676diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1677index 8d97db2..b66cfa5 100644
1678--- a/arch/arm/plat-s3c/pm.c
1679+++ b/arch/arm/plat-s3c/pm.c
1680@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1681 s3c_pm_check_cleanup();
1682 }
1683
1684-static struct platform_suspend_ops s3c_pm_ops = {
1685+static const struct platform_suspend_ops s3c_pm_ops = {
1686 .enter = s3c_pm_enter,
1687 .prepare = s3c_pm_prepare,
1688 .finish = s3c_pm_finish,
1689diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1690index d3cf35a..0ba6053 100644
1691--- a/arch/avr32/include/asm/cache.h
1692+++ b/arch/avr32/include/asm/cache.h
1693@@ -1,8 +1,10 @@
1694 #ifndef __ASM_AVR32_CACHE_H
1695 #define __ASM_AVR32_CACHE_H
1696
1697+#include <linux/const.h>
1698+
1699 #define L1_CACHE_SHIFT 5
1700-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1701+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1702
1703 /*
1704 * Memory returned by kmalloc() may be used for DMA, so we must make
1705diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1706index d5d1d41..856e2ed 100644
1707--- a/arch/avr32/include/asm/elf.h
1708+++ b/arch/avr32/include/asm/elf.h
1709@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1710 the loader. We need to make sure that it is out of the way of the program
1711 that it will "exec", and that there is sufficient room for the brk. */
1712
1713-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1714+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1715
1716+#ifdef CONFIG_PAX_ASLR
1717+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1718+
1719+#define PAX_DELTA_MMAP_LEN 15
1720+#define PAX_DELTA_STACK_LEN 15
1721+#endif
1722
1723 /* This yields a mask that user programs can use to figure out what
1724 instruction set this CPU supports. This could be done in user space,
1725diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1726index b7f5c68..556135c 100644
1727--- a/arch/avr32/include/asm/kmap_types.h
1728+++ b/arch/avr32/include/asm/kmap_types.h
1729@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1730 D(11) KM_IRQ1,
1731 D(12) KM_SOFTIRQ0,
1732 D(13) KM_SOFTIRQ1,
1733-D(14) KM_TYPE_NR
1734+D(14) KM_CLEARPAGE,
1735+D(15) KM_TYPE_NR
1736 };
1737
1738 #undef D
1739diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1740index f021edf..32d680e 100644
1741--- a/arch/avr32/mach-at32ap/pm.c
1742+++ b/arch/avr32/mach-at32ap/pm.c
1743@@ -176,7 +176,7 @@ out:
1744 return 0;
1745 }
1746
1747-static struct platform_suspend_ops avr32_pm_ops = {
1748+static const struct platform_suspend_ops avr32_pm_ops = {
1749 .valid = avr32_pm_valid_state,
1750 .enter = avr32_pm_enter,
1751 };
1752diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1753index b61d86d..e292c7f 100644
1754--- a/arch/avr32/mm/fault.c
1755+++ b/arch/avr32/mm/fault.c
1756@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1757
1758 int exception_trace = 1;
1759
1760+#ifdef CONFIG_PAX_PAGEEXEC
1761+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1762+{
1763+ unsigned long i;
1764+
1765+ printk(KERN_ERR "PAX: bytes at PC: ");
1766+ for (i = 0; i < 20; i++) {
1767+ unsigned char c;
1768+ if (get_user(c, (unsigned char *)pc+i))
1769+ printk(KERN_CONT "???????? ");
1770+ else
1771+ printk(KERN_CONT "%02x ", c);
1772+ }
1773+ printk("\n");
1774+}
1775+#endif
1776+
1777 /*
1778 * This routine handles page faults. It determines the address and the
1779 * problem, and then passes it off to one of the appropriate routines.
1780@@ -157,6 +174,16 @@ bad_area:
1781 up_read(&mm->mmap_sem);
1782
1783 if (user_mode(regs)) {
1784+
1785+#ifdef CONFIG_PAX_PAGEEXEC
1786+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1787+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1788+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1789+ do_group_exit(SIGKILL);
1790+ }
1791+ }
1792+#endif
1793+
1794 if (exception_trace && printk_ratelimit())
1795 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1796 "sp %08lx ecr %lu\n",
1797diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1798index 93f6c63..d144953 100644
1799--- a/arch/blackfin/include/asm/cache.h
1800+++ b/arch/blackfin/include/asm/cache.h
1801@@ -7,12 +7,14 @@
1802 #ifndef __ARCH_BLACKFIN_CACHE_H
1803 #define __ARCH_BLACKFIN_CACHE_H
1804
1805+#include <linux/const.h>
1806+
1807 /*
1808 * Bytes per L1 cache line
1809 * Blackfin loads 32 bytes for cache
1810 */
1811 #define L1_CACHE_SHIFT 5
1812-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1813+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1814 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1815
1816 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1817diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1818index cce79d0..c406c85 100644
1819--- a/arch/blackfin/kernel/kgdb.c
1820+++ b/arch/blackfin/kernel/kgdb.c
1821@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1822 return -1; /* this means that we do not want to exit from the handler */
1823 }
1824
1825-struct kgdb_arch arch_kgdb_ops = {
1826+const struct kgdb_arch arch_kgdb_ops = {
1827 .gdb_bpt_instr = {0xa1},
1828 #ifdef CONFIG_SMP
1829 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1830diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1831index 8837be4..b2fb413 100644
1832--- a/arch/blackfin/mach-common/pm.c
1833+++ b/arch/blackfin/mach-common/pm.c
1834@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1835 return 0;
1836 }
1837
1838-struct platform_suspend_ops bfin_pm_ops = {
1839+const struct platform_suspend_ops bfin_pm_ops = {
1840 .enter = bfin_pm_enter,
1841 .valid = bfin_pm_valid,
1842 };
1843diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1844index aea2718..3639a60 100644
1845--- a/arch/cris/include/arch-v10/arch/cache.h
1846+++ b/arch/cris/include/arch-v10/arch/cache.h
1847@@ -1,8 +1,9 @@
1848 #ifndef _ASM_ARCH_CACHE_H
1849 #define _ASM_ARCH_CACHE_H
1850
1851+#include <linux/const.h>
1852 /* Etrax 100LX have 32-byte cache-lines. */
1853-#define L1_CACHE_BYTES 32
1854 #define L1_CACHE_SHIFT 5
1855+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1856
1857 #endif /* _ASM_ARCH_CACHE_H */
1858diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1859index dfc7305..417f5b3 100644
1860--- a/arch/cris/include/arch-v32/arch/cache.h
1861+++ b/arch/cris/include/arch-v32/arch/cache.h
1862@@ -1,11 +1,12 @@
1863 #ifndef _ASM_CRIS_ARCH_CACHE_H
1864 #define _ASM_CRIS_ARCH_CACHE_H
1865
1866+#include <linux/const.h>
1867 #include <arch/hwregs/dma.h>
1868
1869 /* A cache-line is 32 bytes. */
1870-#define L1_CACHE_BYTES 32
1871 #define L1_CACHE_SHIFT 5
1872+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1873
1874 void flush_dma_list(dma_descr_data *descr);
1875 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
1876diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1877index 00a57af..c3ef0cd 100644
1878--- a/arch/frv/include/asm/atomic.h
1879+++ b/arch/frv/include/asm/atomic.h
1880@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1881 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1882 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1883
1884+#define atomic64_read_unchecked(v) atomic64_read(v)
1885+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1886+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1887+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1888+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1889+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1890+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1891+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1892+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1893+
1894 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1895 {
1896 int c, old;
1897diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
1898index 7dc0f0f..1e6a620 100644
1899--- a/arch/frv/include/asm/cache.h
1900+++ b/arch/frv/include/asm/cache.h
1901@@ -12,10 +12,11 @@
1902 #ifndef __ASM_CACHE_H
1903 #define __ASM_CACHE_H
1904
1905+#include <linux/const.h>
1906
1907 /* bytes per L1 cache line */
1908 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
1909-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1910+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1911
1912 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1913
1914diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1915index f8e16b2..c73ff79 100644
1916--- a/arch/frv/include/asm/kmap_types.h
1917+++ b/arch/frv/include/asm/kmap_types.h
1918@@ -23,6 +23,7 @@ enum km_type {
1919 KM_IRQ1,
1920 KM_SOFTIRQ0,
1921 KM_SOFTIRQ1,
1922+ KM_CLEARPAGE,
1923 KM_TYPE_NR
1924 };
1925
1926diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1927index 385fd30..6c3d97e 100644
1928--- a/arch/frv/mm/elf-fdpic.c
1929+++ b/arch/frv/mm/elf-fdpic.c
1930@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1931 if (addr) {
1932 addr = PAGE_ALIGN(addr);
1933 vma = find_vma(current->mm, addr);
1934- if (TASK_SIZE - len >= addr &&
1935- (!vma || addr + len <= vma->vm_start))
1936+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1937 goto success;
1938 }
1939
1940@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1941 for (; vma; vma = vma->vm_next) {
1942 if (addr > limit)
1943 break;
1944- if (addr + len <= vma->vm_start)
1945+ if (check_heap_stack_gap(vma, addr, len))
1946 goto success;
1947 addr = vma->vm_end;
1948 }
1949@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1950 for (; vma; vma = vma->vm_next) {
1951 if (addr > limit)
1952 break;
1953- if (addr + len <= vma->vm_start)
1954+ if (check_heap_stack_gap(vma, addr, len))
1955 goto success;
1956 addr = vma->vm_end;
1957 }
1958diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
1959index c635028..6d9445a 100644
1960--- a/arch/h8300/include/asm/cache.h
1961+++ b/arch/h8300/include/asm/cache.h
1962@@ -1,8 +1,10 @@
1963 #ifndef __ARCH_H8300_CACHE_H
1964 #define __ARCH_H8300_CACHE_H
1965
1966+#include <linux/const.h>
1967+
1968 /* bytes per L1 cache line */
1969-#define L1_CACHE_BYTES 4
1970+#define L1_CACHE_BYTES _AC(4,UL)
1971
1972 /* m68k-elf-gcc 2.95.2 doesn't like these */
1973
1974diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1975index e4a80d8..11a7ea1 100644
1976--- a/arch/ia64/hp/common/hwsw_iommu.c
1977+++ b/arch/ia64/hp/common/hwsw_iommu.c
1978@@ -17,7 +17,7 @@
1979 #include <linux/swiotlb.h>
1980 #include <asm/machvec.h>
1981
1982-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1983+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1984
1985 /* swiotlb declarations & definitions: */
1986 extern int swiotlb_late_init_with_default_size (size_t size);
1987@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1988 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1989 }
1990
1991-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1992+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1993 {
1994 if (use_swiotlb(dev))
1995 return &swiotlb_dma_ops;
1996diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1997index 01ae69b..35752fd 100644
1998--- a/arch/ia64/hp/common/sba_iommu.c
1999+++ b/arch/ia64/hp/common/sba_iommu.c
2000@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
2001 },
2002 };
2003
2004-extern struct dma_map_ops swiotlb_dma_ops;
2005+extern const struct dma_map_ops swiotlb_dma_ops;
2006
2007 static int __init
2008 sba_init(void)
2009@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
2010
2011 __setup("sbapagesize=",sba_page_override);
2012
2013-struct dma_map_ops sba_dma_ops = {
2014+const struct dma_map_ops sba_dma_ops = {
2015 .alloc_coherent = sba_alloc_coherent,
2016 .free_coherent = sba_free_coherent,
2017 .map_page = sba_map_page,
2018diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
2019index c69552b..c7122f4 100644
2020--- a/arch/ia64/ia32/binfmt_elf32.c
2021+++ b/arch/ia64/ia32/binfmt_elf32.c
2022@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
2023
2024 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
2025
2026+#ifdef CONFIG_PAX_ASLR
2027+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2028+
2029+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2030+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2031+#endif
2032+
2033 /* Ugly but avoids duplication */
2034 #include "../../../fs/binfmt_elf.c"
2035
2036diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
2037index 0f15349..26b3429 100644
2038--- a/arch/ia64/ia32/ia32priv.h
2039+++ b/arch/ia64/ia32/ia32priv.h
2040@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
2041 #define ELF_DATA ELFDATA2LSB
2042 #define ELF_ARCH EM_386
2043
2044-#define IA32_STACK_TOP IA32_PAGE_OFFSET
2045+#ifdef CONFIG_PAX_RANDUSTACK
2046+#define __IA32_DELTA_STACK (current->mm->delta_stack)
2047+#else
2048+#define __IA32_DELTA_STACK 0UL
2049+#endif
2050+
2051+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
2052+
2053 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
2054 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
2055
2056diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2057index 88405cb..de5ca5d 100644
2058--- a/arch/ia64/include/asm/atomic.h
2059+++ b/arch/ia64/include/asm/atomic.h
2060@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2061 #define atomic64_inc(v) atomic64_add(1, (v))
2062 #define atomic64_dec(v) atomic64_sub(1, (v))
2063
2064+#define atomic64_read_unchecked(v) atomic64_read(v)
2065+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2066+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2067+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2068+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2069+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2070+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2071+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2072+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2073+
2074 /* Atomic operations are already serializing */
2075 #define smp_mb__before_atomic_dec() barrier()
2076 #define smp_mb__after_atomic_dec() barrier()
2077diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2078index e7482bd..d1c9b8e 100644
2079--- a/arch/ia64/include/asm/cache.h
2080+++ b/arch/ia64/include/asm/cache.h
2081@@ -1,6 +1,7 @@
2082 #ifndef _ASM_IA64_CACHE_H
2083 #define _ASM_IA64_CACHE_H
2084
2085+#include <linux/const.h>
2086
2087 /*
2088 * Copyright (C) 1998-2000 Hewlett-Packard Co
2089@@ -9,7 +10,7 @@
2090
2091 /* Bytes per L1 (data) cache line. */
2092 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2093-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2094+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2095
2096 #ifdef CONFIG_SMP
2097 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2098diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
2099index 8d3c79c..71b3af6 100644
2100--- a/arch/ia64/include/asm/dma-mapping.h
2101+++ b/arch/ia64/include/asm/dma-mapping.h
2102@@ -12,7 +12,7 @@
2103
2104 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
2105
2106-extern struct dma_map_ops *dma_ops;
2107+extern const struct dma_map_ops *dma_ops;
2108 extern struct ia64_machine_vector ia64_mv;
2109 extern void set_iommu_machvec(void);
2110
2111@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
2112 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2113 dma_addr_t *daddr, gfp_t gfp)
2114 {
2115- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2116+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2117 void *caddr;
2118
2119 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
2120@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2121 static inline void dma_free_coherent(struct device *dev, size_t size,
2122 void *caddr, dma_addr_t daddr)
2123 {
2124- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2125+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2126 debug_dma_free_coherent(dev, size, caddr, daddr);
2127 ops->free_coherent(dev, size, caddr, daddr);
2128 }
2129@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2130
2131 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
2132 {
2133- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2134+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2135 return ops->mapping_error(dev, daddr);
2136 }
2137
2138 static inline int dma_supported(struct device *dev, u64 mask)
2139 {
2140- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2141+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2142 return ops->dma_supported(dev, mask);
2143 }
2144
2145diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2146index 86eddee..b116bb4 100644
2147--- a/arch/ia64/include/asm/elf.h
2148+++ b/arch/ia64/include/asm/elf.h
2149@@ -43,6 +43,13 @@
2150 */
2151 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2152
2153+#ifdef CONFIG_PAX_ASLR
2154+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2155+
2156+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2157+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2158+#endif
2159+
2160 #define PT_IA_64_UNWIND 0x70000001
2161
2162 /* IA-64 relocations: */
2163diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
2164index 367d299..9ad4279 100644
2165--- a/arch/ia64/include/asm/machvec.h
2166+++ b/arch/ia64/include/asm/machvec.h
2167@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
2168 /* DMA-mapping interface: */
2169 typedef void ia64_mv_dma_init (void);
2170 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
2171-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2172+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2173
2174 /*
2175 * WARNING: The legacy I/O space is _architected_. Platforms are
2176@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
2177 # endif /* CONFIG_IA64_GENERIC */
2178
2179 extern void swiotlb_dma_init(void);
2180-extern struct dma_map_ops *dma_get_ops(struct device *);
2181+extern const struct dma_map_ops *dma_get_ops(struct device *);
2182
2183 /*
2184 * Define default versions so we can extend machvec for new platforms without having
2185diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2186index 8840a69..cdb63d9 100644
2187--- a/arch/ia64/include/asm/pgtable.h
2188+++ b/arch/ia64/include/asm/pgtable.h
2189@@ -12,7 +12,7 @@
2190 * David Mosberger-Tang <davidm@hpl.hp.com>
2191 */
2192
2193-
2194+#include <linux/const.h>
2195 #include <asm/mman.h>
2196 #include <asm/page.h>
2197 #include <asm/processor.h>
2198@@ -143,6 +143,17 @@
2199 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2200 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2201 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2202+
2203+#ifdef CONFIG_PAX_PAGEEXEC
2204+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2205+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2206+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2207+#else
2208+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2209+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2210+# define PAGE_COPY_NOEXEC PAGE_COPY
2211+#endif
2212+
2213 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2214 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2215 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2216diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2217index 239ecdc..f94170e 100644
2218--- a/arch/ia64/include/asm/spinlock.h
2219+++ b/arch/ia64/include/asm/spinlock.h
2220@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
2221 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2222
2223 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2224- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2225+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2226 }
2227
2228 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
2229diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2230index 449c8c0..432a3d2 100644
2231--- a/arch/ia64/include/asm/uaccess.h
2232+++ b/arch/ia64/include/asm/uaccess.h
2233@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2234 const void *__cu_from = (from); \
2235 long __cu_len = (n); \
2236 \
2237- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2238+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2239 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2240 __cu_len; \
2241 })
2242@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2243 long __cu_len = (n); \
2244 \
2245 __chk_user_ptr(__cu_from); \
2246- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2247+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2249 __cu_len; \
2250 })
2251diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
2252index f2c1600..969398a 100644
2253--- a/arch/ia64/kernel/dma-mapping.c
2254+++ b/arch/ia64/kernel/dma-mapping.c
2255@@ -3,7 +3,7 @@
2256 /* Set this to 1 if there is a HW IOMMU in the system */
2257 int iommu_detected __read_mostly;
2258
2259-struct dma_map_ops *dma_ops;
2260+const struct dma_map_ops *dma_ops;
2261 EXPORT_SYMBOL(dma_ops);
2262
2263 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
2264@@ -16,7 +16,7 @@ static int __init dma_init(void)
2265 }
2266 fs_initcall(dma_init);
2267
2268-struct dma_map_ops *dma_get_ops(struct device *dev)
2269+const struct dma_map_ops *dma_get_ops(struct device *dev)
2270 {
2271 return dma_ops;
2272 }
2273diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2274index 1481b0a..e7d38ff 100644
2275--- a/arch/ia64/kernel/module.c
2276+++ b/arch/ia64/kernel/module.c
2277@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
2278 void
2279 module_free (struct module *mod, void *module_region)
2280 {
2281- if (mod && mod->arch.init_unw_table &&
2282- module_region == mod->module_init) {
2283+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2284 unw_remove_unwind_table(mod->arch.init_unw_table);
2285 mod->arch.init_unw_table = NULL;
2286 }
2287@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2288 }
2289
2290 static inline int
2291+in_init_rx (const struct module *mod, uint64_t addr)
2292+{
2293+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2294+}
2295+
2296+static inline int
2297+in_init_rw (const struct module *mod, uint64_t addr)
2298+{
2299+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2300+}
2301+
2302+static inline int
2303 in_init (const struct module *mod, uint64_t addr)
2304 {
2305- return addr - (uint64_t) mod->module_init < mod->init_size;
2306+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2307+}
2308+
2309+static inline int
2310+in_core_rx (const struct module *mod, uint64_t addr)
2311+{
2312+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2313+}
2314+
2315+static inline int
2316+in_core_rw (const struct module *mod, uint64_t addr)
2317+{
2318+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2319 }
2320
2321 static inline int
2322 in_core (const struct module *mod, uint64_t addr)
2323 {
2324- return addr - (uint64_t) mod->module_core < mod->core_size;
2325+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2326 }
2327
2328 static inline int
2329@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2330 break;
2331
2332 case RV_BDREL:
2333- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2334+ if (in_init_rx(mod, val))
2335+ val -= (uint64_t) mod->module_init_rx;
2336+ else if (in_init_rw(mod, val))
2337+ val -= (uint64_t) mod->module_init_rw;
2338+ else if (in_core_rx(mod, val))
2339+ val -= (uint64_t) mod->module_core_rx;
2340+ else if (in_core_rw(mod, val))
2341+ val -= (uint64_t) mod->module_core_rw;
2342 break;
2343
2344 case RV_LTV:
2345@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2346 * addresses have been selected...
2347 */
2348 uint64_t gp;
2349- if (mod->core_size > MAX_LTOFF)
2350+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2351 /*
2352 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2353 * at the end of the module.
2354 */
2355- gp = mod->core_size - MAX_LTOFF / 2;
2356+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2357 else
2358- gp = mod->core_size / 2;
2359- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2360+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2361+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2362 mod->arch.gp = gp;
2363 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2364 }
2365diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
2366index f6b1ff0..de773fb 100644
2367--- a/arch/ia64/kernel/pci-dma.c
2368+++ b/arch/ia64/kernel/pci-dma.c
2369@@ -43,7 +43,7 @@ struct device fallback_dev = {
2370 .dma_mask = &fallback_dev.coherent_dma_mask,
2371 };
2372
2373-extern struct dma_map_ops intel_dma_ops;
2374+extern const struct dma_map_ops intel_dma_ops;
2375
2376 static int __init pci_iommu_init(void)
2377 {
2378@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
2379 }
2380 EXPORT_SYMBOL(iommu_dma_supported);
2381
2382+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
2383+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
2384+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2385+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2386+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2387+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2388+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
2389+
2390+static const struct dma_map_ops intel_iommu_dma_ops = {
2391+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
2392+ .alloc_coherent = intel_alloc_coherent,
2393+ .free_coherent = intel_free_coherent,
2394+ .map_sg = intel_map_sg,
2395+ .unmap_sg = intel_unmap_sg,
2396+ .map_page = intel_map_page,
2397+ .unmap_page = intel_unmap_page,
2398+ .mapping_error = intel_mapping_error,
2399+
2400+ .sync_single_for_cpu = machvec_dma_sync_single,
2401+ .sync_sg_for_cpu = machvec_dma_sync_sg,
2402+ .sync_single_for_device = machvec_dma_sync_single,
2403+ .sync_sg_for_device = machvec_dma_sync_sg,
2404+ .dma_supported = iommu_dma_supported,
2405+};
2406+
2407 void __init pci_iommu_alloc(void)
2408 {
2409- dma_ops = &intel_dma_ops;
2410-
2411- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
2412- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
2413- dma_ops->sync_single_for_device = machvec_dma_sync_single;
2414- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
2415- dma_ops->dma_supported = iommu_dma_supported;
2416+ dma_ops = &intel_iommu_dma_ops;
2417
2418 /*
2419 * The order of these functions is important for
2420diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
2421index 285aae8..61dbab6 100644
2422--- a/arch/ia64/kernel/pci-swiotlb.c
2423+++ b/arch/ia64/kernel/pci-swiotlb.c
2424@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
2425 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
2426 }
2427
2428-struct dma_map_ops swiotlb_dma_ops = {
2429+const struct dma_map_ops swiotlb_dma_ops = {
2430 .alloc_coherent = ia64_swiotlb_alloc_coherent,
2431 .free_coherent = swiotlb_free_coherent,
2432 .map_page = swiotlb_map_page,
2433diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2434index 609d500..7dde2a8 100644
2435--- a/arch/ia64/kernel/sys_ia64.c
2436+++ b/arch/ia64/kernel/sys_ia64.c
2437@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2438 if (REGION_NUMBER(addr) == RGN_HPAGE)
2439 addr = 0;
2440 #endif
2441+
2442+#ifdef CONFIG_PAX_RANDMMAP
2443+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2444+ addr = mm->free_area_cache;
2445+ else
2446+#endif
2447+
2448 if (!addr)
2449 addr = mm->free_area_cache;
2450
2451@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2453 /* At this point: (!vma || addr < vma->vm_end). */
2454 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2455- if (start_addr != TASK_UNMAPPED_BASE) {
2456+ if (start_addr != mm->mmap_base) {
2457 /* Start a new search --- just in case we missed some holes. */
2458- addr = TASK_UNMAPPED_BASE;
2459+ addr = mm->mmap_base;
2460 goto full_search;
2461 }
2462 return -ENOMEM;
2463 }
2464- if (!vma || addr + len <= vma->vm_start) {
2465+ if (check_heap_stack_gap(vma, addr, len)) {
2466 /* Remember the address where we stopped this search: */
2467 mm->free_area_cache = addr + len;
2468 return addr;
2469diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
2470index 8f06035..b3a5818 100644
2471--- a/arch/ia64/kernel/topology.c
2472+++ b/arch/ia64/kernel/topology.c
2473@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
2474 return ret;
2475 }
2476
2477-static struct sysfs_ops cache_sysfs_ops = {
2478+static const struct sysfs_ops cache_sysfs_ops = {
2479 .show = cache_show
2480 };
2481
2482diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2483index 0a0c77b..8e55a81 100644
2484--- a/arch/ia64/kernel/vmlinux.lds.S
2485+++ b/arch/ia64/kernel/vmlinux.lds.S
2486@@ -190,7 +190,7 @@ SECTIONS
2487 /* Per-cpu data: */
2488 . = ALIGN(PERCPU_PAGE_SIZE);
2489 PERCPU_VADDR(PERCPU_ADDR, :percpu)
2490- __phys_per_cpu_start = __per_cpu_load;
2491+ __phys_per_cpu_start = per_cpu_load;
2492 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
2493 * into percpu page size
2494 */
2495diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2496index 19261a9..1611b7a 100644
2497--- a/arch/ia64/mm/fault.c
2498+++ b/arch/ia64/mm/fault.c
2499@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2500 return pte_present(pte);
2501 }
2502
2503+#ifdef CONFIG_PAX_PAGEEXEC
2504+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2505+{
2506+ unsigned long i;
2507+
2508+ printk(KERN_ERR "PAX: bytes at PC: ");
2509+ for (i = 0; i < 8; i++) {
2510+ unsigned int c;
2511+ if (get_user(c, (unsigned int *)pc+i))
2512+ printk(KERN_CONT "???????? ");
2513+ else
2514+ printk(KERN_CONT "%08x ", c);
2515+ }
2516+ printk("\n");
2517+}
2518+#endif
2519+
2520 void __kprobes
2521 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2522 {
2523@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2524 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2525 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2526
2527- if ((vma->vm_flags & mask) != mask)
2528+ if ((vma->vm_flags & mask) != mask) {
2529+
2530+#ifdef CONFIG_PAX_PAGEEXEC
2531+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2532+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2533+ goto bad_area;
2534+
2535+ up_read(&mm->mmap_sem);
2536+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2537+ do_group_exit(SIGKILL);
2538+ }
2539+#endif
2540+
2541 goto bad_area;
2542
2543+ }
2544+
2545 survive:
2546 /*
2547 * If for any reason at all we couldn't handle the fault, make
2548diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2549index b0f6157..a082bbc 100644
2550--- a/arch/ia64/mm/hugetlbpage.c
2551+++ b/arch/ia64/mm/hugetlbpage.c
2552@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2553 /* At this point: (!vmm || addr < vmm->vm_end). */
2554 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2555 return -ENOMEM;
2556- if (!vmm || (addr + len) <= vmm->vm_start)
2557+ if (check_heap_stack_gap(vmm, addr, len))
2558 return addr;
2559 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2560 }
2561diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2562index 1857766..05cc6a3 100644
2563--- a/arch/ia64/mm/init.c
2564+++ b/arch/ia64/mm/init.c
2565@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
2566 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2567 vma->vm_end = vma->vm_start + PAGE_SIZE;
2568 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2569+
2570+#ifdef CONFIG_PAX_PAGEEXEC
2571+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2572+ vma->vm_flags &= ~VM_EXEC;
2573+
2574+#ifdef CONFIG_PAX_MPROTECT
2575+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2576+ vma->vm_flags &= ~VM_MAYEXEC;
2577+#endif
2578+
2579+ }
2580+#endif
2581+
2582 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2583 down_write(&current->mm->mmap_sem);
2584 if (insert_vm_struct(current->mm, vma)) {
2585diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
2586index 98b6849..8046766 100644
2587--- a/arch/ia64/sn/pci/pci_dma.c
2588+++ b/arch/ia64/sn/pci/pci_dma.c
2589@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
2590 return ret;
2591 }
2592
2593-static struct dma_map_ops sn_dma_ops = {
2594+static const struct dma_map_ops sn_dma_ops = {
2595 .alloc_coherent = sn_dma_alloc_coherent,
2596 .free_coherent = sn_dma_free_coherent,
2597 .map_page = sn_dma_map_page,
2598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2599index 40b3ee9..8c2c112 100644
2600--- a/arch/m32r/include/asm/cache.h
2601+++ b/arch/m32r/include/asm/cache.h
2602@@ -1,8 +1,10 @@
2603 #ifndef _ASM_M32R_CACHE_H
2604 #define _ASM_M32R_CACHE_H
2605
2606+#include <linux/const.h>
2607+
2608 /* L1 cache line size */
2609 #define L1_CACHE_SHIFT 4
2610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2612
2613 #endif /* _ASM_M32R_CACHE_H */
2614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2615index 82abd15..d95ae5d 100644
2616--- a/arch/m32r/lib/usercopy.c
2617+++ b/arch/m32r/lib/usercopy.c
2618@@ -14,6 +14,9 @@
2619 unsigned long
2620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2621 {
2622+ if ((long)n < 0)
2623+ return n;
2624+
2625 prefetch(from);
2626 if (access_ok(VERIFY_WRITE, to, n))
2627 __copy_user(to,from,n);
2628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2629 unsigned long
2630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2631 {
2632+ if ((long)n < 0)
2633+ return n;
2634+
2635 prefetchw(to);
2636 if (access_ok(VERIFY_READ, from, n))
2637 __copy_user_zeroing(to,from,n);
2638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2639index ecafbe1..432c3e4 100644
2640--- a/arch/m68k/include/asm/cache.h
2641+++ b/arch/m68k/include/asm/cache.h
2642@@ -4,9 +4,11 @@
2643 #ifndef __ARCH_M68K_CACHE_H
2644 #define __ARCH_M68K_CACHE_H
2645
2646+#include <linux/const.h>
2647+
2648 /* bytes per L1 cache line */
2649 #define L1_CACHE_SHIFT 4
2650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2652
2653 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
2654
2655diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2656index c209c47..2ba96e2 100644
2657--- a/arch/microblaze/include/asm/cache.h
2658+++ b/arch/microblaze/include/asm/cache.h
2659@@ -13,11 +13,12 @@
2660 #ifndef _ASM_MICROBLAZE_CACHE_H
2661 #define _ASM_MICROBLAZE_CACHE_H
2662
2663+#include <linux/const.h>
2664 #include <asm/registers.h>
2665
2666 #define L1_CACHE_SHIFT 2
2667 /* word-granular cache in microblaze */
2668-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2669+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2670
2671 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2672
2673diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2674index fd7620f..63d73a6 100644
2675--- a/arch/mips/Kconfig
2676+++ b/arch/mips/Kconfig
2677@@ -5,6 +5,7 @@ config MIPS
2678 select HAVE_IDE
2679 select HAVE_OPROFILE
2680 select HAVE_ARCH_KGDB
2681+ select GENERIC_ATOMIC64 if !64BIT
2682 # Horrible source of confusion. Die, die, die ...
2683 select EMBEDDED
2684 select RTC_LIB if !LEMOTE_FULOONG2E
2685diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2686index 77f5021..2b1db8a 100644
2687--- a/arch/mips/Makefile
2688+++ b/arch/mips/Makefile
2689@@ -51,6 +51,8 @@ endif
2690 cflags-y := -ffunction-sections
2691 cflags-y += $(call cc-option, -mno-check-zero-division)
2692
2693+cflags-y += -Wno-sign-compare -Wno-extra
2694+
2695 ifdef CONFIG_32BIT
2696 ld-emul = $(32bit-emul)
2697 vmlinux-32 = vmlinux
2698diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2699index 632f986..fd0378d 100644
2700--- a/arch/mips/alchemy/devboards/pm.c
2701+++ b/arch/mips/alchemy/devboards/pm.c
2702@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2703
2704 }
2705
2706-static struct platform_suspend_ops db1x_pm_ops = {
2707+static const struct platform_suspend_ops db1x_pm_ops = {
2708 .valid = suspend_valid_only_mem,
2709 .begin = db1x_pm_begin,
2710 .enter = db1x_pm_enter,
2711diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2712index 09e7128..111035b 100644
2713--- a/arch/mips/include/asm/atomic.h
2714+++ b/arch/mips/include/asm/atomic.h
2715@@ -21,6 +21,10 @@
2716 #include <asm/war.h>
2717 #include <asm/system.h>
2718
2719+#ifdef CONFIG_GENERIC_ATOMIC64
2720+#include <asm-generic/atomic64.h>
2721+#endif
2722+
2723 #define ATOMIC_INIT(i) { (i) }
2724
2725 /*
2726@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2727 */
2728 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2729
2730+#define atomic64_read_unchecked(v) atomic64_read(v)
2731+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2732+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2733+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2734+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2735+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2736+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2737+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2738+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2739+
2740 #endif /* CONFIG_64BIT */
2741
2742 /*
2743diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2744index 37f175c..c7a3065 100644
2745--- a/arch/mips/include/asm/cache.h
2746+++ b/arch/mips/include/asm/cache.h
2747@@ -9,10 +9,11 @@
2748 #ifndef _ASM_CACHE_H
2749 #define _ASM_CACHE_H
2750
2751+#include <linux/const.h>
2752 #include <kmalloc.h>
2753
2754 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2755-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2756+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2757
2758 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2759 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2760diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2761index 7990694..4e93acf 100644
2762--- a/arch/mips/include/asm/elf.h
2763+++ b/arch/mips/include/asm/elf.h
2764@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2765 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2766 #endif
2767
2768+#ifdef CONFIG_PAX_ASLR
2769+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2770+
2771+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2772+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2773+#endif
2774+
2775 #endif /* _ASM_ELF_H */
2776diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2777index f266295..627cfff 100644
2778--- a/arch/mips/include/asm/page.h
2779+++ b/arch/mips/include/asm/page.h
2780@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2781 #ifdef CONFIG_CPU_MIPS32
2782 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2783 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2784- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2785+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2786 #else
2787 typedef struct { unsigned long long pte; } pte_t;
2788 #define pte_val(x) ((x).pte)
2789diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2790index e48c0bf..f3acf65 100644
2791--- a/arch/mips/include/asm/reboot.h
2792+++ b/arch/mips/include/asm/reboot.h
2793@@ -9,7 +9,7 @@
2794 #ifndef _ASM_REBOOT_H
2795 #define _ASM_REBOOT_H
2796
2797-extern void (*_machine_restart)(char *command);
2798-extern void (*_machine_halt)(void);
2799+extern void (*__noreturn _machine_restart)(char *command);
2800+extern void (*__noreturn _machine_halt)(void);
2801
2802 #endif /* _ASM_REBOOT_H */
2803diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2804index 83b5509..9fa24a23 100644
2805--- a/arch/mips/include/asm/system.h
2806+++ b/arch/mips/include/asm/system.h
2807@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2808 */
2809 #define __ARCH_WANT_UNLOCKED_CTXSW
2810
2811-extern unsigned long arch_align_stack(unsigned long sp);
2812+#define arch_align_stack(x) ((x) & ~0xfUL)
2813
2814 #endif /* _ASM_SYSTEM_H */
2815diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2816index 9fdd8bc..fcf9d68 100644
2817--- a/arch/mips/kernel/binfmt_elfn32.c
2818+++ b/arch/mips/kernel/binfmt_elfn32.c
2819@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2820 #undef ELF_ET_DYN_BASE
2821 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2822
2823+#ifdef CONFIG_PAX_ASLR
2824+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2825+
2826+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2827+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2828+#endif
2829+
2830 #include <asm/processor.h>
2831 #include <linux/module.h>
2832 #include <linux/elfcore.h>
2833diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2834index ff44823..cf0b48a 100644
2835--- a/arch/mips/kernel/binfmt_elfo32.c
2836+++ b/arch/mips/kernel/binfmt_elfo32.c
2837@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2838 #undef ELF_ET_DYN_BASE
2839 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2840
2841+#ifdef CONFIG_PAX_ASLR
2842+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2843+
2844+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2845+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2846+#endif
2847+
2848 #include <asm/processor.h>
2849
2850 /*
2851diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2852index 50c9bb8..efdd5f8 100644
2853--- a/arch/mips/kernel/kgdb.c
2854+++ b/arch/mips/kernel/kgdb.c
2855@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2856 return -1;
2857 }
2858
2859+/* cannot be const */
2860 struct kgdb_arch arch_kgdb_ops;
2861
2862 /*
2863diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2864index f3d73e1..bb3f57a 100644
2865--- a/arch/mips/kernel/process.c
2866+++ b/arch/mips/kernel/process.c
2867@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2868 out:
2869 return pc;
2870 }
2871-
2872-/*
2873- * Don't forget that the stack pointer must be aligned on a 8 bytes
2874- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2875- */
2876-unsigned long arch_align_stack(unsigned long sp)
2877-{
2878- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2879- sp -= get_random_int() & ~PAGE_MASK;
2880-
2881- return sp & ALMASK;
2882-}
2883diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2884index 060563a..7fbf310 100644
2885--- a/arch/mips/kernel/reset.c
2886+++ b/arch/mips/kernel/reset.c
2887@@ -19,8 +19,8 @@
2888 * So handle all using function pointers to machine specific
2889 * functions.
2890 */
2891-void (*_machine_restart)(char *command);
2892-void (*_machine_halt)(void);
2893+void (*__noreturn _machine_restart)(char *command);
2894+void (*__noreturn _machine_halt)(void);
2895 void (*pm_power_off)(void);
2896
2897 EXPORT_SYMBOL(pm_power_off);
2898@@ -29,16 +29,19 @@ void machine_restart(char *command)
2899 {
2900 if (_machine_restart)
2901 _machine_restart(command);
2902+ BUG();
2903 }
2904
2905 void machine_halt(void)
2906 {
2907 if (_machine_halt)
2908 _machine_halt();
2909+ BUG();
2910 }
2911
2912 void machine_power_off(void)
2913 {
2914 if (pm_power_off)
2915 pm_power_off();
2916+ BUG();
2917 }
2918diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2919index 3f7f466..3abe0b5 100644
2920--- a/arch/mips/kernel/syscall.c
2921+++ b/arch/mips/kernel/syscall.c
2922@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2923 do_color_align = 0;
2924 if (filp || (flags & MAP_SHARED))
2925 do_color_align = 1;
2926+
2927+#ifdef CONFIG_PAX_RANDMMAP
2928+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2929+#endif
2930+
2931 if (addr) {
2932 if (do_color_align)
2933 addr = COLOUR_ALIGN(addr, pgoff);
2934 else
2935 addr = PAGE_ALIGN(addr);
2936 vmm = find_vma(current->mm, addr);
2937- if (task_size - len >= addr &&
2938- (!vmm || addr + len <= vmm->vm_start))
2939+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2940 return addr;
2941 }
2942- addr = TASK_UNMAPPED_BASE;
2943+ addr = current->mm->mmap_base;
2944 if (do_color_align)
2945 addr = COLOUR_ALIGN(addr, pgoff);
2946 else
2947@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2948 /* At this point: (!vmm || addr < vmm->vm_end). */
2949 if (task_size - len < addr)
2950 return -ENOMEM;
2951- if (!vmm || addr + len <= vmm->vm_start)
2952+ if (check_heap_stack_gap(vmm, addr, len))
2953 return addr;
2954 addr = vmm->vm_end;
2955 if (do_color_align)
2956diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2957index e97a7a2..f18f5b0 100644
2958--- a/arch/mips/mm/fault.c
2959+++ b/arch/mips/mm/fault.c
2960@@ -26,6 +26,23 @@
2961 #include <asm/ptrace.h>
2962 #include <asm/highmem.h> /* For VMALLOC_END */
2963
2964+#ifdef CONFIG_PAX_PAGEEXEC
2965+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2966+{
2967+ unsigned long i;
2968+
2969+ printk(KERN_ERR "PAX: bytes at PC: ");
2970+ for (i = 0; i < 5; i++) {
2971+ unsigned int c;
2972+ if (get_user(c, (unsigned int *)pc+i))
2973+ printk(KERN_CONT "???????? ");
2974+ else
2975+ printk(KERN_CONT "%08x ", c);
2976+ }
2977+ printk("\n");
2978+}
2979+#endif
2980+
2981 /*
2982 * This routine handles page faults. It determines the address,
2983 * and the problem, and then passes it off to one of the appropriate
2984diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2985index bdc1f9a..e8de5c5 100644
2986--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2987+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2988@@ -11,12 +11,14 @@
2989 #ifndef _ASM_PROC_CACHE_H
2990 #define _ASM_PROC_CACHE_H
2991
2992+#include <linux/const.h>
2993+
2994 /* L1 cache */
2995
2996 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2997 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2998-#define L1_CACHE_BYTES 16 /* bytes per entry */
2999 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3000+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3001 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3002
3003 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3004diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3005index 8bc9e96..26554f8 100644
3006--- a/arch/parisc/include/asm/atomic.h
3007+++ b/arch/parisc/include/asm/atomic.h
3008@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3009
3010 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3011
3012+#define atomic64_read_unchecked(v) atomic64_read(v)
3013+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3014+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3015+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3016+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3017+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3018+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3019+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3020+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3021+
3022 #else /* CONFIG_64BIT */
3023
3024 #include <asm-generic/atomic64.h>
3025diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3026index 32c2cca..a7b3a64 100644
3027--- a/arch/parisc/include/asm/cache.h
3028+++ b/arch/parisc/include/asm/cache.h
3029@@ -5,6 +5,7 @@
3030 #ifndef __ARCH_PARISC_CACHE_H
3031 #define __ARCH_PARISC_CACHE_H
3032
3033+#include <linux/const.h>
3034
3035 /*
3036 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3037@@ -15,13 +16,13 @@
3038 * just ruin performance.
3039 */
3040 #ifdef CONFIG_PA20
3041-#define L1_CACHE_BYTES 64
3042 #define L1_CACHE_SHIFT 6
3043 #else
3044-#define L1_CACHE_BYTES 32
3045 #define L1_CACHE_SHIFT 5
3046 #endif
3047
3048+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3049+
3050 #ifndef __ASSEMBLY__
3051
3052 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
3053diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3054index 9c802eb..0592e41 100644
3055--- a/arch/parisc/include/asm/elf.h
3056+++ b/arch/parisc/include/asm/elf.h
3057@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
3058
3059 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3060
3061+#ifdef CONFIG_PAX_ASLR
3062+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3063+
3064+#define PAX_DELTA_MMAP_LEN 16
3065+#define PAX_DELTA_STACK_LEN 16
3066+#endif
3067+
3068 /* This yields a mask that user programs can use to figure out what
3069 instruction set this CPU supports. This could be done in user space,
3070 but it's not easy, and we've already done it here. */
3071diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3072index a27d2e2..18fd845 100644
3073--- a/arch/parisc/include/asm/pgtable.h
3074+++ b/arch/parisc/include/asm/pgtable.h
3075@@ -207,6 +207,17 @@
3076 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3077 #define PAGE_COPY PAGE_EXECREAD
3078 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3079+
3080+#ifdef CONFIG_PAX_PAGEEXEC
3081+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3082+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3083+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3084+#else
3085+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3086+# define PAGE_COPY_NOEXEC PAGE_COPY
3087+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3088+#endif
3089+
3090 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3091 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
3092 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
3093diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3094index 2120746..8d70a5e 100644
3095--- a/arch/parisc/kernel/module.c
3096+++ b/arch/parisc/kernel/module.c
3097@@ -95,16 +95,38 @@
3098
3099 /* three functions to determine where in the module core
3100 * or init pieces the location is */
3101+static inline int in_init_rx(struct module *me, void *loc)
3102+{
3103+ return (loc >= me->module_init_rx &&
3104+ loc < (me->module_init_rx + me->init_size_rx));
3105+}
3106+
3107+static inline int in_init_rw(struct module *me, void *loc)
3108+{
3109+ return (loc >= me->module_init_rw &&
3110+ loc < (me->module_init_rw + me->init_size_rw));
3111+}
3112+
3113 static inline int in_init(struct module *me, void *loc)
3114 {
3115- return (loc >= me->module_init &&
3116- loc <= (me->module_init + me->init_size));
3117+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3118+}
3119+
3120+static inline int in_core_rx(struct module *me, void *loc)
3121+{
3122+ return (loc >= me->module_core_rx &&
3123+ loc < (me->module_core_rx + me->core_size_rx));
3124+}
3125+
3126+static inline int in_core_rw(struct module *me, void *loc)
3127+{
3128+ return (loc >= me->module_core_rw &&
3129+ loc < (me->module_core_rw + me->core_size_rw));
3130 }
3131
3132 static inline int in_core(struct module *me, void *loc)
3133 {
3134- return (loc >= me->module_core &&
3135- loc <= (me->module_core + me->core_size));
3136+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3137 }
3138
3139 static inline int in_local(struct module *me, void *loc)
3140@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3141 }
3142
3143 /* align things a bit */
3144- me->core_size = ALIGN(me->core_size, 16);
3145- me->arch.got_offset = me->core_size;
3146- me->core_size += gots * sizeof(struct got_entry);
3147+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3148+ me->arch.got_offset = me->core_size_rw;
3149+ me->core_size_rw += gots * sizeof(struct got_entry);
3150
3151- me->core_size = ALIGN(me->core_size, 16);
3152- me->arch.fdesc_offset = me->core_size;
3153- me->core_size += fdescs * sizeof(Elf_Fdesc);
3154+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3155+ me->arch.fdesc_offset = me->core_size_rw;
3156+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3157
3158 me->arch.got_max = gots;
3159 me->arch.fdesc_max = fdescs;
3160@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3161
3162 BUG_ON(value == 0);
3163
3164- got = me->module_core + me->arch.got_offset;
3165+ got = me->module_core_rw + me->arch.got_offset;
3166 for (i = 0; got[i].addr; i++)
3167 if (got[i].addr == value)
3168 goto out;
3169@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3170 #ifdef CONFIG_64BIT
3171 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3172 {
3173- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3174+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3175
3176 if (!value) {
3177 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3178@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3179
3180 /* Create new one */
3181 fdesc->addr = value;
3182- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3183+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3184 return (Elf_Addr)fdesc;
3185 }
3186 #endif /* CONFIG_64BIT */
3187@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
3188
3189 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3190 end = table + sechdrs[me->arch.unwind_section].sh_size;
3191- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3192+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3193
3194 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3195 me->arch.unwind_section, table, end, gp);
3196diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3197index 9147391..f3d949a 100644
3198--- a/arch/parisc/kernel/sys_parisc.c
3199+++ b/arch/parisc/kernel/sys_parisc.c
3200@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3201 /* At this point: (!vma || addr < vma->vm_end). */
3202 if (TASK_SIZE - len < addr)
3203 return -ENOMEM;
3204- if (!vma || addr + len <= vma->vm_start)
3205+ if (check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 addr = vma->vm_end;
3208 }
3209@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3210 /* At this point: (!vma || addr < vma->vm_end). */
3211 if (TASK_SIZE - len < addr)
3212 return -ENOMEM;
3213- if (!vma || addr + len <= vma->vm_start)
3214+ if (check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3217 if (addr < vma->vm_end) /* handle wraparound */
3218@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3219 if (flags & MAP_FIXED)
3220 return addr;
3221 if (!addr)
3222- addr = TASK_UNMAPPED_BASE;
3223+ addr = current->mm->mmap_base;
3224
3225 if (filp) {
3226 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3227diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3228index 8b58bf0..7afff03 100644
3229--- a/arch/parisc/kernel/traps.c
3230+++ b/arch/parisc/kernel/traps.c
3231@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3232
3233 down_read(&current->mm->mmap_sem);
3234 vma = find_vma(current->mm,regs->iaoq[0]);
3235- if (vma && (regs->iaoq[0] >= vma->vm_start)
3236- && (vma->vm_flags & VM_EXEC)) {
3237-
3238+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3239 fault_address = regs->iaoq[0];
3240 fault_space = regs->iasq[0];
3241
3242diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3243index c6afbfc..c5839f6 100644
3244--- a/arch/parisc/mm/fault.c
3245+++ b/arch/parisc/mm/fault.c
3246@@ -15,6 +15,7 @@
3247 #include <linux/sched.h>
3248 #include <linux/interrupt.h>
3249 #include <linux/module.h>
3250+#include <linux/unistd.h>
3251
3252 #include <asm/uaccess.h>
3253 #include <asm/traps.h>
3254@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3255 static unsigned long
3256 parisc_acctyp(unsigned long code, unsigned int inst)
3257 {
3258- if (code == 6 || code == 16)
3259+ if (code == 6 || code == 7 || code == 16)
3260 return VM_EXEC;
3261
3262 switch (inst & 0xf0000000) {
3263@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3264 }
3265 #endif
3266
3267+#ifdef CONFIG_PAX_PAGEEXEC
3268+/*
3269+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3270+ *
3271+ * returns 1 when task should be killed
3272+ * 2 when rt_sigreturn trampoline was detected
3273+ * 3 when unpatched PLT trampoline was detected
3274+ */
3275+static int pax_handle_fetch_fault(struct pt_regs *regs)
3276+{
3277+
3278+#ifdef CONFIG_PAX_EMUPLT
3279+ int err;
3280+
3281+ do { /* PaX: unpatched PLT emulation */
3282+ unsigned int bl, depwi;
3283+
3284+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3285+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3286+
3287+ if (err)
3288+ break;
3289+
3290+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3291+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3292+
3293+ err = get_user(ldw, (unsigned int *)addr);
3294+ err |= get_user(bv, (unsigned int *)(addr+4));
3295+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3296+
3297+ if (err)
3298+ break;
3299+
3300+ if (ldw == 0x0E801096U &&
3301+ bv == 0xEAC0C000U &&
3302+ ldw2 == 0x0E881095U)
3303+ {
3304+ unsigned int resolver, map;
3305+
3306+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3307+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3308+ if (err)
3309+ break;
3310+
3311+ regs->gr[20] = instruction_pointer(regs)+8;
3312+ regs->gr[21] = map;
3313+ regs->gr[22] = resolver;
3314+ regs->iaoq[0] = resolver | 3UL;
3315+ regs->iaoq[1] = regs->iaoq[0] + 4;
3316+ return 3;
3317+ }
3318+ }
3319+ } while (0);
3320+#endif
3321+
3322+#ifdef CONFIG_PAX_EMUTRAMP
3323+
3324+#ifndef CONFIG_PAX_EMUSIGRT
3325+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3326+ return 1;
3327+#endif
3328+
3329+ do { /* PaX: rt_sigreturn emulation */
3330+ unsigned int ldi1, ldi2, bel, nop;
3331+
3332+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3333+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3334+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3335+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3336+
3337+ if (err)
3338+ break;
3339+
3340+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3341+ ldi2 == 0x3414015AU &&
3342+ bel == 0xE4008200U &&
3343+ nop == 0x08000240U)
3344+ {
3345+ regs->gr[25] = (ldi1 & 2) >> 1;
3346+ regs->gr[20] = __NR_rt_sigreturn;
3347+ regs->gr[31] = regs->iaoq[1] + 16;
3348+ regs->sr[0] = regs->iasq[1];
3349+ regs->iaoq[0] = 0x100UL;
3350+ regs->iaoq[1] = regs->iaoq[0] + 4;
3351+ regs->iasq[0] = regs->sr[2];
3352+ regs->iasq[1] = regs->sr[2];
3353+ return 2;
3354+ }
3355+ } while (0);
3356+#endif
3357+
3358+ return 1;
3359+}
3360+
3361+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3362+{
3363+ unsigned long i;
3364+
3365+ printk(KERN_ERR "PAX: bytes at PC: ");
3366+ for (i = 0; i < 5; i++) {
3367+ unsigned int c;
3368+ if (get_user(c, (unsigned int *)pc+i))
3369+ printk(KERN_CONT "???????? ");
3370+ else
3371+ printk(KERN_CONT "%08x ", c);
3372+ }
3373+ printk("\n");
3374+}
3375+#endif
3376+
3377 int fixup_exception(struct pt_regs *regs)
3378 {
3379 const struct exception_table_entry *fix;
3380@@ -192,8 +303,33 @@ good_area:
3381
3382 acc_type = parisc_acctyp(code,regs->iir);
3383
3384- if ((vma->vm_flags & acc_type) != acc_type)
3385+ if ((vma->vm_flags & acc_type) != acc_type) {
3386+
3387+#ifdef CONFIG_PAX_PAGEEXEC
3388+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3389+ (address & ~3UL) == instruction_pointer(regs))
3390+ {
3391+ up_read(&mm->mmap_sem);
3392+ switch (pax_handle_fetch_fault(regs)) {
3393+
3394+#ifdef CONFIG_PAX_EMUPLT
3395+ case 3:
3396+ return;
3397+#endif
3398+
3399+#ifdef CONFIG_PAX_EMUTRAMP
3400+ case 2:
3401+ return;
3402+#endif
3403+
3404+ }
3405+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3406+ do_group_exit(SIGKILL);
3407+ }
3408+#endif
3409+
3410 goto bad_area;
3411+ }
3412
3413 /*
3414 * If for any reason at all we couldn't handle the fault, make
3415diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3416index c107b74..409dc0f 100644
3417--- a/arch/powerpc/Makefile
3418+++ b/arch/powerpc/Makefile
3419@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
3420 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
3421 CPP = $(CC) -E $(KBUILD_CFLAGS)
3422
3423+cflags-y += -Wno-sign-compare -Wno-extra
3424+
3425 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
3426
3427 ifeq ($(CONFIG_PPC64),y)
3428diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3429index 81de6eb..d5d0e24 100644
3430--- a/arch/powerpc/include/asm/cache.h
3431+++ b/arch/powerpc/include/asm/cache.h
3432@@ -3,6 +3,7 @@
3433
3434 #ifdef __KERNEL__
3435
3436+#include <linux/const.h>
3437
3438 /* bytes per L1 cache line */
3439 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3440@@ -18,7 +19,7 @@
3441 #define L1_CACHE_SHIFT 7
3442 #endif
3443
3444-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3446
3447 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3448
3449diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
3450index 6d94d27..50d4cad 100644
3451--- a/arch/powerpc/include/asm/device.h
3452+++ b/arch/powerpc/include/asm/device.h
3453@@ -14,7 +14,7 @@ struct dev_archdata {
3454 struct device_node *of_node;
3455
3456 /* DMA operations on that device */
3457- struct dma_map_ops *dma_ops;
3458+ const struct dma_map_ops *dma_ops;
3459
3460 /*
3461 * When an iommu is in use, dma_data is used as a ptr to the base of the
3462diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
3463index e281dae..2b8a784 100644
3464--- a/arch/powerpc/include/asm/dma-mapping.h
3465+++ b/arch/powerpc/include/asm/dma-mapping.h
3466@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
3467 #ifdef CONFIG_PPC64
3468 extern struct dma_map_ops dma_iommu_ops;
3469 #endif
3470-extern struct dma_map_ops dma_direct_ops;
3471+extern const struct dma_map_ops dma_direct_ops;
3472
3473-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3474+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3475 {
3476 /* We don't handle the NULL dev case for ISA for now. We could
3477 * do it via an out of line call but it is not needed for now. The
3478@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3479 return dev->archdata.dma_ops;
3480 }
3481
3482-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
3483+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
3484 {
3485 dev->archdata.dma_ops = ops;
3486 }
3487@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
3488
3489 static inline int dma_supported(struct device *dev, u64 mask)
3490 {
3491- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3492+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3493
3494 if (unlikely(dma_ops == NULL))
3495 return 0;
3496@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
3497
3498 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3499 {
3500- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3501+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3502
3503 if (unlikely(dma_ops == NULL))
3504 return -EIO;
3505@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3506 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3507 dma_addr_t *dma_handle, gfp_t flag)
3508 {
3509- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3510+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3511 void *cpu_addr;
3512
3513 BUG_ON(!dma_ops);
3514@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3515 static inline void dma_free_coherent(struct device *dev, size_t size,
3516 void *cpu_addr, dma_addr_t dma_handle)
3517 {
3518- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3519+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3520
3521 BUG_ON(!dma_ops);
3522
3523@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
3524
3525 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3526 {
3527- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3528+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3529
3530 if (dma_ops->mapping_error)
3531 return dma_ops->mapping_error(dev, dma_addr);
3532diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3533index 5698502..5db093c 100644
3534--- a/arch/powerpc/include/asm/elf.h
3535+++ b/arch/powerpc/include/asm/elf.h
3536@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3537 the loader. We need to make sure that it is out of the way of the program
3538 that it will "exec", and that there is sufficient room for the brk. */
3539
3540-extern unsigned long randomize_et_dyn(unsigned long base);
3541-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3542+#define ELF_ET_DYN_BASE (0x20000000)
3543+
3544+#ifdef CONFIG_PAX_ASLR
3545+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3546+
3547+#ifdef __powerpc64__
3548+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3549+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3550+#else
3551+#define PAX_DELTA_MMAP_LEN 15
3552+#define PAX_DELTA_STACK_LEN 15
3553+#endif
3554+#endif
3555
3556 /*
3557 * Our registers are always unsigned longs, whether we're a 32 bit
3558@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3559 (0x7ff >> (PAGE_SHIFT - 12)) : \
3560 (0x3ffff >> (PAGE_SHIFT - 12)))
3561
3562-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3563-#define arch_randomize_brk arch_randomize_brk
3564-
3565 #endif /* __KERNEL__ */
3566
3567 /*
3568diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
3569index edfc980..1766f59 100644
3570--- a/arch/powerpc/include/asm/iommu.h
3571+++ b/arch/powerpc/include/asm/iommu.h
3572@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
3573 extern void iommu_init_early_dart(void);
3574 extern void iommu_init_early_pasemi(void);
3575
3576+/* dma-iommu.c */
3577+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
3578+
3579 #ifdef CONFIG_PCI
3580 extern void pci_iommu_init(void);
3581 extern void pci_direct_iommu_init(void);
3582diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3583index 9163695..5a00112 100644
3584--- a/arch/powerpc/include/asm/kmap_types.h
3585+++ b/arch/powerpc/include/asm/kmap_types.h
3586@@ -26,6 +26,7 @@ enum km_type {
3587 KM_SOFTIRQ1,
3588 KM_PPC_SYNC_PAGE,
3589 KM_PPC_SYNC_ICACHE,
3590+ KM_CLEARPAGE,
3591 KM_TYPE_NR
3592 };
3593
3594diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3595index ff24254..fe45b21 100644
3596--- a/arch/powerpc/include/asm/page.h
3597+++ b/arch/powerpc/include/asm/page.h
3598@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
3599 * and needs to be executable. This means the whole heap ends
3600 * up being executable.
3601 */
3602-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3603- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3604+#define VM_DATA_DEFAULT_FLAGS32 \
3605+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3606+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3607
3608 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3609 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3610@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
3611 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3612 #endif
3613
3614+#define ktla_ktva(addr) (addr)
3615+#define ktva_ktla(addr) (addr)
3616+
3617 #ifndef __ASSEMBLY__
3618
3619 #undef STRICT_MM_TYPECHECKS
3620diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3621index 3f17b83..1f9e766 100644
3622--- a/arch/powerpc/include/asm/page_64.h
3623+++ b/arch/powerpc/include/asm/page_64.h
3624@@ -180,15 +180,18 @@ do { \
3625 * stack by default, so in the absense of a PT_GNU_STACK program header
3626 * we turn execute permission off.
3627 */
3628-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3629- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3630+#define VM_STACK_DEFAULT_FLAGS32 \
3631+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3632+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3633
3634 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3635 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3636
3637+#ifndef CONFIG_PAX_PAGEEXEC
3638 #define VM_STACK_DEFAULT_FLAGS \
3639 (test_thread_flag(TIF_32BIT) ? \
3640 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3641+#endif
3642
3643 #include <asm-generic/getorder.h>
3644
3645diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
3646index b5ea626..40308222 100644
3647--- a/arch/powerpc/include/asm/pci.h
3648+++ b/arch/powerpc/include/asm/pci.h
3649@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3650 }
3651
3652 #ifdef CONFIG_PCI
3653-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
3654-extern struct dma_map_ops *get_pci_dma_ops(void);
3655+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
3656+extern const struct dma_map_ops *get_pci_dma_ops(void);
3657 #else /* CONFIG_PCI */
3658 #define set_pci_dma_ops(d)
3659 #define get_pci_dma_ops() NULL
3660diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3661index 2a5da06..d65bea2 100644
3662--- a/arch/powerpc/include/asm/pgtable.h
3663+++ b/arch/powerpc/include/asm/pgtable.h
3664@@ -2,6 +2,7 @@
3665 #define _ASM_POWERPC_PGTABLE_H
3666 #ifdef __KERNEL__
3667
3668+#include <linux/const.h>
3669 #ifndef __ASSEMBLY__
3670 #include <asm/processor.h> /* For TASK_SIZE */
3671 #include <asm/mmu.h>
3672diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3673index 4aad413..85d86bf 100644
3674--- a/arch/powerpc/include/asm/pte-hash32.h
3675+++ b/arch/powerpc/include/asm/pte-hash32.h
3676@@ -21,6 +21,7 @@
3677 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3678 #define _PAGE_USER 0x004 /* usermode access allowed */
3679 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3680+#define _PAGE_EXEC _PAGE_GUARDED
3681 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3682 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3683 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3684diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
3685index 8c34149..78f425a 100644
3686--- a/arch/powerpc/include/asm/ptrace.h
3687+++ b/arch/powerpc/include/asm/ptrace.h
3688@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
3689 } while(0)
3690
3691 struct task_struct;
3692-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
3693+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
3694 extern int ptrace_put_reg(struct task_struct *task, int regno,
3695 unsigned long data);
3696
3697diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3698index 32a7c30..be3a8bb 100644
3699--- a/arch/powerpc/include/asm/reg.h
3700+++ b/arch/powerpc/include/asm/reg.h
3701@@ -191,6 +191,7 @@
3702 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3703 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3704 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3705+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3706 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3707 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3708 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3709diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
3710index 8979d4c..d2fd0d3 100644
3711--- a/arch/powerpc/include/asm/swiotlb.h
3712+++ b/arch/powerpc/include/asm/swiotlb.h
3713@@ -13,7 +13,7 @@
3714
3715 #include <linux/swiotlb.h>
3716
3717-extern struct dma_map_ops swiotlb_dma_ops;
3718+extern const struct dma_map_ops swiotlb_dma_ops;
3719
3720 static inline void dma_mark_clean(void *addr, size_t size) {}
3721
3722diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3723index 094a12a..877a60a 100644
3724--- a/arch/powerpc/include/asm/system.h
3725+++ b/arch/powerpc/include/asm/system.h
3726@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3727 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3728 #endif
3729
3730-extern unsigned long arch_align_stack(unsigned long sp);
3731+#define arch_align_stack(x) ((x) & ~0xfUL)
3732
3733 /* Used in very early kernel initialization. */
3734 extern unsigned long reloc_offset(void);
3735diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3736index bd0fb84..a42a14b 100644
3737--- a/arch/powerpc/include/asm/uaccess.h
3738+++ b/arch/powerpc/include/asm/uaccess.h
3739@@ -13,6 +13,8 @@
3740 #define VERIFY_READ 0
3741 #define VERIFY_WRITE 1
3742
3743+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3744+
3745 /*
3746 * The fs value determines whether argument validity checking should be
3747 * performed or not. If get_fs() == USER_DS, checking is performed, with
3748@@ -327,52 +329,6 @@ do { \
3749 extern unsigned long __copy_tofrom_user(void __user *to,
3750 const void __user *from, unsigned long size);
3751
3752-#ifndef __powerpc64__
3753-
3754-static inline unsigned long copy_from_user(void *to,
3755- const void __user *from, unsigned long n)
3756-{
3757- unsigned long over;
3758-
3759- if (access_ok(VERIFY_READ, from, n))
3760- return __copy_tofrom_user((__force void __user *)to, from, n);
3761- if ((unsigned long)from < TASK_SIZE) {
3762- over = (unsigned long)from + n - TASK_SIZE;
3763- return __copy_tofrom_user((__force void __user *)to, from,
3764- n - over) + over;
3765- }
3766- return n;
3767-}
3768-
3769-static inline unsigned long copy_to_user(void __user *to,
3770- const void *from, unsigned long n)
3771-{
3772- unsigned long over;
3773-
3774- if (access_ok(VERIFY_WRITE, to, n))
3775- return __copy_tofrom_user(to, (__force void __user *)from, n);
3776- if ((unsigned long)to < TASK_SIZE) {
3777- over = (unsigned long)to + n - TASK_SIZE;
3778- return __copy_tofrom_user(to, (__force void __user *)from,
3779- n - over) + over;
3780- }
3781- return n;
3782-}
3783-
3784-#else /* __powerpc64__ */
3785-
3786-#define __copy_in_user(to, from, size) \
3787- __copy_tofrom_user((to), (from), (size))
3788-
3789-extern unsigned long copy_from_user(void *to, const void __user *from,
3790- unsigned long n);
3791-extern unsigned long copy_to_user(void __user *to, const void *from,
3792- unsigned long n);
3793-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3794- unsigned long n);
3795-
3796-#endif /* __powerpc64__ */
3797-
3798 static inline unsigned long __copy_from_user_inatomic(void *to,
3799 const void __user *from, unsigned long n)
3800 {
3801@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3802 if (ret == 0)
3803 return 0;
3804 }
3805+
3806+ if (!__builtin_constant_p(n))
3807+ check_object_size(to, n, false);
3808+
3809 return __copy_tofrom_user((__force void __user *)to, from, n);
3810 }
3811
3812@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3813 if (ret == 0)
3814 return 0;
3815 }
3816+
3817+ if (!__builtin_constant_p(n))
3818+ check_object_size(from, n, true);
3819+
3820 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3821 }
3822
3823@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3824 return __copy_to_user_inatomic(to, from, size);
3825 }
3826
3827+#ifndef __powerpc64__
3828+
3829+static inline unsigned long __must_check copy_from_user(void *to,
3830+ const void __user *from, unsigned long n)
3831+{
3832+ unsigned long over;
3833+
3834+ if ((long)n < 0)
3835+ return n;
3836+
3837+ if (access_ok(VERIFY_READ, from, n)) {
3838+ if (!__builtin_constant_p(n))
3839+ check_object_size(to, n, false);
3840+ return __copy_tofrom_user((__force void __user *)to, from, n);
3841+ }
3842+ if ((unsigned long)from < TASK_SIZE) {
3843+ over = (unsigned long)from + n - TASK_SIZE;
3844+ if (!__builtin_constant_p(n - over))
3845+ check_object_size(to, n - over, false);
3846+ return __copy_tofrom_user((__force void __user *)to, from,
3847+ n - over) + over;
3848+ }
3849+ return n;
3850+}
3851+
3852+static inline unsigned long __must_check copy_to_user(void __user *to,
3853+ const void *from, unsigned long n)
3854+{
3855+ unsigned long over;
3856+
3857+ if ((long)n < 0)
3858+ return n;
3859+
3860+ if (access_ok(VERIFY_WRITE, to, n)) {
3861+ if (!__builtin_constant_p(n))
3862+ check_object_size(from, n, true);
3863+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3864+ }
3865+ if ((unsigned long)to < TASK_SIZE) {
3866+ over = (unsigned long)to + n - TASK_SIZE;
3867+ if (!__builtin_constant_p(n))
3868+ check_object_size(from, n - over, true);
3869+ return __copy_tofrom_user(to, (__force void __user *)from,
3870+ n - over) + over;
3871+ }
3872+ return n;
3873+}
3874+
3875+#else /* __powerpc64__ */
3876+
3877+#define __copy_in_user(to, from, size) \
3878+ __copy_tofrom_user((to), (from), (size))
3879+
3880+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3881+{
3882+ if ((long)n < 0 || n > INT_MAX)
3883+ return n;
3884+
3885+ if (!__builtin_constant_p(n))
3886+ check_object_size(to, n, false);
3887+
3888+ if (likely(access_ok(VERIFY_READ, from, n)))
3889+ n = __copy_from_user(to, from, n);
3890+ else
3891+ memset(to, 0, n);
3892+ return n;
3893+}
3894+
3895+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3896+{
3897+ if ((long)n < 0 || n > INT_MAX)
3898+ return n;
3899+
3900+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3901+ if (!__builtin_constant_p(n))
3902+ check_object_size(from, n, true);
3903+ n = __copy_to_user(to, from, n);
3904+ }
3905+ return n;
3906+}
3907+
3908+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3909+ unsigned long n);
3910+
3911+#endif /* __powerpc64__ */
3912+
3913 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3914
3915 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3916diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3917index bb37b1d..01fe9ce 100644
3918--- a/arch/powerpc/kernel/cacheinfo.c
3919+++ b/arch/powerpc/kernel/cacheinfo.c
3920@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3921 &cache_assoc_attr,
3922 };
3923
3924-static struct sysfs_ops cache_index_ops = {
3925+static const struct sysfs_ops cache_index_ops = {
3926 .show = cache_index_show,
3927 };
3928
3929diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3930index 37771a5..648530c 100644
3931--- a/arch/powerpc/kernel/dma-iommu.c
3932+++ b/arch/powerpc/kernel/dma-iommu.c
3933@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3934 }
3935
3936 /* We support DMA to/from any memory page via the iommu */
3937-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3938+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3939 {
3940 struct iommu_table *tbl = get_iommu_table_base(dev);
3941
3942diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3943index e96cbbd..bdd6d41 100644
3944--- a/arch/powerpc/kernel/dma-swiotlb.c
3945+++ b/arch/powerpc/kernel/dma-swiotlb.c
3946@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3947 * map_page, and unmap_page on highmem, use normal dma_ops
3948 * for everything else.
3949 */
3950-struct dma_map_ops swiotlb_dma_ops = {
3951+const struct dma_map_ops swiotlb_dma_ops = {
3952 .alloc_coherent = dma_direct_alloc_coherent,
3953 .free_coherent = dma_direct_free_coherent,
3954 .map_sg = swiotlb_map_sg_attrs,
3955diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3956index 6215062..ebea59c 100644
3957--- a/arch/powerpc/kernel/dma.c
3958+++ b/arch/powerpc/kernel/dma.c
3959@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3960 }
3961 #endif
3962
3963-struct dma_map_ops dma_direct_ops = {
3964+const struct dma_map_ops dma_direct_ops = {
3965 .alloc_coherent = dma_direct_alloc_coherent,
3966 .free_coherent = dma_direct_free_coherent,
3967 .map_sg = dma_direct_map_sg,
3968diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3969index 24dcc0e..a300455 100644
3970--- a/arch/powerpc/kernel/exceptions-64e.S
3971+++ b/arch/powerpc/kernel/exceptions-64e.S
3972@@ -455,6 +455,7 @@ storage_fault_common:
3973 std r14,_DAR(r1)
3974 std r15,_DSISR(r1)
3975 addi r3,r1,STACK_FRAME_OVERHEAD
3976+ bl .save_nvgprs
3977 mr r4,r14
3978 mr r5,r15
3979 ld r14,PACA_EXGEN+EX_R14(r13)
3980@@ -464,8 +465,7 @@ storage_fault_common:
3981 cmpdi r3,0
3982 bne- 1f
3983 b .ret_from_except_lite
3984-1: bl .save_nvgprs
3985- mr r5,r3
3986+1: mr r5,r3
3987 addi r3,r1,STACK_FRAME_OVERHEAD
3988 ld r4,_DAR(r1)
3989 bl .bad_page_fault
3990diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3991index 1808876..9fd206a 100644
3992--- a/arch/powerpc/kernel/exceptions-64s.S
3993+++ b/arch/powerpc/kernel/exceptions-64s.S
3994@@ -818,10 +818,10 @@ handle_page_fault:
3995 11: ld r4,_DAR(r1)
3996 ld r5,_DSISR(r1)
3997 addi r3,r1,STACK_FRAME_OVERHEAD
3998+ bl .save_nvgprs
3999 bl .do_page_fault
4000 cmpdi r3,0
4001 beq+ 13f
4002- bl .save_nvgprs
4003 mr r5,r3
4004 addi r3,r1,STACK_FRAME_OVERHEAD
4005 lwz r4,_DAR(r1)
4006diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
4007index a4c8b38..1b09ad9 100644
4008--- a/arch/powerpc/kernel/ibmebus.c
4009+++ b/arch/powerpc/kernel/ibmebus.c
4010@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
4011 return 1;
4012 }
4013
4014-static struct dma_map_ops ibmebus_dma_ops = {
4015+static const struct dma_map_ops ibmebus_dma_ops = {
4016 .alloc_coherent = ibmebus_alloc_coherent,
4017 .free_coherent = ibmebus_free_coherent,
4018 .map_sg = ibmebus_map_sg,
4019diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4020index 8564a41..67f3471 100644
4021--- a/arch/powerpc/kernel/irq.c
4022+++ b/arch/powerpc/kernel/irq.c
4023@@ -490,9 +490,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4024 host->ops = ops;
4025 host->of_node = of_node_get(of_node);
4026
4027- if (host->ops->match == NULL)
4028- host->ops->match = default_irq_host_match;
4029-
4030 spin_lock_irqsave(&irq_big_lock, flags);
4031
4032 /* If it's a legacy controller, check for duplicates and
4033@@ -567,7 +564,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4034 */
4035 spin_lock_irqsave(&irq_big_lock, flags);
4036 list_for_each_entry(h, &irq_hosts, link)
4037- if (h->ops->match(h, node)) {
4038+ if (h->ops->match) {
4039+ if (h->ops->match(h, node)) {
4040+ found = h;
4041+ break;
4042+ }
4043+ } else if (default_irq_host_match(h, node)) {
4044 found = h;
4045 break;
4046 }
4047diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
4048index 641c74b..8339ad7 100644
4049--- a/arch/powerpc/kernel/kgdb.c
4050+++ b/arch/powerpc/kernel/kgdb.c
4051@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
4052 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
4053 return 0;
4054
4055- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4056+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4057 regs->nip += 4;
4058
4059 return 1;
4060@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
4061 /*
4062 * Global data
4063 */
4064-struct kgdb_arch arch_kgdb_ops = {
4065+const struct kgdb_arch arch_kgdb_ops = {
4066 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
4067 };
4068
4069diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
4070index 477c663..4f50234 100644
4071--- a/arch/powerpc/kernel/module.c
4072+++ b/arch/powerpc/kernel/module.c
4073@@ -31,11 +31,24 @@
4074
4075 LIST_HEAD(module_bug_list);
4076
4077+#ifdef CONFIG_PAX_KERNEXEC
4078 void *module_alloc(unsigned long size)
4079 {
4080 if (size == 0)
4081 return NULL;
4082
4083+ return vmalloc(size);
4084+}
4085+
4086+void *module_alloc_exec(unsigned long size)
4087+#else
4088+void *module_alloc(unsigned long size)
4089+#endif
4090+
4091+{
4092+ if (size == 0)
4093+ return NULL;
4094+
4095 return vmalloc_exec(size);
4096 }
4097
4098@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
4099 vfree(module_region);
4100 }
4101
4102+#ifdef CONFIG_PAX_KERNEXEC
4103+void module_free_exec(struct module *mod, void *module_region)
4104+{
4105+ module_free(mod, module_region);
4106+}
4107+#endif
4108+
4109 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
4110 const Elf_Shdr *sechdrs,
4111 const char *name)
4112diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4113index f832773..0507238 100644
4114--- a/arch/powerpc/kernel/module_32.c
4115+++ b/arch/powerpc/kernel/module_32.c
4116@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4117 me->arch.core_plt_section = i;
4118 }
4119 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4120- printk("Module doesn't contain .plt or .init.plt sections.\n");
4121+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4122 return -ENOEXEC;
4123 }
4124
4125@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
4126
4127 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4128 /* Init, or core PLT? */
4129- if (location >= mod->module_core
4130- && location < mod->module_core + mod->core_size)
4131+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4132+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4133 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4134- else
4135+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4136+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4137 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4138+ else {
4139+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4140+ return ~0UL;
4141+ }
4142
4143 /* Find this entry, or if that fails, the next avail. entry */
4144 while (entry->jump[0]) {
4145diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
4146index cadbed6..b9bbb00 100644
4147--- a/arch/powerpc/kernel/pci-common.c
4148+++ b/arch/powerpc/kernel/pci-common.c
4149@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
4150 unsigned int ppc_pci_flags = 0;
4151
4152
4153-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4154+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4155
4156-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
4157+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
4158 {
4159 pci_dma_ops = dma_ops;
4160 }
4161
4162-struct dma_map_ops *get_pci_dma_ops(void)
4163+const struct dma_map_ops *get_pci_dma_ops(void)
4164 {
4165 return pci_dma_ops;
4166 }
4167diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4168index 7b816da..8d5c277 100644
4169--- a/arch/powerpc/kernel/process.c
4170+++ b/arch/powerpc/kernel/process.c
4171@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
4172 * Lookup NIP late so we have the best change of getting the
4173 * above info out without failing
4174 */
4175- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4176- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4177+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4178+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4179 #endif
4180 show_stack(current, (unsigned long *) regs->gpr[1]);
4181 if (!user_mode(regs))
4182@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4183 newsp = stack[0];
4184 ip = stack[STACK_FRAME_LR_SAVE];
4185 if (!firstframe || ip != lr) {
4186- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4187+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4188 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4189 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4190- printk(" (%pS)",
4191+ printk(" (%pA)",
4192 (void *)current->ret_stack[curr_frame].ret);
4193 curr_frame--;
4194 }
4195@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4196 struct pt_regs *regs = (struct pt_regs *)
4197 (sp + STACK_FRAME_OVERHEAD);
4198 lr = regs->link;
4199- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4200+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4201 regs->trap, (void *)regs->nip, (void *)lr);
4202 firstframe = 1;
4203 }
4204@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
4205 }
4206
4207 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4208-
4209-unsigned long arch_align_stack(unsigned long sp)
4210-{
4211- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4212- sp -= get_random_int() & ~PAGE_MASK;
4213- return sp & ~0xf;
4214-}
4215-
4216-static inline unsigned long brk_rnd(void)
4217-{
4218- unsigned long rnd = 0;
4219-
4220- /* 8MB for 32bit, 1GB for 64bit */
4221- if (is_32bit_task())
4222- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4223- else
4224- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4225-
4226- return rnd << PAGE_SHIFT;
4227-}
4228-
4229-unsigned long arch_randomize_brk(struct mm_struct *mm)
4230-{
4231- unsigned long base = mm->brk;
4232- unsigned long ret;
4233-
4234-#ifdef CONFIG_PPC_STD_MMU_64
4235- /*
4236- * If we are using 1TB segments and we are allowed to randomise
4237- * the heap, we can put it above 1TB so it is backed by a 1TB
4238- * segment. Otherwise the heap will be in the bottom 1TB
4239- * which always uses 256MB segments and this may result in a
4240- * performance penalty.
4241- */
4242- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4243- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4244-#endif
4245-
4246- ret = PAGE_ALIGN(base + brk_rnd());
4247-
4248- if (ret < mm->brk)
4249- return mm->brk;
4250-
4251- return ret;
4252-}
4253-
4254-unsigned long randomize_et_dyn(unsigned long base)
4255-{
4256- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4257-
4258- if (ret < base)
4259- return base;
4260-
4261- return ret;
4262-}
4263diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4264index ef14988..856c4bc 100644
4265--- a/arch/powerpc/kernel/ptrace.c
4266+++ b/arch/powerpc/kernel/ptrace.c
4267@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
4268 /*
4269 * Get contents of register REGNO in task TASK.
4270 */
4271-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
4272+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
4273 {
4274 if (task->thread.regs == NULL)
4275 return -EIO;
4276@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
4277
4278 CHECK_FULL_REGS(child->thread.regs);
4279 if (index < PT_FPR0) {
4280- tmp = ptrace_get_reg(child, (int) index);
4281+ tmp = ptrace_get_reg(child, index);
4282 } else {
4283 flush_fp_to_thread(child);
4284 tmp = ((unsigned long *)child->thread.fpr)
4285diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4286index d670429..2bc59b2 100644
4287--- a/arch/powerpc/kernel/signal_32.c
4288+++ b/arch/powerpc/kernel/signal_32.c
4289@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4290 /* Save user registers on the stack */
4291 frame = &rt_sf->uc.uc_mcontext;
4292 addr = frame;
4293- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4294+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4295 if (save_user_regs(regs, frame, 0, 1))
4296 goto badframe;
4297 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4298diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4299index 2fe6fc6..ada0d96 100644
4300--- a/arch/powerpc/kernel/signal_64.c
4301+++ b/arch/powerpc/kernel/signal_64.c
4302@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4303 current->thread.fpscr.val = 0;
4304
4305 /* Set up to return from userspace. */
4306- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4307+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4308 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4309 } else {
4310 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4311diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
4312index b97c2d6..dd01a6a 100644
4313--- a/arch/powerpc/kernel/sys_ppc32.c
4314+++ b/arch/powerpc/kernel/sys_ppc32.c
4315@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
4316 if (oldlenp) {
4317 if (!error) {
4318 if (get_user(oldlen, oldlenp) ||
4319- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
4320+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
4321+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
4322 error = -EFAULT;
4323 }
4324- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
4325 }
4326 return error;
4327 }
4328diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4329index 6f0ae1a..e4b6a56 100644
4330--- a/arch/powerpc/kernel/traps.c
4331+++ b/arch/powerpc/kernel/traps.c
4332@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
4333 static inline void pmac_backlight_unblank(void) { }
4334 #endif
4335
4336+extern void gr_handle_kernel_exploit(void);
4337+
4338 int die(const char *str, struct pt_regs *regs, long err)
4339 {
4340 static struct {
4341@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
4342 if (panic_on_oops)
4343 panic("Fatal exception");
4344
4345+ gr_handle_kernel_exploit();
4346+
4347 oops_exit();
4348 do_exit(err);
4349
4350diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4351index 137dc22..fe57a79 100644
4352--- a/arch/powerpc/kernel/vdso.c
4353+++ b/arch/powerpc/kernel/vdso.c
4354@@ -36,6 +36,7 @@
4355 #include <asm/firmware.h>
4356 #include <asm/vdso.h>
4357 #include <asm/vdso_datapage.h>
4358+#include <asm/mman.h>
4359
4360 #include "setup.h"
4361
4362@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4363 vdso_base = VDSO32_MBASE;
4364 #endif
4365
4366- current->mm->context.vdso_base = 0;
4367+ current->mm->context.vdso_base = ~0UL;
4368
4369 /* vDSO has a problem and was disabled, just don't "enable" it for the
4370 * process
4371@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4372 vdso_base = get_unmapped_area(NULL, vdso_base,
4373 (vdso_pages << PAGE_SHIFT) +
4374 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4375- 0, 0);
4376+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4377 if (IS_ERR_VALUE(vdso_base)) {
4378 rc = vdso_base;
4379 goto fail_mmapsem;
4380diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
4381index 77f6421..829564a 100644
4382--- a/arch/powerpc/kernel/vio.c
4383+++ b/arch/powerpc/kernel/vio.c
4384@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
4385 vio_cmo_dealloc(viodev, alloc_size);
4386 }
4387
4388-struct dma_map_ops vio_dma_mapping_ops = {
4389+static const struct dma_map_ops vio_dma_mapping_ops = {
4390 .alloc_coherent = vio_dma_iommu_alloc_coherent,
4391 .free_coherent = vio_dma_iommu_free_coherent,
4392 .map_sg = vio_dma_iommu_map_sg,
4393 .unmap_sg = vio_dma_iommu_unmap_sg,
4394+ .dma_supported = dma_iommu_dma_supported,
4395 .map_page = vio_dma_iommu_map_page,
4396 .unmap_page = vio_dma_iommu_unmap_page,
4397
4398@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
4399
4400 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
4401 {
4402- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
4403 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
4404 }
4405
4406diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4407index 5eea6f3..5d10396 100644
4408--- a/arch/powerpc/lib/usercopy_64.c
4409+++ b/arch/powerpc/lib/usercopy_64.c
4410@@ -9,22 +9,6 @@
4411 #include <linux/module.h>
4412 #include <asm/uaccess.h>
4413
4414-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4415-{
4416- if (likely(access_ok(VERIFY_READ, from, n)))
4417- n = __copy_from_user(to, from, n);
4418- else
4419- memset(to, 0, n);
4420- return n;
4421-}
4422-
4423-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4424-{
4425- if (likely(access_ok(VERIFY_WRITE, to, n)))
4426- n = __copy_to_user(to, from, n);
4427- return n;
4428-}
4429-
4430 unsigned long copy_in_user(void __user *to, const void __user *from,
4431 unsigned long n)
4432 {
4433@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4434 return n;
4435 }
4436
4437-EXPORT_SYMBOL(copy_from_user);
4438-EXPORT_SYMBOL(copy_to_user);
4439 EXPORT_SYMBOL(copy_in_user);
4440
4441diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4442index e7dae82..877ce0d 100644
4443--- a/arch/powerpc/mm/fault.c
4444+++ b/arch/powerpc/mm/fault.c
4445@@ -30,6 +30,10 @@
4446 #include <linux/kprobes.h>
4447 #include <linux/kdebug.h>
4448 #include <linux/perf_event.h>
4449+#include <linux/slab.h>
4450+#include <linux/pagemap.h>
4451+#include <linux/compiler.h>
4452+#include <linux/unistd.h>
4453
4454 #include <asm/firmware.h>
4455 #include <asm/page.h>
4456@@ -40,6 +44,7 @@
4457 #include <asm/uaccess.h>
4458 #include <asm/tlbflush.h>
4459 #include <asm/siginfo.h>
4460+#include <asm/ptrace.h>
4461
4462
4463 #ifdef CONFIG_KPROBES
4464@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4465 }
4466 #endif
4467
4468+#ifdef CONFIG_PAX_PAGEEXEC
4469+/*
4470+ * PaX: decide what to do with offenders (regs->nip = fault address)
4471+ *
4472+ * returns 1 when task should be killed
4473+ */
4474+static int pax_handle_fetch_fault(struct pt_regs *regs)
4475+{
4476+ return 1;
4477+}
4478+
4479+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4480+{
4481+ unsigned long i;
4482+
4483+ printk(KERN_ERR "PAX: bytes at PC: ");
4484+ for (i = 0; i < 5; i++) {
4485+ unsigned int c;
4486+ if (get_user(c, (unsigned int __user *)pc+i))
4487+ printk(KERN_CONT "???????? ");
4488+ else
4489+ printk(KERN_CONT "%08x ", c);
4490+ }
4491+ printk("\n");
4492+}
4493+#endif
4494+
4495 /*
4496 * Check whether the instruction at regs->nip is a store using
4497 * an update addressing form which will update r1.
4498@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4499 * indicate errors in DSISR but can validly be set in SRR1.
4500 */
4501 if (trap == 0x400)
4502- error_code &= 0x48200000;
4503+ error_code &= 0x58200000;
4504 else
4505 is_write = error_code & DSISR_ISSTORE;
4506 #else
4507@@ -250,7 +282,7 @@ good_area:
4508 * "undefined". Of those that can be set, this is the only
4509 * one which seems bad.
4510 */
4511- if (error_code & 0x10000000)
4512+ if (error_code & DSISR_GUARDED)
4513 /* Guarded storage error. */
4514 goto bad_area;
4515 #endif /* CONFIG_8xx */
4516@@ -265,7 +297,7 @@ good_area:
4517 * processors use the same I/D cache coherency mechanism
4518 * as embedded.
4519 */
4520- if (error_code & DSISR_PROTFAULT)
4521+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4522 goto bad_area;
4523 #endif /* CONFIG_PPC_STD_MMU */
4524
4525@@ -335,6 +367,23 @@ bad_area:
4526 bad_area_nosemaphore:
4527 /* User mode accesses cause a SIGSEGV */
4528 if (user_mode(regs)) {
4529+
4530+#ifdef CONFIG_PAX_PAGEEXEC
4531+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4532+#ifdef CONFIG_PPC_STD_MMU
4533+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4534+#else
4535+ if (is_exec && regs->nip == address) {
4536+#endif
4537+ switch (pax_handle_fetch_fault(regs)) {
4538+ }
4539+
4540+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4541+ do_group_exit(SIGKILL);
4542+ }
4543+ }
4544+#endif
4545+
4546 _exception(SIGSEGV, regs, code, address);
4547 return 0;
4548 }
4549diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
4550index 5973631..ad617af 100644
4551--- a/arch/powerpc/mm/mem.c
4552+++ b/arch/powerpc/mm/mem.c
4553@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
4554 {
4555 unsigned long lmb_next_region_start_pfn,
4556 lmb_region_max_pfn;
4557- int i;
4558+ unsigned int i;
4559
4560 for (i = 0; i < lmb.memory.cnt - 1; i++) {
4561 lmb_region_max_pfn =
4562diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4563index 0d957a4..26d968f 100644
4564--- a/arch/powerpc/mm/mmap_64.c
4565+++ b/arch/powerpc/mm/mmap_64.c
4566@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4567 */
4568 if (mmap_is_legacy()) {
4569 mm->mmap_base = TASK_UNMAPPED_BASE;
4570+
4571+#ifdef CONFIG_PAX_RANDMMAP
4572+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4573+ mm->mmap_base += mm->delta_mmap;
4574+#endif
4575+
4576 mm->get_unmapped_area = arch_get_unmapped_area;
4577 mm->unmap_area = arch_unmap_area;
4578 } else {
4579 mm->mmap_base = mmap_base();
4580+
4581+#ifdef CONFIG_PAX_RANDMMAP
4582+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4583+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4584+#endif
4585+
4586 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4587 mm->unmap_area = arch_unmap_area_topdown;
4588 }
4589diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4590index ba51948..23009d9 100644
4591--- a/arch/powerpc/mm/slice.c
4592+++ b/arch/powerpc/mm/slice.c
4593@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4594 if ((mm->task_size - len) < addr)
4595 return 0;
4596 vma = find_vma(mm, addr);
4597- return (!vma || (addr + len) <= vma->vm_start);
4598+ return check_heap_stack_gap(vma, addr, len);
4599 }
4600
4601 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4602@@ -256,7 +256,7 @@ full_search:
4603 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4604 continue;
4605 }
4606- if (!vma || addr + len <= vma->vm_start) {
4607+ if (check_heap_stack_gap(vma, addr, len)) {
4608 /*
4609 * Remember the place where we stopped the search:
4610 */
4611@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4612 }
4613 }
4614
4615- addr = mm->mmap_base;
4616- while (addr > len) {
4617+ if (mm->mmap_base < len)
4618+ addr = -ENOMEM;
4619+ else
4620+ addr = mm->mmap_base - len;
4621+
4622+ while (!IS_ERR_VALUE(addr)) {
4623 /* Go down by chunk size */
4624- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4625+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4626
4627 /* Check for hit with different page size */
4628 mask = slice_range_to_mask(addr, len);
4629@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4630 * return with success:
4631 */
4632 vma = find_vma(mm, addr);
4633- if (!vma || (addr + len) <= vma->vm_start) {
4634+ if (check_heap_stack_gap(vma, addr, len)) {
4635 /* remember the address as a hint for next time */
4636 if (use_cache)
4637 mm->free_area_cache = addr;
4638@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4639 mm->cached_hole_size = vma->vm_start - addr;
4640
4641 /* try just below the current vma->vm_start */
4642- addr = vma->vm_start;
4643+ addr = skip_heap_stack_gap(vma, len);
4644 }
4645
4646 /*
4647@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4648 if (fixed && addr > (mm->task_size - len))
4649 return -EINVAL;
4650
4651+#ifdef CONFIG_PAX_RANDMMAP
4652+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4653+ addr = 0;
4654+#endif
4655+
4656 /* If hint, make sure it matches our alignment restrictions */
4657 if (!fixed && addr) {
4658 addr = _ALIGN_UP(addr, 1ul << pshift);
4659diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
4660index b5c753d..8f01abe 100644
4661--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
4662+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
4663@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
4664 lite5200_pm_target_state = PM_SUSPEND_ON;
4665 }
4666
4667-static struct platform_suspend_ops lite5200_pm_ops = {
4668+static const struct platform_suspend_ops lite5200_pm_ops = {
4669 .valid = lite5200_pm_valid,
4670 .begin = lite5200_pm_begin,
4671 .prepare = lite5200_pm_prepare,
4672diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4673index a55b0b6..478c18e 100644
4674--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4675+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4676@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
4677 iounmap(mbar);
4678 }
4679
4680-static struct platform_suspend_ops mpc52xx_pm_ops = {
4681+static const struct platform_suspend_ops mpc52xx_pm_ops = {
4682 .valid = mpc52xx_pm_valid,
4683 .prepare = mpc52xx_pm_prepare,
4684 .enter = mpc52xx_pm_enter,
4685diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
4686index 08e65fc..643d3ac 100644
4687--- a/arch/powerpc/platforms/83xx/suspend.c
4688+++ b/arch/powerpc/platforms/83xx/suspend.c
4689@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
4690 return ret;
4691 }
4692
4693-static struct platform_suspend_ops mpc83xx_suspend_ops = {
4694+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
4695 .valid = mpc83xx_suspend_valid,
4696 .begin = mpc83xx_suspend_begin,
4697 .enter = mpc83xx_suspend_enter,
4698diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
4699index ca5bfdf..1602e09 100644
4700--- a/arch/powerpc/platforms/cell/iommu.c
4701+++ b/arch/powerpc/platforms/cell/iommu.c
4702@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
4703
4704 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
4705
4706-struct dma_map_ops dma_iommu_fixed_ops = {
4707+const struct dma_map_ops dma_iommu_fixed_ops = {
4708 .alloc_coherent = dma_fixed_alloc_coherent,
4709 .free_coherent = dma_fixed_free_coherent,
4710 .map_sg = dma_fixed_map_sg,
4711diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
4712index e34b305..20e48ec 100644
4713--- a/arch/powerpc/platforms/ps3/system-bus.c
4714+++ b/arch/powerpc/platforms/ps3/system-bus.c
4715@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
4716 return mask >= DMA_BIT_MASK(32);
4717 }
4718
4719-static struct dma_map_ops ps3_sb_dma_ops = {
4720+static const struct dma_map_ops ps3_sb_dma_ops = {
4721 .alloc_coherent = ps3_alloc_coherent,
4722 .free_coherent = ps3_free_coherent,
4723 .map_sg = ps3_sb_map_sg,
4724@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
4725 .unmap_page = ps3_unmap_page,
4726 };
4727
4728-static struct dma_map_ops ps3_ioc0_dma_ops = {
4729+static const struct dma_map_ops ps3_ioc0_dma_ops = {
4730 .alloc_coherent = ps3_alloc_coherent,
4731 .free_coherent = ps3_free_coherent,
4732 .map_sg = ps3_ioc0_map_sg,
4733diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
4734index f0e6f28..60d53ed 100644
4735--- a/arch/powerpc/platforms/pseries/Kconfig
4736+++ b/arch/powerpc/platforms/pseries/Kconfig
4737@@ -2,6 +2,8 @@ config PPC_PSERIES
4738 depends on PPC64 && PPC_BOOK3S
4739 bool "IBM pSeries & new (POWER5-based) iSeries"
4740 select MPIC
4741+ select PCI_MSI
4742+ select XICS
4743 select PPC_I8259
4744 select PPC_RTAS
4745 select RTAS_ERROR_LOGGING
4746diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
4747index aca7fff..76c2b6b 100644
4748--- a/arch/s390/Kconfig
4749+++ b/arch/s390/Kconfig
4750@@ -197,28 +197,26 @@ config AUDIT_ARCH
4751
4752 config S390_SWITCH_AMODE
4753 bool "Switch kernel/user addressing modes"
4754+ default y
4755 help
4756 This option allows to switch the addressing modes of kernel and user
4757- space. The kernel parameter switch_amode=on will enable this feature,
4758- default is disabled. Enabling this (via kernel parameter) on machines
4759- earlier than IBM System z9-109 EC/BC will reduce system performance.
4760+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
4761+ will reduce system performance.
4762
4763 Note that this option will also be selected by selecting the execute
4764- protection option below. Enabling the execute protection via the
4765- noexec kernel parameter will also switch the addressing modes,
4766- independent of the switch_amode kernel parameter.
4767+ protection option below. Enabling the execute protection will also
4768+ switch the addressing modes, independent of this option.
4769
4770
4771 config S390_EXEC_PROTECT
4772 bool "Data execute protection"
4773+ default y
4774 select S390_SWITCH_AMODE
4775 help
4776 This option allows to enable a buffer overflow protection for user
4777 space programs and it also selects the addressing mode option above.
4778- The kernel parameter noexec=on will enable this feature and also
4779- switch the addressing modes, default is disabled. Enabling this (via
4780- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4781- will reduce system performance.
4782+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
4783+ reduce system performance.
4784
4785 comment "Code generation options"
4786
4787diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4788index ae7c8f9..3f01a0c 100644
4789--- a/arch/s390/include/asm/atomic.h
4790+++ b/arch/s390/include/asm/atomic.h
4791@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4792 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4793 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4794
4795+#define atomic64_read_unchecked(v) atomic64_read(v)
4796+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4797+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4798+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4799+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4800+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4801+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4802+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4803+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4804+
4805 #define smp_mb__before_atomic_dec() smp_mb()
4806 #define smp_mb__after_atomic_dec() smp_mb()
4807 #define smp_mb__before_atomic_inc() smp_mb()
4808diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4809index 9b86681..c5140db 100644
4810--- a/arch/s390/include/asm/cache.h
4811+++ b/arch/s390/include/asm/cache.h
4812@@ -11,8 +11,10 @@
4813 #ifndef __ARCH_S390_CACHE_H
4814 #define __ARCH_S390_CACHE_H
4815
4816-#define L1_CACHE_BYTES 256
4817+#include <linux/const.h>
4818+
4819 #define L1_CACHE_SHIFT 8
4820+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4821
4822 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4823
4824diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4825index e885442..e3a2817 100644
4826--- a/arch/s390/include/asm/elf.h
4827+++ b/arch/s390/include/asm/elf.h
4828@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4829 that it will "exec", and that there is sufficient room for the brk. */
4830 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4831
4832+#ifdef CONFIG_PAX_ASLR
4833+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4834+
4835+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4836+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4837+#endif
4838+
4839 /* This yields a mask that user programs can use to figure out what
4840 instruction set this CPU supports. */
4841
4842diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4843index e37478e..9ce0e9f 100644
4844--- a/arch/s390/include/asm/setup.h
4845+++ b/arch/s390/include/asm/setup.h
4846@@ -50,13 +50,13 @@ extern unsigned long memory_end;
4847 void detect_memory_layout(struct mem_chunk chunk[]);
4848
4849 #ifdef CONFIG_S390_SWITCH_AMODE
4850-extern unsigned int switch_amode;
4851+#define switch_amode (1)
4852 #else
4853 #define switch_amode (0)
4854 #endif
4855
4856 #ifdef CONFIG_S390_EXEC_PROTECT
4857-extern unsigned int s390_noexec;
4858+#define s390_noexec (1)
4859 #else
4860 #define s390_noexec (0)
4861 #endif
4862diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4863index 8377e91..e28e6f1 100644
4864--- a/arch/s390/include/asm/uaccess.h
4865+++ b/arch/s390/include/asm/uaccess.h
4866@@ -232,6 +232,10 @@ static inline unsigned long __must_check
4867 copy_to_user(void __user *to, const void *from, unsigned long n)
4868 {
4869 might_fault();
4870+
4871+ if ((long)n < 0)
4872+ return n;
4873+
4874 if (access_ok(VERIFY_WRITE, to, n))
4875 n = __copy_to_user(to, from, n);
4876 return n;
4877@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4878 static inline unsigned long __must_check
4879 __copy_from_user(void *to, const void __user *from, unsigned long n)
4880 {
4881+ if ((long)n < 0)
4882+ return n;
4883+
4884 if (__builtin_constant_p(n) && (n <= 256))
4885 return uaccess.copy_from_user_small(n, from, to);
4886 else
4887@@ -283,6 +290,10 @@ static inline unsigned long __must_check
4888 copy_from_user(void *to, const void __user *from, unsigned long n)
4889 {
4890 might_fault();
4891+
4892+ if ((long)n < 0)
4893+ return n;
4894+
4895 if (access_ok(VERIFY_READ, from, n))
4896 n = __copy_from_user(to, from, n);
4897 else
4898diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4899index 639380a..72e3c02 100644
4900--- a/arch/s390/kernel/module.c
4901+++ b/arch/s390/kernel/module.c
4902@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4903
4904 /* Increase core size by size of got & plt and set start
4905 offsets for got and plt. */
4906- me->core_size = ALIGN(me->core_size, 4);
4907- me->arch.got_offset = me->core_size;
4908- me->core_size += me->arch.got_size;
4909- me->arch.plt_offset = me->core_size;
4910- me->core_size += me->arch.plt_size;
4911+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4912+ me->arch.got_offset = me->core_size_rw;
4913+ me->core_size_rw += me->arch.got_size;
4914+ me->arch.plt_offset = me->core_size_rx;
4915+ me->core_size_rx += me->arch.plt_size;
4916 return 0;
4917 }
4918
4919@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4920 if (info->got_initialized == 0) {
4921 Elf_Addr *gotent;
4922
4923- gotent = me->module_core + me->arch.got_offset +
4924+ gotent = me->module_core_rw + me->arch.got_offset +
4925 info->got_offset;
4926 *gotent = val;
4927 info->got_initialized = 1;
4928@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4929 else if (r_type == R_390_GOTENT ||
4930 r_type == R_390_GOTPLTENT)
4931 *(unsigned int *) loc =
4932- (val + (Elf_Addr) me->module_core - loc) >> 1;
4933+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4934 else if (r_type == R_390_GOT64 ||
4935 r_type == R_390_GOTPLT64)
4936 *(unsigned long *) loc = val;
4937@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4938 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4939 if (info->plt_initialized == 0) {
4940 unsigned int *ip;
4941- ip = me->module_core + me->arch.plt_offset +
4942+ ip = me->module_core_rx + me->arch.plt_offset +
4943 info->plt_offset;
4944 #ifndef CONFIG_64BIT
4945 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4946@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4947 val - loc + 0xffffUL < 0x1ffffeUL) ||
4948 (r_type == R_390_PLT32DBL &&
4949 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4950- val = (Elf_Addr) me->module_core +
4951+ val = (Elf_Addr) me->module_core_rx +
4952 me->arch.plt_offset +
4953 info->plt_offset;
4954 val += rela->r_addend - loc;
4955@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4956 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4957 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4958 val = val + rela->r_addend -
4959- ((Elf_Addr) me->module_core + me->arch.got_offset);
4960+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4961 if (r_type == R_390_GOTOFF16)
4962 *(unsigned short *) loc = val;
4963 else if (r_type == R_390_GOTOFF32)
4964@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4965 break;
4966 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4967 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4968- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4969+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4970 rela->r_addend - loc;
4971 if (r_type == R_390_GOTPC)
4972 *(unsigned int *) loc = val;
4973diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4974index 358e545..051e4f4 100644
4975--- a/arch/s390/kernel/setup.c
4976+++ b/arch/s390/kernel/setup.c
4977@@ -307,9 +307,6 @@ static int __init early_parse_mem(char *p)
4978 early_param("mem", early_parse_mem);
4979
4980 #ifdef CONFIG_S390_SWITCH_AMODE
4981-unsigned int switch_amode = 0;
4982-EXPORT_SYMBOL_GPL(switch_amode);
4983-
4984 static int set_amode_and_uaccess(unsigned long user_amode,
4985 unsigned long user32_amode)
4986 {
4987@@ -335,17 +332,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4988 return 0;
4989 }
4990 }
4991-
4992-/*
4993- * Switch kernel/user addressing modes?
4994- */
4995-static int __init early_parse_switch_amode(char *p)
4996-{
4997- switch_amode = 1;
4998- return 0;
4999-}
5000-early_param("switch_amode", early_parse_switch_amode);
5001-
5002 #else /* CONFIG_S390_SWITCH_AMODE */
5003 static inline int set_amode_and_uaccess(unsigned long user_amode,
5004 unsigned long user32_amode)
5005@@ -354,24 +340,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
5006 }
5007 #endif /* CONFIG_S390_SWITCH_AMODE */
5008
5009-#ifdef CONFIG_S390_EXEC_PROTECT
5010-unsigned int s390_noexec = 0;
5011-EXPORT_SYMBOL_GPL(s390_noexec);
5012-
5013-/*
5014- * Enable execute protection?
5015- */
5016-static int __init early_parse_noexec(char *p)
5017-{
5018- if (!strncmp(p, "off", 3))
5019- return 0;
5020- switch_amode = 1;
5021- s390_noexec = 1;
5022- return 0;
5023-}
5024-early_param("noexec", early_parse_noexec);
5025-#endif /* CONFIG_S390_EXEC_PROTECT */
5026-
5027 static void setup_addressing_mode(void)
5028 {
5029 if (s390_noexec) {
5030diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5031index 0ab74ae..c8b68f9 100644
5032--- a/arch/s390/mm/mmap.c
5033+++ b/arch/s390/mm/mmap.c
5034@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5035 */
5036 if (mmap_is_legacy()) {
5037 mm->mmap_base = TASK_UNMAPPED_BASE;
5038+
5039+#ifdef CONFIG_PAX_RANDMMAP
5040+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5041+ mm->mmap_base += mm->delta_mmap;
5042+#endif
5043+
5044 mm->get_unmapped_area = arch_get_unmapped_area;
5045 mm->unmap_area = arch_unmap_area;
5046 } else {
5047 mm->mmap_base = mmap_base();
5048+
5049+#ifdef CONFIG_PAX_RANDMMAP
5050+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5051+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5052+#endif
5053+
5054 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5055 mm->unmap_area = arch_unmap_area_topdown;
5056 }
5057@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5058 */
5059 if (mmap_is_legacy()) {
5060 mm->mmap_base = TASK_UNMAPPED_BASE;
5061+
5062+#ifdef CONFIG_PAX_RANDMMAP
5063+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5064+ mm->mmap_base += mm->delta_mmap;
5065+#endif
5066+
5067 mm->get_unmapped_area = s390_get_unmapped_area;
5068 mm->unmap_area = arch_unmap_area;
5069 } else {
5070 mm->mmap_base = mmap_base();
5071+
5072+#ifdef CONFIG_PAX_RANDMMAP
5073+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5074+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5075+#endif
5076+
5077 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5078 mm->unmap_area = arch_unmap_area_topdown;
5079 }
5080diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5081index ae3d59f..f65f075 100644
5082--- a/arch/score/include/asm/cache.h
5083+++ b/arch/score/include/asm/cache.h
5084@@ -1,7 +1,9 @@
5085 #ifndef _ASM_SCORE_CACHE_H
5086 #define _ASM_SCORE_CACHE_H
5087
5088+#include <linux/const.h>
5089+
5090 #define L1_CACHE_SHIFT 4
5091-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5092+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5093
5094 #endif /* _ASM_SCORE_CACHE_H */
5095diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
5096index 589d5c7..669e274 100644
5097--- a/arch/score/include/asm/system.h
5098+++ b/arch/score/include/asm/system.h
5099@@ -17,7 +17,7 @@ do { \
5100 #define finish_arch_switch(prev) do {} while (0)
5101
5102 typedef void (*vi_handler_t)(void);
5103-extern unsigned long arch_align_stack(unsigned long sp);
5104+#define arch_align_stack(x) (x)
5105
5106 #define mb() barrier()
5107 #define rmb() barrier()
5108diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5109index 25d0803..d6c8e36 100644
5110--- a/arch/score/kernel/process.c
5111+++ b/arch/score/kernel/process.c
5112@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5113
5114 return task_pt_regs(task)->cp0_epc;
5115 }
5116-
5117-unsigned long arch_align_stack(unsigned long sp)
5118-{
5119- return sp;
5120-}
5121diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
5122index d936c1a..304a252 100644
5123--- a/arch/sh/boards/mach-hp6xx/pm.c
5124+++ b/arch/sh/boards/mach-hp6xx/pm.c
5125@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
5126 return 0;
5127 }
5128
5129-static struct platform_suspend_ops hp6x0_pm_ops = {
5130+static const struct platform_suspend_ops hp6x0_pm_ops = {
5131 .enter = hp6x0_pm_enter,
5132 .valid = suspend_valid_only_mem,
5133 };
5134diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5135index 02df18e..ae3a793 100644
5136--- a/arch/sh/include/asm/cache.h
5137+++ b/arch/sh/include/asm/cache.h
5138@@ -9,10 +9,11 @@
5139 #define __ASM_SH_CACHE_H
5140 #ifdef __KERNEL__
5141
5142+#include <linux/const.h>
5143 #include <linux/init.h>
5144 #include <cpu/cache.h>
5145
5146-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5147+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5148
5149 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
5150
5151diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
5152index 8a8a993..7b3079b 100644
5153--- a/arch/sh/kernel/cpu/sh4/sq.c
5154+++ b/arch/sh/kernel/cpu/sh4/sq.c
5155@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
5156 NULL,
5157 };
5158
5159-static struct sysfs_ops sq_sysfs_ops = {
5160+static const struct sysfs_ops sq_sysfs_ops = {
5161 .show = sq_sysfs_show,
5162 .store = sq_sysfs_store,
5163 };
5164diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
5165index ee3c2aa..c49cee6 100644
5166--- a/arch/sh/kernel/cpu/shmobile/pm.c
5167+++ b/arch/sh/kernel/cpu/shmobile/pm.c
5168@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
5169 return 0;
5170 }
5171
5172-static struct platform_suspend_ops sh_pm_ops = {
5173+static const struct platform_suspend_ops sh_pm_ops = {
5174 .enter = sh_pm_enter,
5175 .valid = suspend_valid_only_mem,
5176 };
5177diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
5178index 3e532d0..9faa306 100644
5179--- a/arch/sh/kernel/kgdb.c
5180+++ b/arch/sh/kernel/kgdb.c
5181@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
5182 {
5183 }
5184
5185-struct kgdb_arch arch_kgdb_ops = {
5186+const struct kgdb_arch arch_kgdb_ops = {
5187 /* Breakpoint instruction: trapa #0x3c */
5188 #ifdef CONFIG_CPU_LITTLE_ENDIAN
5189 .gdb_bpt_instr = { 0x3c, 0xc3 },
5190diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5191index afeb710..d1d1289 100644
5192--- a/arch/sh/mm/mmap.c
5193+++ b/arch/sh/mm/mmap.c
5194@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5195 addr = PAGE_ALIGN(addr);
5196
5197 vma = find_vma(mm, addr);
5198- if (TASK_SIZE - len >= addr &&
5199- (!vma || addr + len <= vma->vm_start))
5200+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5201 return addr;
5202 }
5203
5204@@ -106,7 +105,7 @@ full_search:
5205 }
5206 return -ENOMEM;
5207 }
5208- if (likely(!vma || addr + len <= vma->vm_start)) {
5209+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5210 /*
5211 * Remember the place where we stopped the search:
5212 */
5213@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5214 addr = PAGE_ALIGN(addr);
5215
5216 vma = find_vma(mm, addr);
5217- if (TASK_SIZE - len >= addr &&
5218- (!vma || addr + len <= vma->vm_start))
5219+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5220 return addr;
5221 }
5222
5223@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5224 /* make sure it can fit in the remaining address space */
5225 if (likely(addr > len)) {
5226 vma = find_vma(mm, addr-len);
5227- if (!vma || addr <= vma->vm_start) {
5228+ if (check_heap_stack_gap(vma, addr - len, len)) {
5229 /* remember the address as a hint for next time */
5230 return (mm->free_area_cache = addr-len);
5231 }
5232@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5233 if (unlikely(mm->mmap_base < len))
5234 goto bottomup;
5235
5236- addr = mm->mmap_base-len;
5237- if (do_colour_align)
5238- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5239+ addr = mm->mmap_base - len;
5240
5241 do {
5242+ if (do_colour_align)
5243+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5244 /*
5245 * Lookup failure means no vma is above this address,
5246 * else if new region fits below vma->vm_start,
5247 * return with success:
5248 */
5249 vma = find_vma(mm, addr);
5250- if (likely(!vma || addr+len <= vma->vm_start)) {
5251+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5252 /* remember the address as a hint for next time */
5253 return (mm->free_area_cache = addr);
5254 }
5255@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5256 mm->cached_hole_size = vma->vm_start - addr;
5257
5258 /* try just below the current vma->vm_start */
5259- addr = vma->vm_start-len;
5260- if (do_colour_align)
5261- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5262- } while (likely(len < vma->vm_start));
5263+ addr = skip_heap_stack_gap(vma, len);
5264+ } while (!IS_ERR_VALUE(addr));
5265
5266 bottomup:
5267 /*
5268diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
5269index 05ef538..dc9c857 100644
5270--- a/arch/sparc/Kconfig
5271+++ b/arch/sparc/Kconfig
5272@@ -32,6 +32,7 @@ config SPARC
5273
5274 config SPARC32
5275 def_bool !64BIT
5276+ select GENERIC_ATOMIC64
5277
5278 config SPARC64
5279 def_bool 64BIT
5280diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5281index 113225b..7fd04e7 100644
5282--- a/arch/sparc/Makefile
5283+++ b/arch/sparc/Makefile
5284@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5285 # Export what is needed by arch/sparc/boot/Makefile
5286 export VMLINUX_INIT VMLINUX_MAIN
5287 VMLINUX_INIT := $(head-y) $(init-y)
5288-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5289+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5290 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5291 VMLINUX_MAIN += $(drivers-y) $(net-y)
5292
5293diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
5294index f0d343c..cf36e68 100644
5295--- a/arch/sparc/include/asm/atomic_32.h
5296+++ b/arch/sparc/include/asm/atomic_32.h
5297@@ -13,6 +13,8 @@
5298
5299 #include <linux/types.h>
5300
5301+#include <asm-generic/atomic64.h>
5302+
5303 #ifdef __KERNEL__
5304
5305 #include <asm/system.h>
5306diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5307index f5cc06f..f858d47 100644
5308--- a/arch/sparc/include/asm/atomic_64.h
5309+++ b/arch/sparc/include/asm/atomic_64.h
5310@@ -14,18 +14,40 @@
5311 #define ATOMIC64_INIT(i) { (i) }
5312
5313 #define atomic_read(v) ((v)->counter)
5314+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5315+{
5316+ return v->counter;
5317+}
5318 #define atomic64_read(v) ((v)->counter)
5319+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5320+{
5321+ return v->counter;
5322+}
5323
5324 #define atomic_set(v, i) (((v)->counter) = i)
5325+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5326+{
5327+ v->counter = i;
5328+}
5329 #define atomic64_set(v, i) (((v)->counter) = i)
5330+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5331+{
5332+ v->counter = i;
5333+}
5334
5335 extern void atomic_add(int, atomic_t *);
5336+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5337 extern void atomic64_add(long, atomic64_t *);
5338+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5339 extern void atomic_sub(int, atomic_t *);
5340+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5341 extern void atomic64_sub(long, atomic64_t *);
5342+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5343
5344 extern int atomic_add_ret(int, atomic_t *);
5345+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5346 extern long atomic64_add_ret(long, atomic64_t *);
5347+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5348 extern int atomic_sub_ret(int, atomic_t *);
5349 extern long atomic64_sub_ret(long, atomic64_t *);
5350
5351@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5352 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5353
5354 #define atomic_inc_return(v) atomic_add_ret(1, v)
5355+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5356+{
5357+ return atomic_add_ret_unchecked(1, v);
5358+}
5359 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5360+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5361+{
5362+ return atomic64_add_ret_unchecked(1, v);
5363+}
5364
5365 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5366 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5367
5368 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5369+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5370+{
5371+ return atomic_add_ret_unchecked(i, v);
5372+}
5373 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5374+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5375+{
5376+ return atomic64_add_ret_unchecked(i, v);
5377+}
5378
5379 /*
5380 * atomic_inc_and_test - increment and test
5381@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5382 * other cases.
5383 */
5384 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5385+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5386+{
5387+ return atomic_inc_return_unchecked(v) == 0;
5388+}
5389 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5390
5391 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5392@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5393 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5394
5395 #define atomic_inc(v) atomic_add(1, v)
5396+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5397+{
5398+ atomic_add_unchecked(1, v);
5399+}
5400 #define atomic64_inc(v) atomic64_add(1, v)
5401+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5402+{
5403+ atomic64_add_unchecked(1, v);
5404+}
5405
5406 #define atomic_dec(v) atomic_sub(1, v)
5407+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5408+{
5409+ atomic_sub_unchecked(1, v);
5410+}
5411 #define atomic64_dec(v) atomic64_sub(1, v)
5412+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5413+{
5414+ atomic64_sub_unchecked(1, v);
5415+}
5416
5417 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5418 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5419
5420 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5421+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5422+{
5423+ return cmpxchg(&v->counter, old, new);
5424+}
5425 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5426+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5427+{
5428+ return xchg(&v->counter, new);
5429+}
5430
5431 static inline int atomic_add_unless(atomic_t *v, int a, int u)
5432 {
5433- int c, old;
5434+ int c, old, new;
5435 c = atomic_read(v);
5436 for (;;) {
5437- if (unlikely(c == (u)))
5438+ if (unlikely(c == u))
5439 break;
5440- old = atomic_cmpxchg((v), c, c + (a));
5441+
5442+ asm volatile("addcc %2, %0, %0\n"
5443+
5444+#ifdef CONFIG_PAX_REFCOUNT
5445+ "tvs %%icc, 6\n"
5446+#endif
5447+
5448+ : "=r" (new)
5449+ : "0" (c), "ir" (a)
5450+ : "cc");
5451+
5452+ old = atomic_cmpxchg(v, c, new);
5453 if (likely(old == c))
5454 break;
5455 c = old;
5456 }
5457- return c != (u);
5458+ return c != u;
5459 }
5460
5461 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
5462@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
5463 #define atomic64_cmpxchg(v, o, n) \
5464 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5465 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5466+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5467+{
5468+ return xchg(&v->counter, new);
5469+}
5470
5471 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5472 {
5473- long c, old;
5474+ long c, old, new;
5475 c = atomic64_read(v);
5476 for (;;) {
5477- if (unlikely(c == (u)))
5478+ if (unlikely(c == u))
5479 break;
5480- old = atomic64_cmpxchg((v), c, c + (a));
5481+
5482+ asm volatile("addcc %2, %0, %0\n"
5483+
5484+#ifdef CONFIG_PAX_REFCOUNT
5485+ "tvs %%xcc, 6\n"
5486+#endif
5487+
5488+ : "=r" (new)
5489+ : "0" (c), "ir" (a)
5490+ : "cc");
5491+
5492+ old = atomic64_cmpxchg(v, c, new);
5493 if (likely(old == c))
5494 break;
5495 c = old;
5496 }
5497- return c != (u);
5498+ return c != u;
5499 }
5500
5501 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5502diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5503index 41f85ae..73b80b5 100644
5504--- a/arch/sparc/include/asm/cache.h
5505+++ b/arch/sparc/include/asm/cache.h
5506@@ -7,8 +7,10 @@
5507 #ifndef _SPARC_CACHE_H
5508 #define _SPARC_CACHE_H
5509
5510+#include <linux/const.h>
5511+
5512 #define L1_CACHE_SHIFT 5
5513-#define L1_CACHE_BYTES 32
5514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5515 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
5516
5517 #ifdef CONFIG_SPARC32
5518diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
5519index 5a8c308..38def92 100644
5520--- a/arch/sparc/include/asm/dma-mapping.h
5521+++ b/arch/sparc/include/asm/dma-mapping.h
5522@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
5523 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
5524 #define dma_is_consistent(d, h) (1)
5525
5526-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
5527+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
5528 extern struct bus_type pci_bus_type;
5529
5530-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5531+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
5532 {
5533 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
5534 if (dev->bus == &pci_bus_type)
5535@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5536 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5537 dma_addr_t *dma_handle, gfp_t flag)
5538 {
5539- struct dma_map_ops *ops = get_dma_ops(dev);
5540+ const struct dma_map_ops *ops = get_dma_ops(dev);
5541 void *cpu_addr;
5542
5543 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
5544@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5545 static inline void dma_free_coherent(struct device *dev, size_t size,
5546 void *cpu_addr, dma_addr_t dma_handle)
5547 {
5548- struct dma_map_ops *ops = get_dma_ops(dev);
5549+ const struct dma_map_ops *ops = get_dma_ops(dev);
5550
5551 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
5552 ops->free_coherent(dev, size, cpu_addr, dma_handle);
5553diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5554index 381a1b5..b97e3ff 100644
5555--- a/arch/sparc/include/asm/elf_32.h
5556+++ b/arch/sparc/include/asm/elf_32.h
5557@@ -116,6 +116,13 @@ typedef struct {
5558
5559 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5560
5561+#ifdef CONFIG_PAX_ASLR
5562+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5563+
5564+#define PAX_DELTA_MMAP_LEN 16
5565+#define PAX_DELTA_STACK_LEN 16
5566+#endif
5567+
5568 /* This yields a mask that user programs can use to figure out what
5569 instruction set this cpu supports. This can NOT be done in userspace
5570 on Sparc. */
5571diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5572index 9968085..c2106ef 100644
5573--- a/arch/sparc/include/asm/elf_64.h
5574+++ b/arch/sparc/include/asm/elf_64.h
5575@@ -163,6 +163,12 @@ typedef struct {
5576 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5577 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5578
5579+#ifdef CONFIG_PAX_ASLR
5580+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5581+
5582+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5583+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5584+#endif
5585
5586 /* This yields a mask that user programs can use to figure out what
5587 instruction set this cpu supports. */
5588diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
5589index 156707b..aefa786 100644
5590--- a/arch/sparc/include/asm/page_32.h
5591+++ b/arch/sparc/include/asm/page_32.h
5592@@ -8,6 +8,8 @@
5593 #ifndef _SPARC_PAGE_H
5594 #define _SPARC_PAGE_H
5595
5596+#include <linux/const.h>
5597+
5598 #define PAGE_SHIFT 12
5599
5600 #ifndef __ASSEMBLY__
5601diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5602index e0cabe7..efd60f1 100644
5603--- a/arch/sparc/include/asm/pgtable_32.h
5604+++ b/arch/sparc/include/asm/pgtable_32.h
5605@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5606 BTFIXUPDEF_INT(page_none)
5607 BTFIXUPDEF_INT(page_copy)
5608 BTFIXUPDEF_INT(page_readonly)
5609+
5610+#ifdef CONFIG_PAX_PAGEEXEC
5611+BTFIXUPDEF_INT(page_shared_noexec)
5612+BTFIXUPDEF_INT(page_copy_noexec)
5613+BTFIXUPDEF_INT(page_readonly_noexec)
5614+#endif
5615+
5616 BTFIXUPDEF_INT(page_kernel)
5617
5618 #define PMD_SHIFT SUN4C_PMD_SHIFT
5619@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
5620 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5621 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5622
5623+#ifdef CONFIG_PAX_PAGEEXEC
5624+extern pgprot_t PAGE_SHARED_NOEXEC;
5625+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5626+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5627+#else
5628+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5629+# define PAGE_COPY_NOEXEC PAGE_COPY
5630+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5631+#endif
5632+
5633 extern unsigned long page_kernel;
5634
5635 #ifdef MODULE
5636diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5637index 1407c07..7e10231 100644
5638--- a/arch/sparc/include/asm/pgtsrmmu.h
5639+++ b/arch/sparc/include/asm/pgtsrmmu.h
5640@@ -115,6 +115,13 @@
5641 SRMMU_EXEC | SRMMU_REF)
5642 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5643 SRMMU_EXEC | SRMMU_REF)
5644+
5645+#ifdef CONFIG_PAX_PAGEEXEC
5646+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5647+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5648+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5649+#endif
5650+
5651 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5652 SRMMU_DIRTY | SRMMU_REF)
5653
5654diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5655index 43e5147..47622a1 100644
5656--- a/arch/sparc/include/asm/spinlock_64.h
5657+++ b/arch/sparc/include/asm/spinlock_64.h
5658@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
5659
5660 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5661
5662-static void inline arch_read_lock(raw_rwlock_t *lock)
5663+static inline void arch_read_lock(raw_rwlock_t *lock)
5664 {
5665 unsigned long tmp1, tmp2;
5666
5667 __asm__ __volatile__ (
5668 "1: ldsw [%2], %0\n"
5669 " brlz,pn %0, 2f\n"
5670-"4: add %0, 1, %1\n"
5671+"4: addcc %0, 1, %1\n"
5672+
5673+#ifdef CONFIG_PAX_REFCOUNT
5674+" tvs %%icc, 6\n"
5675+#endif
5676+
5677 " cas [%2], %0, %1\n"
5678 " cmp %0, %1\n"
5679 " bne,pn %%icc, 1b\n"
5680@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
5681 " .previous"
5682 : "=&r" (tmp1), "=&r" (tmp2)
5683 : "r" (lock)
5684- : "memory");
5685+ : "memory", "cc");
5686 }
5687
5688-static int inline arch_read_trylock(raw_rwlock_t *lock)
5689+static inline int arch_read_trylock(raw_rwlock_t *lock)
5690 {
5691 int tmp1, tmp2;
5692
5693@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5694 "1: ldsw [%2], %0\n"
5695 " brlz,a,pn %0, 2f\n"
5696 " mov 0, %0\n"
5697-" add %0, 1, %1\n"
5698+" addcc %0, 1, %1\n"
5699+
5700+#ifdef CONFIG_PAX_REFCOUNT
5701+" tvs %%icc, 6\n"
5702+#endif
5703+
5704 " cas [%2], %0, %1\n"
5705 " cmp %0, %1\n"
5706 " bne,pn %%icc, 1b\n"
5707@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5708 return tmp1;
5709 }
5710
5711-static void inline arch_read_unlock(raw_rwlock_t *lock)
5712+static inline void arch_read_unlock(raw_rwlock_t *lock)
5713 {
5714 unsigned long tmp1, tmp2;
5715
5716 __asm__ __volatile__(
5717 "1: lduw [%2], %0\n"
5718-" sub %0, 1, %1\n"
5719+" subcc %0, 1, %1\n"
5720+
5721+#ifdef CONFIG_PAX_REFCOUNT
5722+" tvs %%icc, 6\n"
5723+#endif
5724+
5725 " cas [%2], %0, %1\n"
5726 " cmp %0, %1\n"
5727 " bne,pn %%xcc, 1b\n"
5728@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
5729 : "memory");
5730 }
5731
5732-static void inline arch_write_lock(raw_rwlock_t *lock)
5733+static inline void arch_write_lock(raw_rwlock_t *lock)
5734 {
5735 unsigned long mask, tmp1, tmp2;
5736
5737@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
5738 : "memory");
5739 }
5740
5741-static void inline arch_write_unlock(raw_rwlock_t *lock)
5742+static inline void arch_write_unlock(raw_rwlock_t *lock)
5743 {
5744 __asm__ __volatile__(
5745 " stw %%g0, [%0]"
5746@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
5747 : "memory");
5748 }
5749
5750-static int inline arch_write_trylock(raw_rwlock_t *lock)
5751+static inline int arch_write_trylock(raw_rwlock_t *lock)
5752 {
5753 unsigned long mask, tmp1, tmp2, result;
5754
5755diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5756index 844d73a..f787fb9 100644
5757--- a/arch/sparc/include/asm/thread_info_32.h
5758+++ b/arch/sparc/include/asm/thread_info_32.h
5759@@ -50,6 +50,8 @@ struct thread_info {
5760 unsigned long w_saved;
5761
5762 struct restart_block restart_block;
5763+
5764+ unsigned long lowest_stack;
5765 };
5766
5767 /*
5768diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5769index f78ad9a..9f55fc7 100644
5770--- a/arch/sparc/include/asm/thread_info_64.h
5771+++ b/arch/sparc/include/asm/thread_info_64.h
5772@@ -68,6 +68,8 @@ struct thread_info {
5773 struct pt_regs *kern_una_regs;
5774 unsigned int kern_una_insn;
5775
5776+ unsigned long lowest_stack;
5777+
5778 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5779 };
5780
5781diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5782index e88fbe5..96b0ce5 100644
5783--- a/arch/sparc/include/asm/uaccess.h
5784+++ b/arch/sparc/include/asm/uaccess.h
5785@@ -1,5 +1,13 @@
5786 #ifndef ___ASM_SPARC_UACCESS_H
5787 #define ___ASM_SPARC_UACCESS_H
5788+
5789+#ifdef __KERNEL__
5790+#ifndef __ASSEMBLY__
5791+#include <linux/types.h>
5792+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5793+#endif
5794+#endif
5795+
5796 #if defined(__sparc__) && defined(__arch64__)
5797 #include <asm/uaccess_64.h>
5798 #else
5799diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5800index 8303ac4..07f333d 100644
5801--- a/arch/sparc/include/asm/uaccess_32.h
5802+++ b/arch/sparc/include/asm/uaccess_32.h
5803@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5804
5805 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5806 {
5807- if (n && __access_ok((unsigned long) to, n))
5808+ if ((long)n < 0)
5809+ return n;
5810+
5811+ if (n && __access_ok((unsigned long) to, n)) {
5812+ if (!__builtin_constant_p(n))
5813+ check_object_size(from, n, true);
5814 return __copy_user(to, (__force void __user *) from, n);
5815- else
5816+ } else
5817 return n;
5818 }
5819
5820 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5821 {
5822+ if ((long)n < 0)
5823+ return n;
5824+
5825+ if (!__builtin_constant_p(n))
5826+ check_object_size(from, n, true);
5827+
5828 return __copy_user(to, (__force void __user *) from, n);
5829 }
5830
5831 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5832 {
5833- if (n && __access_ok((unsigned long) from, n))
5834+ if ((long)n < 0)
5835+ return n;
5836+
5837+ if (n && __access_ok((unsigned long) from, n)) {
5838+ if (!__builtin_constant_p(n))
5839+ check_object_size(to, n, false);
5840 return __copy_user((__force void __user *) to, from, n);
5841- else
5842+ } else
5843 return n;
5844 }
5845
5846 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5847 {
5848+ if ((long)n < 0)
5849+ return n;
5850+
5851 return __copy_user((__force void __user *) to, from, n);
5852 }
5853
5854diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5855index 9ea271e..7b8a271 100644
5856--- a/arch/sparc/include/asm/uaccess_64.h
5857+++ b/arch/sparc/include/asm/uaccess_64.h
5858@@ -9,6 +9,7 @@
5859 #include <linux/compiler.h>
5860 #include <linux/string.h>
5861 #include <linux/thread_info.h>
5862+#include <linux/kernel.h>
5863 #include <asm/asi.h>
5864 #include <asm/system.h>
5865 #include <asm/spitfire.h>
5866@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5867 static inline unsigned long __must_check
5868 copy_from_user(void *to, const void __user *from, unsigned long size)
5869 {
5870- unsigned long ret = ___copy_from_user(to, from, size);
5871+ unsigned long ret;
5872
5873+ if ((long)size < 0 || size > INT_MAX)
5874+ return size;
5875+
5876+ if (!__builtin_constant_p(size))
5877+ check_object_size(to, size, false);
5878+
5879+ ret = ___copy_from_user(to, from, size);
5880 if (unlikely(ret))
5881 ret = copy_from_user_fixup(to, from, size);
5882 return ret;
5883@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5884 static inline unsigned long __must_check
5885 copy_to_user(void __user *to, const void *from, unsigned long size)
5886 {
5887- unsigned long ret = ___copy_to_user(to, from, size);
5888+ unsigned long ret;
5889
5890+ if ((long)size < 0 || size > INT_MAX)
5891+ return size;
5892+
5893+ if (!__builtin_constant_p(size))
5894+ check_object_size(from, size, true);
5895+
5896+ ret = ___copy_to_user(to, from, size);
5897 if (unlikely(ret))
5898 ret = copy_to_user_fixup(to, from, size);
5899 return ret;
5900diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5901index 2782681..77ded84 100644
5902--- a/arch/sparc/kernel/Makefile
5903+++ b/arch/sparc/kernel/Makefile
5904@@ -3,7 +3,7 @@
5905 #
5906
5907 asflags-y := -ansi
5908-ccflags-y := -Werror
5909+#ccflags-y := -Werror
5910
5911 extra-y := head_$(BITS).o
5912 extra-y += init_task.o
5913diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5914index 7690cc2..ece64c9 100644
5915--- a/arch/sparc/kernel/iommu.c
5916+++ b/arch/sparc/kernel/iommu.c
5917@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5918 spin_unlock_irqrestore(&iommu->lock, flags);
5919 }
5920
5921-static struct dma_map_ops sun4u_dma_ops = {
5922+static const struct dma_map_ops sun4u_dma_ops = {
5923 .alloc_coherent = dma_4u_alloc_coherent,
5924 .free_coherent = dma_4u_free_coherent,
5925 .map_page = dma_4u_map_page,
5926@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5927 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5928 };
5929
5930-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5931+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5932 EXPORT_SYMBOL(dma_ops);
5933
5934 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5935diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5936index 9f61fd8..bd048db 100644
5937--- a/arch/sparc/kernel/ioport.c
5938+++ b/arch/sparc/kernel/ioport.c
5939@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5940 BUG();
5941 }
5942
5943-struct dma_map_ops sbus_dma_ops = {
5944+const struct dma_map_ops sbus_dma_ops = {
5945 .alloc_coherent = sbus_alloc_coherent,
5946 .free_coherent = sbus_free_coherent,
5947 .map_page = sbus_map_page,
5948@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5949 .sync_sg_for_device = sbus_sync_sg_for_device,
5950 };
5951
5952-struct dma_map_ops *dma_ops = &sbus_dma_ops;
5953+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5954 EXPORT_SYMBOL(dma_ops);
5955
5956 static int __init sparc_register_ioport(void)
5957@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5958 }
5959 }
5960
5961-struct dma_map_ops pci32_dma_ops = {
5962+const struct dma_map_ops pci32_dma_ops = {
5963 .alloc_coherent = pci32_alloc_coherent,
5964 .free_coherent = pci32_free_coherent,
5965 .map_page = pci32_map_page,
5966diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5967index 04df4ed..55c4b6e 100644
5968--- a/arch/sparc/kernel/kgdb_32.c
5969+++ b/arch/sparc/kernel/kgdb_32.c
5970@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5971 {
5972 }
5973
5974-struct kgdb_arch arch_kgdb_ops = {
5975+const struct kgdb_arch arch_kgdb_ops = {
5976 /* Breakpoint instruction: ta 0x7d */
5977 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5978 };
5979diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5980index f5a0fd4..d886f71 100644
5981--- a/arch/sparc/kernel/kgdb_64.c
5982+++ b/arch/sparc/kernel/kgdb_64.c
5983@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5984 {
5985 }
5986
5987-struct kgdb_arch arch_kgdb_ops = {
5988+const struct kgdb_arch arch_kgdb_ops = {
5989 /* Breakpoint instruction: ta 0x72 */
5990 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5991 };
5992diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5993index 23c33ff..d137fbd 100644
5994--- a/arch/sparc/kernel/pci_sun4v.c
5995+++ b/arch/sparc/kernel/pci_sun4v.c
5996@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5997 spin_unlock_irqrestore(&iommu->lock, flags);
5998 }
5999
6000-static struct dma_map_ops sun4v_dma_ops = {
6001+static const struct dma_map_ops sun4v_dma_ops = {
6002 .alloc_coherent = dma_4v_alloc_coherent,
6003 .free_coherent = dma_4v_free_coherent,
6004 .map_page = dma_4v_map_page,
6005diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6006index c49865b..b41a81b 100644
6007--- a/arch/sparc/kernel/process_32.c
6008+++ b/arch/sparc/kernel/process_32.c
6009@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
6010 rw->ins[4], rw->ins[5],
6011 rw->ins[6],
6012 rw->ins[7]);
6013- printk("%pS\n", (void *) rw->ins[7]);
6014+ printk("%pA\n", (void *) rw->ins[7]);
6015 rw = (struct reg_window32 *) rw->ins[6];
6016 }
6017 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
6018@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
6019
6020 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6021 r->psr, r->pc, r->npc, r->y, print_tainted());
6022- printk("PC: <%pS>\n", (void *) r->pc);
6023+ printk("PC: <%pA>\n", (void *) r->pc);
6024 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6025 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6026 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6027 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6028 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6029 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6030- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6031+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6032
6033 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6034 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6035@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6036 rw = (struct reg_window32 *) fp;
6037 pc = rw->ins[7];
6038 printk("[%08lx : ", pc);
6039- printk("%pS ] ", (void *) pc);
6040+ printk("%pA ] ", (void *) pc);
6041 fp = rw->ins[6];
6042 } while (++count < 16);
6043 printk("\n");
6044diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6045index cb70476..3d0c191 100644
6046--- a/arch/sparc/kernel/process_64.c
6047+++ b/arch/sparc/kernel/process_64.c
6048@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
6049 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6050 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6051 if (regs->tstate & TSTATE_PRIV)
6052- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6053+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6054 }
6055
6056 void show_regs(struct pt_regs *regs)
6057 {
6058 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6059 regs->tpc, regs->tnpc, regs->y, print_tainted());
6060- printk("TPC: <%pS>\n", (void *) regs->tpc);
6061+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6062 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6063 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6064 regs->u_regs[3]);
6065@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
6066 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6067 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6068 regs->u_regs[15]);
6069- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6070+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6071 show_regwindow(regs);
6072 }
6073
6074@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
6075 ((tp && tp->task) ? tp->task->pid : -1));
6076
6077 if (gp->tstate & TSTATE_PRIV) {
6078- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6079+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6080 (void *) gp->tpc,
6081 (void *) gp->o7,
6082 (void *) gp->i7,
6083diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
6084index 6edc4e5..06a69b4 100644
6085--- a/arch/sparc/kernel/sigutil_64.c
6086+++ b/arch/sparc/kernel/sigutil_64.c
6087@@ -2,6 +2,7 @@
6088 #include <linux/types.h>
6089 #include <linux/thread_info.h>
6090 #include <linux/uaccess.h>
6091+#include <linux/errno.h>
6092
6093 #include <asm/sigcontext.h>
6094 #include <asm/fpumacro.h>
6095diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6096index 3a82e65..ce0a53a 100644
6097--- a/arch/sparc/kernel/sys_sparc_32.c
6098+++ b/arch/sparc/kernel/sys_sparc_32.c
6099@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6100 if (ARCH_SUN4C && len > 0x20000000)
6101 return -ENOMEM;
6102 if (!addr)
6103- addr = TASK_UNMAPPED_BASE;
6104+ addr = current->mm->mmap_base;
6105
6106 if (flags & MAP_SHARED)
6107 addr = COLOUR_ALIGN(addr);
6108@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6109 }
6110 if (TASK_SIZE - PAGE_SIZE - len < addr)
6111 return -ENOMEM;
6112- if (!vmm || addr + len <= vmm->vm_start)
6113+ if (check_heap_stack_gap(vmm, addr, len))
6114 return addr;
6115 addr = vmm->vm_end;
6116 if (flags & MAP_SHARED)
6117diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6118index cfa0e19..98972ac 100644
6119--- a/arch/sparc/kernel/sys_sparc_64.c
6120+++ b/arch/sparc/kernel/sys_sparc_64.c
6121@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6122 /* We do not accept a shared mapping if it would violate
6123 * cache aliasing constraints.
6124 */
6125- if ((flags & MAP_SHARED) &&
6126+ if ((filp || (flags & MAP_SHARED)) &&
6127 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6128 return -EINVAL;
6129 return addr;
6130@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6131 if (filp || (flags & MAP_SHARED))
6132 do_color_align = 1;
6133
6134+#ifdef CONFIG_PAX_RANDMMAP
6135+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6136+#endif
6137+
6138 if (addr) {
6139 if (do_color_align)
6140 addr = COLOUR_ALIGN(addr, pgoff);
6141@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6142 addr = PAGE_ALIGN(addr);
6143
6144 vma = find_vma(mm, addr);
6145- if (task_size - len >= addr &&
6146- (!vma || addr + len <= vma->vm_start))
6147+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6148 return addr;
6149 }
6150
6151 if (len > mm->cached_hole_size) {
6152- start_addr = addr = mm->free_area_cache;
6153+ start_addr = addr = mm->free_area_cache;
6154 } else {
6155- start_addr = addr = TASK_UNMAPPED_BASE;
6156+ start_addr = addr = mm->mmap_base;
6157 mm->cached_hole_size = 0;
6158 }
6159
6160@@ -175,14 +178,14 @@ full_search:
6161 vma = find_vma(mm, VA_EXCLUDE_END);
6162 }
6163 if (unlikely(task_size < addr)) {
6164- if (start_addr != TASK_UNMAPPED_BASE) {
6165- start_addr = addr = TASK_UNMAPPED_BASE;
6166+ if (start_addr != mm->mmap_base) {
6167+ start_addr = addr = mm->mmap_base;
6168 mm->cached_hole_size = 0;
6169 goto full_search;
6170 }
6171 return -ENOMEM;
6172 }
6173- if (likely(!vma || addr + len <= vma->vm_start)) {
6174+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6175 /*
6176 * Remember the place where we stopped the search:
6177 */
6178@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6179 /* We do not accept a shared mapping if it would violate
6180 * cache aliasing constraints.
6181 */
6182- if ((flags & MAP_SHARED) &&
6183+ if ((filp || (flags & MAP_SHARED)) &&
6184 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6185 return -EINVAL;
6186 return addr;
6187@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6188 addr = PAGE_ALIGN(addr);
6189
6190 vma = find_vma(mm, addr);
6191- if (task_size - len >= addr &&
6192- (!vma || addr + len <= vma->vm_start))
6193+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6194 return addr;
6195 }
6196
6197@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6198 /* make sure it can fit in the remaining address space */
6199 if (likely(addr > len)) {
6200 vma = find_vma(mm, addr-len);
6201- if (!vma || addr <= vma->vm_start) {
6202+ if (check_heap_stack_gap(vma, addr - len, len)) {
6203 /* remember the address as a hint for next time */
6204 return (mm->free_area_cache = addr-len);
6205 }
6206@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6207 if (unlikely(mm->mmap_base < len))
6208 goto bottomup;
6209
6210- addr = mm->mmap_base-len;
6211- if (do_color_align)
6212- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6213+ addr = mm->mmap_base - len;
6214
6215 do {
6216+ if (do_color_align)
6217+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6218 /*
6219 * Lookup failure means no vma is above this address,
6220 * else if new region fits below vma->vm_start,
6221 * return with success:
6222 */
6223 vma = find_vma(mm, addr);
6224- if (likely(!vma || addr+len <= vma->vm_start)) {
6225+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6226 /* remember the address as a hint for next time */
6227 return (mm->free_area_cache = addr);
6228 }
6229@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6230 mm->cached_hole_size = vma->vm_start - addr;
6231
6232 /* try just below the current vma->vm_start */
6233- addr = vma->vm_start-len;
6234- if (do_color_align)
6235- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6236- } while (likely(len < vma->vm_start));
6237+ addr = skip_heap_stack_gap(vma, len);
6238+ } while (!IS_ERR_VALUE(addr));
6239
6240 bottomup:
6241 /*
6242@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6243 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
6244 sysctl_legacy_va_layout) {
6245 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6246+
6247+#ifdef CONFIG_PAX_RANDMMAP
6248+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6249+ mm->mmap_base += mm->delta_mmap;
6250+#endif
6251+
6252 mm->get_unmapped_area = arch_get_unmapped_area;
6253 mm->unmap_area = arch_unmap_area;
6254 } else {
6255@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6256 gap = (task_size / 6 * 5);
6257
6258 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6259+
6260+#ifdef CONFIG_PAX_RANDMMAP
6261+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6262+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6263+#endif
6264+
6265 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6266 mm->unmap_area = arch_unmap_area_topdown;
6267 }
6268diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6269index c0490c7..84959d1 100644
6270--- a/arch/sparc/kernel/traps_32.c
6271+++ b/arch/sparc/kernel/traps_32.c
6272@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6273 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6274 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6275
6276+extern void gr_handle_kernel_exploit(void);
6277+
6278 void die_if_kernel(char *str, struct pt_regs *regs)
6279 {
6280 static int die_counter;
6281@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6282 count++ < 30 &&
6283 (((unsigned long) rw) >= PAGE_OFFSET) &&
6284 !(((unsigned long) rw) & 0x7)) {
6285- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6286+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6287 (void *) rw->ins[7]);
6288 rw = (struct reg_window32 *)rw->ins[6];
6289 }
6290 }
6291 printk("Instruction DUMP:");
6292 instruction_dump ((unsigned long *) regs->pc);
6293- if(regs->psr & PSR_PS)
6294+ if(regs->psr & PSR_PS) {
6295+ gr_handle_kernel_exploit();
6296 do_exit(SIGKILL);
6297+ }
6298 do_exit(SIGSEGV);
6299 }
6300
6301diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6302index 10f7bb9..cdb6793 100644
6303--- a/arch/sparc/kernel/traps_64.c
6304+++ b/arch/sparc/kernel/traps_64.c
6305@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6306 i + 1,
6307 p->trapstack[i].tstate, p->trapstack[i].tpc,
6308 p->trapstack[i].tnpc, p->trapstack[i].tt);
6309- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6310+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6311 }
6312 }
6313
6314@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6315
6316 lvl -= 0x100;
6317 if (regs->tstate & TSTATE_PRIV) {
6318+
6319+#ifdef CONFIG_PAX_REFCOUNT
6320+ if (lvl == 6)
6321+ pax_report_refcount_overflow(regs);
6322+#endif
6323+
6324 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6325 die_if_kernel(buffer, regs);
6326 }
6327@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6328 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6329 {
6330 char buffer[32];
6331-
6332+
6333 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6334 0, lvl, SIGTRAP) == NOTIFY_STOP)
6335 return;
6336
6337+#ifdef CONFIG_PAX_REFCOUNT
6338+ if (lvl == 6)
6339+ pax_report_refcount_overflow(regs);
6340+#endif
6341+
6342 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6343
6344 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6345@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6346 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6347 printk("%s" "ERROR(%d): ",
6348 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6349- printk("TPC<%pS>\n", (void *) regs->tpc);
6350+ printk("TPC<%pA>\n", (void *) regs->tpc);
6351 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6352 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6353 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6354@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6355 smp_processor_id(),
6356 (type & 0x1) ? 'I' : 'D',
6357 regs->tpc);
6358- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6359+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6360 panic("Irrecoverable Cheetah+ parity error.");
6361 }
6362
6363@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6364 smp_processor_id(),
6365 (type & 0x1) ? 'I' : 'D',
6366 regs->tpc);
6367- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6368+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6369 }
6370
6371 struct sun4v_error_entry {
6372@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6373
6374 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6375 regs->tpc, tl);
6376- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6377+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6378 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6379- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6380+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6381 (void *) regs->u_regs[UREG_I7]);
6382 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6383 "pte[%lx] error[%lx]\n",
6384@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6385
6386 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6387 regs->tpc, tl);
6388- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6389+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6390 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6391- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6392+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6393 (void *) regs->u_regs[UREG_I7]);
6394 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6395 "pte[%lx] error[%lx]\n",
6396@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6397 fp = (unsigned long)sf->fp + STACK_BIAS;
6398 }
6399
6400- printk(" [%016lx] %pS\n", pc, (void *) pc);
6401+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6402 } while (++count < 16);
6403 }
6404
6405@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6406 return (struct reg_window *) (fp + STACK_BIAS);
6407 }
6408
6409+extern void gr_handle_kernel_exploit(void);
6410+
6411 void die_if_kernel(char *str, struct pt_regs *regs)
6412 {
6413 static int die_counter;
6414@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6415 while (rw &&
6416 count++ < 30&&
6417 is_kernel_stack(current, rw)) {
6418- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6419+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6420 (void *) rw->ins[7]);
6421
6422 rw = kernel_stack_up(rw);
6423@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6424 }
6425 user_instruction_dump ((unsigned int __user *) regs->tpc);
6426 }
6427- if (regs->tstate & TSTATE_PRIV)
6428+ if (regs->tstate & TSTATE_PRIV) {
6429+ gr_handle_kernel_exploit();
6430 do_exit(SIGKILL);
6431+ }
6432+
6433 do_exit(SIGSEGV);
6434 }
6435 EXPORT_SYMBOL(die_if_kernel);
6436diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
6437index be183fe..1c8d332 100644
6438--- a/arch/sparc/kernel/una_asm_64.S
6439+++ b/arch/sparc/kernel/una_asm_64.S
6440@@ -127,7 +127,7 @@ do_int_load:
6441 wr %o5, 0x0, %asi
6442 retl
6443 mov 0, %o0
6444- .size __do_int_load, .-__do_int_load
6445+ .size do_int_load, .-do_int_load
6446
6447 .section __ex_table,"a"
6448 .word 4b, __retl_efault
6449diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6450index 3792099..2af17d8 100644
6451--- a/arch/sparc/kernel/unaligned_64.c
6452+++ b/arch/sparc/kernel/unaligned_64.c
6453@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
6454 if (count < 5) {
6455 last_time = jiffies;
6456 count++;
6457- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6458+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6459 regs->tpc, (void *) regs->tpc);
6460 }
6461 }
6462diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6463index e75faf0..24f12f9 100644
6464--- a/arch/sparc/lib/Makefile
6465+++ b/arch/sparc/lib/Makefile
6466@@ -2,7 +2,7 @@
6467 #
6468
6469 asflags-y := -ansi -DST_DIV0=0x02
6470-ccflags-y := -Werror
6471+#ccflags-y := -Werror
6472
6473 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6474 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6475diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6476index 0268210..f0291ca 100644
6477--- a/arch/sparc/lib/atomic_64.S
6478+++ b/arch/sparc/lib/atomic_64.S
6479@@ -18,7 +18,12 @@
6480 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: lduw [%o1], %g1
6483- add %g1, %o0, %g7
6484+ addcc %g1, %o0, %g7
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ tvs %icc, 6
6488+#endif
6489+
6490 cas [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %icc, 2f
6493@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6494 2: BACKOFF_SPIN(%o2, %o3, 1b)
6495 .size atomic_add, .-atomic_add
6496
6497+ .globl atomic_add_unchecked
6498+ .type atomic_add_unchecked,#function
6499+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6500+ BACKOFF_SETUP(%o2)
6501+1: lduw [%o1], %g1
6502+ add %g1, %o0, %g7
6503+ cas [%o1], %g1, %g7
6504+ cmp %g1, %g7
6505+ bne,pn %icc, 2f
6506+ nop
6507+ retl
6508+ nop
6509+2: BACKOFF_SPIN(%o2, %o3, 1b)
6510+ .size atomic_add_unchecked, .-atomic_add_unchecked
6511+
6512 .globl atomic_sub
6513 .type atomic_sub,#function
6514 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6515 BACKOFF_SETUP(%o2)
6516 1: lduw [%o1], %g1
6517- sub %g1, %o0, %g7
6518+ subcc %g1, %o0, %g7
6519+
6520+#ifdef CONFIG_PAX_REFCOUNT
6521+ tvs %icc, 6
6522+#endif
6523+
6524 cas [%o1], %g1, %g7
6525 cmp %g1, %g7
6526 bne,pn %icc, 2f
6527@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6528 2: BACKOFF_SPIN(%o2, %o3, 1b)
6529 .size atomic_sub, .-atomic_sub
6530
6531+ .globl atomic_sub_unchecked
6532+ .type atomic_sub_unchecked,#function
6533+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6534+ BACKOFF_SETUP(%o2)
6535+1: lduw [%o1], %g1
6536+ sub %g1, %o0, %g7
6537+ cas [%o1], %g1, %g7
6538+ cmp %g1, %g7
6539+ bne,pn %icc, 2f
6540+ nop
6541+ retl
6542+ nop
6543+2: BACKOFF_SPIN(%o2, %o3, 1b)
6544+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6545+
6546 .globl atomic_add_ret
6547 .type atomic_add_ret,#function
6548 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6549 BACKOFF_SETUP(%o2)
6550 1: lduw [%o1], %g1
6551- add %g1, %o0, %g7
6552+ addcc %g1, %o0, %g7
6553+
6554+#ifdef CONFIG_PAX_REFCOUNT
6555+ tvs %icc, 6
6556+#endif
6557+
6558 cas [%o1], %g1, %g7
6559 cmp %g1, %g7
6560 bne,pn %icc, 2f
6561@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6562 2: BACKOFF_SPIN(%o2, %o3, 1b)
6563 .size atomic_add_ret, .-atomic_add_ret
6564
6565+ .globl atomic_add_ret_unchecked
6566+ .type atomic_add_ret_unchecked,#function
6567+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6568+ BACKOFF_SETUP(%o2)
6569+1: lduw [%o1], %g1
6570+ addcc %g1, %o0, %g7
6571+ cas [%o1], %g1, %g7
6572+ cmp %g1, %g7
6573+ bne,pn %icc, 2f
6574+ add %g7, %o0, %g7
6575+ sra %g7, 0, %o0
6576+ retl
6577+ nop
6578+2: BACKOFF_SPIN(%o2, %o3, 1b)
6579+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6580+
6581 .globl atomic_sub_ret
6582 .type atomic_sub_ret,#function
6583 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6584 BACKOFF_SETUP(%o2)
6585 1: lduw [%o1], %g1
6586- sub %g1, %o0, %g7
6587+ subcc %g1, %o0, %g7
6588+
6589+#ifdef CONFIG_PAX_REFCOUNT
6590+ tvs %icc, 6
6591+#endif
6592+
6593 cas [%o1], %g1, %g7
6594 cmp %g1, %g7
6595 bne,pn %icc, 2f
6596@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6597 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6598 BACKOFF_SETUP(%o2)
6599 1: ldx [%o1], %g1
6600- add %g1, %o0, %g7
6601+ addcc %g1, %o0, %g7
6602+
6603+#ifdef CONFIG_PAX_REFCOUNT
6604+ tvs %xcc, 6
6605+#endif
6606+
6607 casx [%o1], %g1, %g7
6608 cmp %g1, %g7
6609 bne,pn %xcc, 2f
6610@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6611 2: BACKOFF_SPIN(%o2, %o3, 1b)
6612 .size atomic64_add, .-atomic64_add
6613
6614+ .globl atomic64_add_unchecked
6615+ .type atomic64_add_unchecked,#function
6616+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6617+ BACKOFF_SETUP(%o2)
6618+1: ldx [%o1], %g1
6619+ addcc %g1, %o0, %g7
6620+ casx [%o1], %g1, %g7
6621+ cmp %g1, %g7
6622+ bne,pn %xcc, 2f
6623+ nop
6624+ retl
6625+ nop
6626+2: BACKOFF_SPIN(%o2, %o3, 1b)
6627+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6628+
6629 .globl atomic64_sub
6630 .type atomic64_sub,#function
6631 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6632 BACKOFF_SETUP(%o2)
6633 1: ldx [%o1], %g1
6634- sub %g1, %o0, %g7
6635+ subcc %g1, %o0, %g7
6636+
6637+#ifdef CONFIG_PAX_REFCOUNT
6638+ tvs %xcc, 6
6639+#endif
6640+
6641 casx [%o1], %g1, %g7
6642 cmp %g1, %g7
6643 bne,pn %xcc, 2f
6644@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6645 2: BACKOFF_SPIN(%o2, %o3, 1b)
6646 .size atomic64_sub, .-atomic64_sub
6647
6648+ .globl atomic64_sub_unchecked
6649+ .type atomic64_sub_unchecked,#function
6650+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6651+ BACKOFF_SETUP(%o2)
6652+1: ldx [%o1], %g1
6653+ subcc %g1, %o0, %g7
6654+ casx [%o1], %g1, %g7
6655+ cmp %g1, %g7
6656+ bne,pn %xcc, 2f
6657+ nop
6658+ retl
6659+ nop
6660+2: BACKOFF_SPIN(%o2, %o3, 1b)
6661+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6662+
6663 .globl atomic64_add_ret
6664 .type atomic64_add_ret,#function
6665 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6666 BACKOFF_SETUP(%o2)
6667 1: ldx [%o1], %g1
6668- add %g1, %o0, %g7
6669+ addcc %g1, %o0, %g7
6670+
6671+#ifdef CONFIG_PAX_REFCOUNT
6672+ tvs %xcc, 6
6673+#endif
6674+
6675 casx [%o1], %g1, %g7
6676 cmp %g1, %g7
6677 bne,pn %xcc, 2f
6678@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6679 2: BACKOFF_SPIN(%o2, %o3, 1b)
6680 .size atomic64_add_ret, .-atomic64_add_ret
6681
6682+ .globl atomic64_add_ret_unchecked
6683+ .type atomic64_add_ret_unchecked,#function
6684+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6685+ BACKOFF_SETUP(%o2)
6686+1: ldx [%o1], %g1
6687+ addcc %g1, %o0, %g7
6688+ casx [%o1], %g1, %g7
6689+ cmp %g1, %g7
6690+ bne,pn %xcc, 2f
6691+ add %g7, %o0, %g7
6692+ mov %g7, %o0
6693+ retl
6694+ nop
6695+2: BACKOFF_SPIN(%o2, %o3, 1b)
6696+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6697+
6698 .globl atomic64_sub_ret
6699 .type atomic64_sub_ret,#function
6700 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6701 BACKOFF_SETUP(%o2)
6702 1: ldx [%o1], %g1
6703- sub %g1, %o0, %g7
6704+ subcc %g1, %o0, %g7
6705+
6706+#ifdef CONFIG_PAX_REFCOUNT
6707+ tvs %xcc, 6
6708+#endif
6709+
6710 casx [%o1], %g1, %g7
6711 cmp %g1, %g7
6712 bne,pn %xcc, 2f
6713diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6714index 704b126..2e79d76 100644
6715--- a/arch/sparc/lib/ksyms.c
6716+++ b/arch/sparc/lib/ksyms.c
6717@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
6718
6719 /* Atomic counter implementation. */
6720 EXPORT_SYMBOL(atomic_add);
6721+EXPORT_SYMBOL(atomic_add_unchecked);
6722 EXPORT_SYMBOL(atomic_add_ret);
6723+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6724 EXPORT_SYMBOL(atomic_sub);
6725+EXPORT_SYMBOL(atomic_sub_unchecked);
6726 EXPORT_SYMBOL(atomic_sub_ret);
6727 EXPORT_SYMBOL(atomic64_add);
6728+EXPORT_SYMBOL(atomic64_add_unchecked);
6729 EXPORT_SYMBOL(atomic64_add_ret);
6730+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6731 EXPORT_SYMBOL(atomic64_sub);
6732+EXPORT_SYMBOL(atomic64_sub_unchecked);
6733 EXPORT_SYMBOL(atomic64_sub_ret);
6734
6735 /* Atomic bit operations. */
6736diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
6737index 91a7d29..ce75c29 100644
6738--- a/arch/sparc/lib/rwsem_64.S
6739+++ b/arch/sparc/lib/rwsem_64.S
6740@@ -11,7 +11,12 @@
6741 .globl __down_read
6742 __down_read:
6743 1: lduw [%o0], %g1
6744- add %g1, 1, %g7
6745+ addcc %g1, 1, %g7
6746+
6747+#ifdef CONFIG_PAX_REFCOUNT
6748+ tvs %icc, 6
6749+#endif
6750+
6751 cas [%o0], %g1, %g7
6752 cmp %g1, %g7
6753 bne,pn %icc, 1b
6754@@ -33,7 +38,12 @@ __down_read:
6755 .globl __down_read_trylock
6756 __down_read_trylock:
6757 1: lduw [%o0], %g1
6758- add %g1, 1, %g7
6759+ addcc %g1, 1, %g7
6760+
6761+#ifdef CONFIG_PAX_REFCOUNT
6762+ tvs %icc, 6
6763+#endif
6764+
6765 cmp %g7, 0
6766 bl,pn %icc, 2f
6767 mov 0, %o1
6768@@ -51,7 +61,12 @@ __down_write:
6769 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6770 1:
6771 lduw [%o0], %g3
6772- add %g3, %g1, %g7
6773+ addcc %g3, %g1, %g7
6774+
6775+#ifdef CONFIG_PAX_REFCOUNT
6776+ tvs %icc, 6
6777+#endif
6778+
6779 cas [%o0], %g3, %g7
6780 cmp %g3, %g7
6781 bne,pn %icc, 1b
6782@@ -77,7 +92,12 @@ __down_write_trylock:
6783 cmp %g3, 0
6784 bne,pn %icc, 2f
6785 mov 0, %o1
6786- add %g3, %g1, %g7
6787+ addcc %g3, %g1, %g7
6788+
6789+#ifdef CONFIG_PAX_REFCOUNT
6790+ tvs %icc, 6
6791+#endif
6792+
6793 cas [%o0], %g3, %g7
6794 cmp %g3, %g7
6795 bne,pn %icc, 1b
6796@@ -90,7 +110,12 @@ __down_write_trylock:
6797 __up_read:
6798 1:
6799 lduw [%o0], %g1
6800- sub %g1, 1, %g7
6801+ subcc %g1, 1, %g7
6802+
6803+#ifdef CONFIG_PAX_REFCOUNT
6804+ tvs %icc, 6
6805+#endif
6806+
6807 cas [%o0], %g1, %g7
6808 cmp %g1, %g7
6809 bne,pn %icc, 1b
6810@@ -118,7 +143,12 @@ __up_write:
6811 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6812 1:
6813 lduw [%o0], %g3
6814- sub %g3, %g1, %g7
6815+ subcc %g3, %g1, %g7
6816+
6817+#ifdef CONFIG_PAX_REFCOUNT
6818+ tvs %icc, 6
6819+#endif
6820+
6821 cas [%o0], %g3, %g7
6822 cmp %g3, %g7
6823 bne,pn %icc, 1b
6824@@ -143,7 +173,12 @@ __downgrade_write:
6825 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
6826 1:
6827 lduw [%o0], %g3
6828- sub %g3, %g1, %g7
6829+ subcc %g3, %g1, %g7
6830+
6831+#ifdef CONFIG_PAX_REFCOUNT
6832+ tvs %icc, 6
6833+#endif
6834+
6835 cas [%o0], %g3, %g7
6836 cmp %g3, %g7
6837 bne,pn %icc, 1b
6838diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6839index 79836a7..62f47a2 100644
6840--- a/arch/sparc/mm/Makefile
6841+++ b/arch/sparc/mm/Makefile
6842@@ -2,7 +2,7 @@
6843 #
6844
6845 asflags-y := -ansi
6846-ccflags-y := -Werror
6847+#ccflags-y := -Werror
6848
6849 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6850 obj-y += fault_$(BITS).o
6851diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6852index b99f81c..3453e93 100644
6853--- a/arch/sparc/mm/fault_32.c
6854+++ b/arch/sparc/mm/fault_32.c
6855@@ -21,6 +21,9 @@
6856 #include <linux/interrupt.h>
6857 #include <linux/module.h>
6858 #include <linux/kdebug.h>
6859+#include <linux/slab.h>
6860+#include <linux/pagemap.h>
6861+#include <linux/compiler.h>
6862
6863 #include <asm/system.h>
6864 #include <asm/page.h>
6865@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6866 return safe_compute_effective_address(regs, insn);
6867 }
6868
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+#ifdef CONFIG_PAX_DLRESOLVE
6871+static void pax_emuplt_close(struct vm_area_struct *vma)
6872+{
6873+ vma->vm_mm->call_dl_resolve = 0UL;
6874+}
6875+
6876+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6877+{
6878+ unsigned int *kaddr;
6879+
6880+ vmf->page = alloc_page(GFP_HIGHUSER);
6881+ if (!vmf->page)
6882+ return VM_FAULT_OOM;
6883+
6884+ kaddr = kmap(vmf->page);
6885+ memset(kaddr, 0, PAGE_SIZE);
6886+ kaddr[0] = 0x9DE3BFA8U; /* save */
6887+ flush_dcache_page(vmf->page);
6888+ kunmap(vmf->page);
6889+ return VM_FAULT_MAJOR;
6890+}
6891+
6892+static const struct vm_operations_struct pax_vm_ops = {
6893+ .close = pax_emuplt_close,
6894+ .fault = pax_emuplt_fault
6895+};
6896+
6897+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6898+{
6899+ int ret;
6900+
6901+ vma->vm_mm = current->mm;
6902+ vma->vm_start = addr;
6903+ vma->vm_end = addr + PAGE_SIZE;
6904+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6905+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6906+ vma->vm_ops = &pax_vm_ops;
6907+
6908+ ret = insert_vm_struct(current->mm, vma);
6909+ if (ret)
6910+ return ret;
6911+
6912+ ++current->mm->total_vm;
6913+ return 0;
6914+}
6915+#endif
6916+
6917+/*
6918+ * PaX: decide what to do with offenders (regs->pc = fault address)
6919+ *
6920+ * returns 1 when task should be killed
6921+ * 2 when patched PLT trampoline was detected
6922+ * 3 when unpatched PLT trampoline was detected
6923+ */
6924+static int pax_handle_fetch_fault(struct pt_regs *regs)
6925+{
6926+
6927+#ifdef CONFIG_PAX_EMUPLT
6928+ int err;
6929+
6930+ do { /* PaX: patched PLT emulation #1 */
6931+ unsigned int sethi1, sethi2, jmpl;
6932+
6933+ err = get_user(sethi1, (unsigned int *)regs->pc);
6934+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6935+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6936+
6937+ if (err)
6938+ break;
6939+
6940+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6941+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6942+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6943+ {
6944+ unsigned int addr;
6945+
6946+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6947+ addr = regs->u_regs[UREG_G1];
6948+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6949+ regs->pc = addr;
6950+ regs->npc = addr+4;
6951+ return 2;
6952+ }
6953+ } while (0);
6954+
6955+ { /* PaX: patched PLT emulation #2 */
6956+ unsigned int ba;
6957+
6958+ err = get_user(ba, (unsigned int *)regs->pc);
6959+
6960+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961+ unsigned int addr;
6962+
6963+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6964+ regs->pc = addr;
6965+ regs->npc = addr+4;
6966+ return 2;
6967+ }
6968+ }
6969+
6970+ do { /* PaX: patched PLT emulation #3 */
6971+ unsigned int sethi, jmpl, nop;
6972+
6973+ err = get_user(sethi, (unsigned int *)regs->pc);
6974+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6975+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6976+
6977+ if (err)
6978+ break;
6979+
6980+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6982+ nop == 0x01000000U)
6983+ {
6984+ unsigned int addr;
6985+
6986+ addr = (sethi & 0x003FFFFFU) << 10;
6987+ regs->u_regs[UREG_G1] = addr;
6988+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6989+ regs->pc = addr;
6990+ regs->npc = addr+4;
6991+ return 2;
6992+ }
6993+ } while (0);
6994+
6995+ do { /* PaX: unpatched PLT emulation step 1 */
6996+ unsigned int sethi, ba, nop;
6997+
6998+ err = get_user(sethi, (unsigned int *)regs->pc);
6999+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
7000+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7001+
7002+ if (err)
7003+ break;
7004+
7005+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7006+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7007+ nop == 0x01000000U)
7008+ {
7009+ unsigned int addr, save, call;
7010+
7011+ if ((ba & 0xFFC00000U) == 0x30800000U)
7012+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7013+ else
7014+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7015+
7016+ err = get_user(save, (unsigned int *)addr);
7017+ err |= get_user(call, (unsigned int *)(addr+4));
7018+ err |= get_user(nop, (unsigned int *)(addr+8));
7019+ if (err)
7020+ break;
7021+
7022+#ifdef CONFIG_PAX_DLRESOLVE
7023+ if (save == 0x9DE3BFA8U &&
7024+ (call & 0xC0000000U) == 0x40000000U &&
7025+ nop == 0x01000000U)
7026+ {
7027+ struct vm_area_struct *vma;
7028+ unsigned long call_dl_resolve;
7029+
7030+ down_read(&current->mm->mmap_sem);
7031+ call_dl_resolve = current->mm->call_dl_resolve;
7032+ up_read(&current->mm->mmap_sem);
7033+ if (likely(call_dl_resolve))
7034+ goto emulate;
7035+
7036+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7037+
7038+ down_write(&current->mm->mmap_sem);
7039+ if (current->mm->call_dl_resolve) {
7040+ call_dl_resolve = current->mm->call_dl_resolve;
7041+ up_write(&current->mm->mmap_sem);
7042+ if (vma)
7043+ kmem_cache_free(vm_area_cachep, vma);
7044+ goto emulate;
7045+ }
7046+
7047+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7048+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7049+ up_write(&current->mm->mmap_sem);
7050+ if (vma)
7051+ kmem_cache_free(vm_area_cachep, vma);
7052+ return 1;
7053+ }
7054+
7055+ if (pax_insert_vma(vma, call_dl_resolve)) {
7056+ up_write(&current->mm->mmap_sem);
7057+ kmem_cache_free(vm_area_cachep, vma);
7058+ return 1;
7059+ }
7060+
7061+ current->mm->call_dl_resolve = call_dl_resolve;
7062+ up_write(&current->mm->mmap_sem);
7063+
7064+emulate:
7065+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7066+ regs->pc = call_dl_resolve;
7067+ regs->npc = addr+4;
7068+ return 3;
7069+ }
7070+#endif
7071+
7072+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7073+ if ((save & 0xFFC00000U) == 0x05000000U &&
7074+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7075+ nop == 0x01000000U)
7076+ {
7077+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7078+ regs->u_regs[UREG_G2] = addr + 4;
7079+ addr = (save & 0x003FFFFFU) << 10;
7080+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7081+ regs->pc = addr;
7082+ regs->npc = addr+4;
7083+ return 3;
7084+ }
7085+ }
7086+ } while (0);
7087+
7088+ do { /* PaX: unpatched PLT emulation step 2 */
7089+ unsigned int save, call, nop;
7090+
7091+ err = get_user(save, (unsigned int *)(regs->pc-4));
7092+ err |= get_user(call, (unsigned int *)regs->pc);
7093+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7094+ if (err)
7095+ break;
7096+
7097+ if (save == 0x9DE3BFA8U &&
7098+ (call & 0xC0000000U) == 0x40000000U &&
7099+ nop == 0x01000000U)
7100+ {
7101+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7102+
7103+ regs->u_regs[UREG_RETPC] = regs->pc;
7104+ regs->pc = dl_resolve;
7105+ regs->npc = dl_resolve+4;
7106+ return 3;
7107+ }
7108+ } while (0);
7109+#endif
7110+
7111+ return 1;
7112+}
7113+
7114+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7115+{
7116+ unsigned long i;
7117+
7118+ printk(KERN_ERR "PAX: bytes at PC: ");
7119+ for (i = 0; i < 8; i++) {
7120+ unsigned int c;
7121+ if (get_user(c, (unsigned int *)pc+i))
7122+ printk(KERN_CONT "???????? ");
7123+ else
7124+ printk(KERN_CONT "%08x ", c);
7125+ }
7126+ printk("\n");
7127+}
7128+#endif
7129+
7130 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
7131 unsigned long address)
7132 {
7133@@ -231,6 +495,24 @@ good_area:
7134 if(!(vma->vm_flags & VM_WRITE))
7135 goto bad_area;
7136 } else {
7137+
7138+#ifdef CONFIG_PAX_PAGEEXEC
7139+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7140+ up_read(&mm->mmap_sem);
7141+ switch (pax_handle_fetch_fault(regs)) {
7142+
7143+#ifdef CONFIG_PAX_EMUPLT
7144+ case 2:
7145+ case 3:
7146+ return;
7147+#endif
7148+
7149+ }
7150+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7151+ do_group_exit(SIGKILL);
7152+ }
7153+#endif
7154+
7155 /* Allow reads even for write-only mappings */
7156 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
7157 goto bad_area;
7158diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7159index 43b0da9..a0b78f9 100644
7160--- a/arch/sparc/mm/fault_64.c
7161+++ b/arch/sparc/mm/fault_64.c
7162@@ -20,6 +20,9 @@
7163 #include <linux/kprobes.h>
7164 #include <linux/kdebug.h>
7165 #include <linux/percpu.h>
7166+#include <linux/slab.h>
7167+#include <linux/pagemap.h>
7168+#include <linux/compiler.h>
7169
7170 #include <asm/page.h>
7171 #include <asm/pgtable.h>
7172@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7173 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7174 regs->tpc);
7175 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7176- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7177+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7178 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7179 dump_stack();
7180 unhandled_fault(regs->tpc, current, regs);
7181@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
7182 show_regs(regs);
7183 }
7184
7185+#ifdef CONFIG_PAX_PAGEEXEC
7186+#ifdef CONFIG_PAX_DLRESOLVE
7187+static void pax_emuplt_close(struct vm_area_struct *vma)
7188+{
7189+ vma->vm_mm->call_dl_resolve = 0UL;
7190+}
7191+
7192+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7193+{
7194+ unsigned int *kaddr;
7195+
7196+ vmf->page = alloc_page(GFP_HIGHUSER);
7197+ if (!vmf->page)
7198+ return VM_FAULT_OOM;
7199+
7200+ kaddr = kmap(vmf->page);
7201+ memset(kaddr, 0, PAGE_SIZE);
7202+ kaddr[0] = 0x9DE3BFA8U; /* save */
7203+ flush_dcache_page(vmf->page);
7204+ kunmap(vmf->page);
7205+ return VM_FAULT_MAJOR;
7206+}
7207+
7208+static const struct vm_operations_struct pax_vm_ops = {
7209+ .close = pax_emuplt_close,
7210+ .fault = pax_emuplt_fault
7211+};
7212+
7213+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7214+{
7215+ int ret;
7216+
7217+ vma->vm_mm = current->mm;
7218+ vma->vm_start = addr;
7219+ vma->vm_end = addr + PAGE_SIZE;
7220+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7221+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7222+ vma->vm_ops = &pax_vm_ops;
7223+
7224+ ret = insert_vm_struct(current->mm, vma);
7225+ if (ret)
7226+ return ret;
7227+
7228+ ++current->mm->total_vm;
7229+ return 0;
7230+}
7231+#endif
7232+
7233+/*
7234+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7235+ *
7236+ * returns 1 when task should be killed
7237+ * 2 when patched PLT trampoline was detected
7238+ * 3 when unpatched PLT trampoline was detected
7239+ */
7240+static int pax_handle_fetch_fault(struct pt_regs *regs)
7241+{
7242+
7243+#ifdef CONFIG_PAX_EMUPLT
7244+ int err;
7245+
7246+ do { /* PaX: patched PLT emulation #1 */
7247+ unsigned int sethi1, sethi2, jmpl;
7248+
7249+ err = get_user(sethi1, (unsigned int *)regs->tpc);
7250+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7251+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7252+
7253+ if (err)
7254+ break;
7255+
7256+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7257+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7258+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7259+ {
7260+ unsigned long addr;
7261+
7262+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7263+ addr = regs->u_regs[UREG_G1];
7264+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7265+
7266+ if (test_thread_flag(TIF_32BIT))
7267+ addr &= 0xFFFFFFFFUL;
7268+
7269+ regs->tpc = addr;
7270+ regs->tnpc = addr+4;
7271+ return 2;
7272+ }
7273+ } while (0);
7274+
7275+ { /* PaX: patched PLT emulation #2 */
7276+ unsigned int ba;
7277+
7278+ err = get_user(ba, (unsigned int *)regs->tpc);
7279+
7280+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
7281+ unsigned long addr;
7282+
7283+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7284+
7285+ if (test_thread_flag(TIF_32BIT))
7286+ addr &= 0xFFFFFFFFUL;
7287+
7288+ regs->tpc = addr;
7289+ regs->tnpc = addr+4;
7290+ return 2;
7291+ }
7292+ }
7293+
7294+ do { /* PaX: patched PLT emulation #3 */
7295+ unsigned int sethi, jmpl, nop;
7296+
7297+ err = get_user(sethi, (unsigned int *)regs->tpc);
7298+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
7299+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7300+
7301+ if (err)
7302+ break;
7303+
7304+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7305+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
7306+ nop == 0x01000000U)
7307+ {
7308+ unsigned long addr;
7309+
7310+ addr = (sethi & 0x003FFFFFU) << 10;
7311+ regs->u_regs[UREG_G1] = addr;
7312+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7313+
7314+ if (test_thread_flag(TIF_32BIT))
7315+ addr &= 0xFFFFFFFFUL;
7316+
7317+ regs->tpc = addr;
7318+ regs->tnpc = addr+4;
7319+ return 2;
7320+ }
7321+ } while (0);
7322+
7323+ do { /* PaX: patched PLT emulation #4 */
7324+ unsigned int sethi, mov1, call, mov2;
7325+
7326+ err = get_user(sethi, (unsigned int *)regs->tpc);
7327+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7328+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7329+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7330+
7331+ if (err)
7332+ break;
7333+
7334+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7335+ mov1 == 0x8210000FU &&
7336+ (call & 0xC0000000U) == 0x40000000U &&
7337+ mov2 == 0x9E100001U)
7338+ {
7339+ unsigned long addr;
7340+
7341+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7342+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7343+
7344+ if (test_thread_flag(TIF_32BIT))
7345+ addr &= 0xFFFFFFFFUL;
7346+
7347+ regs->tpc = addr;
7348+ regs->tnpc = addr+4;
7349+ return 2;
7350+ }
7351+ } while (0);
7352+
7353+ do { /* PaX: patched PLT emulation #5 */
7354+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7355+
7356+ err = get_user(sethi, (unsigned int *)regs->tpc);
7357+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7358+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7359+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7360+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7361+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7362+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7363+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7364+
7365+ if (err)
7366+ break;
7367+
7368+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7369+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7370+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7371+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7372+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7373+ sllx == 0x83287020U &&
7374+ jmpl == 0x81C04005U &&
7375+ nop == 0x01000000U)
7376+ {
7377+ unsigned long addr;
7378+
7379+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7380+ regs->u_regs[UREG_G1] <<= 32;
7381+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7382+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7383+ regs->tpc = addr;
7384+ regs->tnpc = addr+4;
7385+ return 2;
7386+ }
7387+ } while (0);
7388+
7389+ do { /* PaX: patched PLT emulation #6 */
7390+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7391+
7392+ err = get_user(sethi, (unsigned int *)regs->tpc);
7393+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7394+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7395+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7396+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7397+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7398+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7399+
7400+ if (err)
7401+ break;
7402+
7403+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7404+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7405+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7406+ sllx == 0x83287020U &&
7407+ (or & 0xFFFFE000U) == 0x8A116000U &&
7408+ jmpl == 0x81C04005U &&
7409+ nop == 0x01000000U)
7410+ {
7411+ unsigned long addr;
7412+
7413+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7414+ regs->u_regs[UREG_G1] <<= 32;
7415+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7416+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7417+ regs->tpc = addr;
7418+ regs->tnpc = addr+4;
7419+ return 2;
7420+ }
7421+ } while (0);
7422+
7423+ do { /* PaX: unpatched PLT emulation step 1 */
7424+ unsigned int sethi, ba, nop;
7425+
7426+ err = get_user(sethi, (unsigned int *)regs->tpc);
7427+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7428+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7429+
7430+ if (err)
7431+ break;
7432+
7433+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7434+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7435+ nop == 0x01000000U)
7436+ {
7437+ unsigned long addr;
7438+ unsigned int save, call;
7439+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7440+
7441+ if ((ba & 0xFFC00000U) == 0x30800000U)
7442+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7443+ else
7444+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7445+
7446+ if (test_thread_flag(TIF_32BIT))
7447+ addr &= 0xFFFFFFFFUL;
7448+
7449+ err = get_user(save, (unsigned int *)addr);
7450+ err |= get_user(call, (unsigned int *)(addr+4));
7451+ err |= get_user(nop, (unsigned int *)(addr+8));
7452+ if (err)
7453+ break;
7454+
7455+#ifdef CONFIG_PAX_DLRESOLVE
7456+ if (save == 0x9DE3BFA8U &&
7457+ (call & 0xC0000000U) == 0x40000000U &&
7458+ nop == 0x01000000U)
7459+ {
7460+ struct vm_area_struct *vma;
7461+ unsigned long call_dl_resolve;
7462+
7463+ down_read(&current->mm->mmap_sem);
7464+ call_dl_resolve = current->mm->call_dl_resolve;
7465+ up_read(&current->mm->mmap_sem);
7466+ if (likely(call_dl_resolve))
7467+ goto emulate;
7468+
7469+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7470+
7471+ down_write(&current->mm->mmap_sem);
7472+ if (current->mm->call_dl_resolve) {
7473+ call_dl_resolve = current->mm->call_dl_resolve;
7474+ up_write(&current->mm->mmap_sem);
7475+ if (vma)
7476+ kmem_cache_free(vm_area_cachep, vma);
7477+ goto emulate;
7478+ }
7479+
7480+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7481+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7482+ up_write(&current->mm->mmap_sem);
7483+ if (vma)
7484+ kmem_cache_free(vm_area_cachep, vma);
7485+ return 1;
7486+ }
7487+
7488+ if (pax_insert_vma(vma, call_dl_resolve)) {
7489+ up_write(&current->mm->mmap_sem);
7490+ kmem_cache_free(vm_area_cachep, vma);
7491+ return 1;
7492+ }
7493+
7494+ current->mm->call_dl_resolve = call_dl_resolve;
7495+ up_write(&current->mm->mmap_sem);
7496+
7497+emulate:
7498+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7499+ regs->tpc = call_dl_resolve;
7500+ regs->tnpc = addr+4;
7501+ return 3;
7502+ }
7503+#endif
7504+
7505+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7506+ if ((save & 0xFFC00000U) == 0x05000000U &&
7507+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7508+ nop == 0x01000000U)
7509+ {
7510+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7511+ regs->u_regs[UREG_G2] = addr + 4;
7512+ addr = (save & 0x003FFFFFU) << 10;
7513+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7514+
7515+ if (test_thread_flag(TIF_32BIT))
7516+ addr &= 0xFFFFFFFFUL;
7517+
7518+ regs->tpc = addr;
7519+ regs->tnpc = addr+4;
7520+ return 3;
7521+ }
7522+
7523+ /* PaX: 64-bit PLT stub */
7524+ err = get_user(sethi1, (unsigned int *)addr);
7525+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7526+ err |= get_user(or1, (unsigned int *)(addr+8));
7527+ err |= get_user(or2, (unsigned int *)(addr+12));
7528+ err |= get_user(sllx, (unsigned int *)(addr+16));
7529+ err |= get_user(add, (unsigned int *)(addr+20));
7530+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7531+ err |= get_user(nop, (unsigned int *)(addr+28));
7532+ if (err)
7533+ break;
7534+
7535+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7536+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7537+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7538+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7539+ sllx == 0x89293020U &&
7540+ add == 0x8A010005U &&
7541+ jmpl == 0x89C14000U &&
7542+ nop == 0x01000000U)
7543+ {
7544+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7545+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7546+ regs->u_regs[UREG_G4] <<= 32;
7547+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7548+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7549+ regs->u_regs[UREG_G4] = addr + 24;
7550+ addr = regs->u_regs[UREG_G5];
7551+ regs->tpc = addr;
7552+ regs->tnpc = addr+4;
7553+ return 3;
7554+ }
7555+ }
7556+ } while (0);
7557+
7558+#ifdef CONFIG_PAX_DLRESOLVE
7559+ do { /* PaX: unpatched PLT emulation step 2 */
7560+ unsigned int save, call, nop;
7561+
7562+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7563+ err |= get_user(call, (unsigned int *)regs->tpc);
7564+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7565+ if (err)
7566+ break;
7567+
7568+ if (save == 0x9DE3BFA8U &&
7569+ (call & 0xC0000000U) == 0x40000000U &&
7570+ nop == 0x01000000U)
7571+ {
7572+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7573+
7574+ if (test_thread_flag(TIF_32BIT))
7575+ dl_resolve &= 0xFFFFFFFFUL;
7576+
7577+ regs->u_regs[UREG_RETPC] = regs->tpc;
7578+ regs->tpc = dl_resolve;
7579+ regs->tnpc = dl_resolve+4;
7580+ return 3;
7581+ }
7582+ } while (0);
7583+#endif
7584+
7585+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7586+ unsigned int sethi, ba, nop;
7587+
7588+ err = get_user(sethi, (unsigned int *)regs->tpc);
7589+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7590+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7591+
7592+ if (err)
7593+ break;
7594+
7595+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7596+ (ba & 0xFFF00000U) == 0x30600000U &&
7597+ nop == 0x01000000U)
7598+ {
7599+ unsigned long addr;
7600+
7601+ addr = (sethi & 0x003FFFFFU) << 10;
7602+ regs->u_regs[UREG_G1] = addr;
7603+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7604+
7605+ if (test_thread_flag(TIF_32BIT))
7606+ addr &= 0xFFFFFFFFUL;
7607+
7608+ regs->tpc = addr;
7609+ regs->tnpc = addr+4;
7610+ return 2;
7611+ }
7612+ } while (0);
7613+
7614+#endif
7615+
7616+ return 1;
7617+}
7618+
7619+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7620+{
7621+ unsigned long i;
7622+
7623+ printk(KERN_ERR "PAX: bytes at PC: ");
7624+ for (i = 0; i < 8; i++) {
7625+ unsigned int c;
7626+ if (get_user(c, (unsigned int *)pc+i))
7627+ printk(KERN_CONT "???????? ");
7628+ else
7629+ printk(KERN_CONT "%08x ", c);
7630+ }
7631+ printk("\n");
7632+}
7633+#endif
7634+
7635 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7636 {
7637 struct mm_struct *mm = current->mm;
7638@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7639 if (!vma)
7640 goto bad_area;
7641
7642+#ifdef CONFIG_PAX_PAGEEXEC
7643+ /* PaX: detect ITLB misses on non-exec pages */
7644+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7645+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7646+ {
7647+ if (address != regs->tpc)
7648+ goto good_area;
7649+
7650+ up_read(&mm->mmap_sem);
7651+ switch (pax_handle_fetch_fault(regs)) {
7652+
7653+#ifdef CONFIG_PAX_EMUPLT
7654+ case 2:
7655+ case 3:
7656+ return;
7657+#endif
7658+
7659+ }
7660+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7661+ do_group_exit(SIGKILL);
7662+ }
7663+#endif
7664+
7665 /* Pure DTLB misses do not tell us whether the fault causing
7666 * load/store/atomic was a write or not, it only says that there
7667 * was no match. So in such a case we (carefully) read the
7668diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7669index f27d103..1b06377 100644
7670--- a/arch/sparc/mm/hugetlbpage.c
7671+++ b/arch/sparc/mm/hugetlbpage.c
7672@@ -69,7 +69,7 @@ full_search:
7673 }
7674 return -ENOMEM;
7675 }
7676- if (likely(!vma || addr + len <= vma->vm_start)) {
7677+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7678 /*
7679 * Remember the place where we stopped the search:
7680 */
7681@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7682 /* make sure it can fit in the remaining address space */
7683 if (likely(addr > len)) {
7684 vma = find_vma(mm, addr-len);
7685- if (!vma || addr <= vma->vm_start) {
7686+ if (check_heap_stack_gap(vma, addr - len, len)) {
7687 /* remember the address as a hint for next time */
7688 return (mm->free_area_cache = addr-len);
7689 }
7690@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7691 if (unlikely(mm->mmap_base < len))
7692 goto bottomup;
7693
7694- addr = (mm->mmap_base-len) & HPAGE_MASK;
7695+ addr = mm->mmap_base - len;
7696
7697 do {
7698+ addr &= HPAGE_MASK;
7699 /*
7700 * Lookup failure means no vma is above this address,
7701 * else if new region fits below vma->vm_start,
7702 * return with success:
7703 */
7704 vma = find_vma(mm, addr);
7705- if (likely(!vma || addr+len <= vma->vm_start)) {
7706+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7707 /* remember the address as a hint for next time */
7708 return (mm->free_area_cache = addr);
7709 }
7710@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7711 mm->cached_hole_size = vma->vm_start - addr;
7712
7713 /* try just below the current vma->vm_start */
7714- addr = (vma->vm_start-len) & HPAGE_MASK;
7715- } while (likely(len < vma->vm_start));
7716+ addr = skip_heap_stack_gap(vma, len);
7717+ } while (!IS_ERR_VALUE(addr));
7718
7719 bottomup:
7720 /*
7721@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7722 if (addr) {
7723 addr = ALIGN(addr, HPAGE_SIZE);
7724 vma = find_vma(mm, addr);
7725- if (task_size - len >= addr &&
7726- (!vma || addr + len <= vma->vm_start))
7727+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7728 return addr;
7729 }
7730 if (mm->get_unmapped_area == arch_get_unmapped_area)
7731diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7732index dc7c3b1..34c0070 100644
7733--- a/arch/sparc/mm/init_32.c
7734+++ b/arch/sparc/mm/init_32.c
7735@@ -317,6 +317,9 @@ extern void device_scan(void);
7736 pgprot_t PAGE_SHARED __read_mostly;
7737 EXPORT_SYMBOL(PAGE_SHARED);
7738
7739+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7740+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7741+
7742 void __init paging_init(void)
7743 {
7744 switch(sparc_cpu_model) {
7745@@ -345,17 +348,17 @@ void __init paging_init(void)
7746
7747 /* Initialize the protection map with non-constant, MMU dependent values. */
7748 protection_map[0] = PAGE_NONE;
7749- protection_map[1] = PAGE_READONLY;
7750- protection_map[2] = PAGE_COPY;
7751- protection_map[3] = PAGE_COPY;
7752+ protection_map[1] = PAGE_READONLY_NOEXEC;
7753+ protection_map[2] = PAGE_COPY_NOEXEC;
7754+ protection_map[3] = PAGE_COPY_NOEXEC;
7755 protection_map[4] = PAGE_READONLY;
7756 protection_map[5] = PAGE_READONLY;
7757 protection_map[6] = PAGE_COPY;
7758 protection_map[7] = PAGE_COPY;
7759 protection_map[8] = PAGE_NONE;
7760- protection_map[9] = PAGE_READONLY;
7761- protection_map[10] = PAGE_SHARED;
7762- protection_map[11] = PAGE_SHARED;
7763+ protection_map[9] = PAGE_READONLY_NOEXEC;
7764+ protection_map[10] = PAGE_SHARED_NOEXEC;
7765+ protection_map[11] = PAGE_SHARED_NOEXEC;
7766 protection_map[12] = PAGE_READONLY;
7767 protection_map[13] = PAGE_READONLY;
7768 protection_map[14] = PAGE_SHARED;
7769diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7770index 509b1ff..bfd7118 100644
7771--- a/arch/sparc/mm/srmmu.c
7772+++ b/arch/sparc/mm/srmmu.c
7773@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7774 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7775 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7776 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7777+
7778+#ifdef CONFIG_PAX_PAGEEXEC
7779+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7780+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7781+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7782+#endif
7783+
7784 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7785 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7786
7787diff --git a/arch/um/Makefile b/arch/um/Makefile
7788index fc633db..5e1a1c2 100644
7789--- a/arch/um/Makefile
7790+++ b/arch/um/Makefile
7791@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7792 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7793 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
7794
7795+ifdef CONSTIFY_PLUGIN
7796+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7797+endif
7798+
7799 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
7800
7801 #This will adjust *FLAGS accordingly to the platform.
7802diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7803index 19e1bdd..3665b77 100644
7804--- a/arch/um/include/asm/cache.h
7805+++ b/arch/um/include/asm/cache.h
7806@@ -1,6 +1,7 @@
7807 #ifndef __UM_CACHE_H
7808 #define __UM_CACHE_H
7809
7810+#include <linux/const.h>
7811
7812 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7813 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7814@@ -12,6 +13,6 @@
7815 # define L1_CACHE_SHIFT 5
7816 #endif
7817
7818-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7819+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7820
7821 #endif
7822diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7823index 6c03acd..a5e0215 100644
7824--- a/arch/um/include/asm/kmap_types.h
7825+++ b/arch/um/include/asm/kmap_types.h
7826@@ -23,6 +23,7 @@ enum km_type {
7827 KM_IRQ1,
7828 KM_SOFTIRQ0,
7829 KM_SOFTIRQ1,
7830+ KM_CLEARPAGE,
7831 KM_TYPE_NR
7832 };
7833
7834diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7835index 4cc9b6c..02e5029 100644
7836--- a/arch/um/include/asm/page.h
7837+++ b/arch/um/include/asm/page.h
7838@@ -14,6 +14,9 @@
7839 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7840 #define PAGE_MASK (~(PAGE_SIZE-1))
7841
7842+#define ktla_ktva(addr) (addr)
7843+#define ktva_ktla(addr) (addr)
7844+
7845 #ifndef __ASSEMBLY__
7846
7847 struct page;
7848diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7849index 4a28a15..654dc2a 100644
7850--- a/arch/um/kernel/process.c
7851+++ b/arch/um/kernel/process.c
7852@@ -393,22 +393,6 @@ int singlestepping(void * t)
7853 return 2;
7854 }
7855
7856-/*
7857- * Only x86 and x86_64 have an arch_align_stack().
7858- * All other arches have "#define arch_align_stack(x) (x)"
7859- * in their asm/system.h
7860- * As this is included in UML from asm-um/system-generic.h,
7861- * we can use it to behave as the subarch does.
7862- */
7863-#ifndef arch_align_stack
7864-unsigned long arch_align_stack(unsigned long sp)
7865-{
7866- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7867- sp -= get_random_int() % 8192;
7868- return sp & ~0xf;
7869-}
7870-#endif
7871-
7872 unsigned long get_wchan(struct task_struct *p)
7873 {
7874 unsigned long stack_page, sp, ip;
7875diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7876index d1b93c4..ae1b7fd 100644
7877--- a/arch/um/sys-i386/shared/sysdep/system.h
7878+++ b/arch/um/sys-i386/shared/sysdep/system.h
7879@@ -17,7 +17,7 @@
7880 # define AT_VECTOR_SIZE_ARCH 1
7881 #endif
7882
7883-extern unsigned long arch_align_stack(unsigned long sp);
7884+#define arch_align_stack(x) ((x) & ~0xfUL)
7885
7886 void default_idle(void);
7887
7888diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7889index 857ca0b..9a2669d 100644
7890--- a/arch/um/sys-i386/syscalls.c
7891+++ b/arch/um/sys-i386/syscalls.c
7892@@ -11,6 +11,21 @@
7893 #include "asm/uaccess.h"
7894 #include "asm/unistd.h"
7895
7896+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7897+{
7898+ unsigned long pax_task_size = TASK_SIZE;
7899+
7900+#ifdef CONFIG_PAX_SEGMEXEC
7901+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7902+ pax_task_size = SEGMEXEC_TASK_SIZE;
7903+#endif
7904+
7905+ if (len > pax_task_size || addr > pax_task_size - len)
7906+ return -EINVAL;
7907+
7908+ return 0;
7909+}
7910+
7911 /*
7912 * Perform the select(nd, in, out, ex, tv) and mmap() system
7913 * calls. Linux/i386 didn't use to be able to handle more than
7914diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7915index d1b93c4..ae1b7fd 100644
7916--- a/arch/um/sys-x86_64/shared/sysdep/system.h
7917+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7918@@ -17,7 +17,7 @@
7919 # define AT_VECTOR_SIZE_ARCH 1
7920 #endif
7921
7922-extern unsigned long arch_align_stack(unsigned long sp);
7923+#define arch_align_stack(x) ((x) & ~0xfUL)
7924
7925 void default_idle(void);
7926
7927diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7928index 73ae02a..f932de5 100644
7929--- a/arch/x86/Kconfig
7930+++ b/arch/x86/Kconfig
7931@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7932
7933 config X86_32_LAZY_GS
7934 def_bool y
7935- depends on X86_32 && !CC_STACKPROTECTOR
7936+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7937
7938 config KTIME_SCALAR
7939 def_bool X86_32
7940@@ -1008,7 +1008,7 @@ choice
7941
7942 config NOHIGHMEM
7943 bool "off"
7944- depends on !X86_NUMAQ
7945+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7946 ---help---
7947 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7948 However, the address space of 32-bit x86 processors is only 4
7949@@ -1045,7 +1045,7 @@ config NOHIGHMEM
7950
7951 config HIGHMEM4G
7952 bool "4GB"
7953- depends on !X86_NUMAQ
7954+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7955 ---help---
7956 Select this if you have a 32-bit processor and between 1 and 4
7957 gigabytes of physical RAM.
7958@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7959 hex
7960 default 0xB0000000 if VMSPLIT_3G_OPT
7961 default 0x80000000 if VMSPLIT_2G
7962- default 0x78000000 if VMSPLIT_2G_OPT
7963+ default 0x70000000 if VMSPLIT_2G_OPT
7964 default 0x40000000 if VMSPLIT_1G
7965 default 0xC0000000
7966 depends on X86_32
7967@@ -1460,6 +1460,7 @@ config SECCOMP
7968
7969 config CC_STACKPROTECTOR
7970 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7971+ depends on X86_64 || !PAX_MEMORY_UDEREF
7972 ---help---
7973 This option turns on the -fstack-protector GCC feature. This
7974 feature puts, at the beginning of functions, a canary value on
7975@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7976 config PHYSICAL_START
7977 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7978 default "0x1000000"
7979+ range 0x400000 0x40000000
7980 ---help---
7981 This gives the physical address where the kernel is loaded.
7982
7983@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7984 hex
7985 prompt "Alignment value to which kernel should be aligned" if X86_32
7986 default "0x1000000"
7987+ range 0x400000 0x1000000 if PAX_KERNEXEC
7988 range 0x2000 0x1000000
7989 ---help---
7990 This value puts the alignment restrictions on physical address
7991@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7992 Say N if you want to disable CPU hotplug.
7993
7994 config COMPAT_VDSO
7995- def_bool y
7996+ def_bool n
7997 prompt "Compat VDSO support"
7998 depends on X86_32 || IA32_EMULATION
7999+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8000 ---help---
8001 Map the 32-bit VDSO to the predictable old-style address too.
8002 ---help---
8003diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8004index 0e566103..1a6b57e 100644
8005--- a/arch/x86/Kconfig.cpu
8006+++ b/arch/x86/Kconfig.cpu
8007@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
8008
8009 config X86_F00F_BUG
8010 def_bool y
8011- depends on M586MMX || M586TSC || M586 || M486 || M386
8012+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8013
8014 config X86_WP_WORKS_OK
8015 def_bool y
8016@@ -360,7 +360,7 @@ config X86_POPAD_OK
8017
8018 config X86_ALIGNMENT_16
8019 def_bool y
8020- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8021+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8022
8023 config X86_INTEL_USERCOPY
8024 def_bool y
8025@@ -406,7 +406,7 @@ config X86_CMPXCHG64
8026 # generates cmov.
8027 config X86_CMOV
8028 def_bool y
8029- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8030+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8031
8032 config X86_MINIMUM_CPU_FAMILY
8033 int
8034diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8035index d105f29..c928727 100644
8036--- a/arch/x86/Kconfig.debug
8037+++ b/arch/x86/Kconfig.debug
8038@@ -99,7 +99,7 @@ config X86_PTDUMP
8039 config DEBUG_RODATA
8040 bool "Write protect kernel read-only data structures"
8041 default y
8042- depends on DEBUG_KERNEL
8043+ depends on DEBUG_KERNEL && BROKEN
8044 ---help---
8045 Mark the kernel read-only data as write-protected in the pagetables,
8046 in order to catch accidental (and incorrect) writes to such const
8047diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8048index d2d24c9..0f21f8d 100644
8049--- a/arch/x86/Makefile
8050+++ b/arch/x86/Makefile
8051@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
8052 else
8053 BITS := 64
8054 UTS_MACHINE := x86_64
8055+ biarch := $(call cc-option,-m64)
8056 CHECKFLAGS += -D__x86_64__ -m64
8057
8058 KBUILD_AFLAGS += -m64
8059@@ -189,3 +190,12 @@ define archhelp
8060 echo ' FDARGS="..." arguments for the booted kernel'
8061 echo ' FDINITRD=file initrd for the booted kernel'
8062 endef
8063+
8064+define OLD_LD
8065+
8066+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8067+*** Please upgrade your binutils to 2.18 or newer
8068+endef
8069+
8070+archprepare:
8071+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8072diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8073index ec749c2..bbb5319 100644
8074--- a/arch/x86/boot/Makefile
8075+++ b/arch/x86/boot/Makefile
8076@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8077 $(call cc-option, -fno-stack-protector) \
8078 $(call cc-option, -mpreferred-stack-boundary=2)
8079 KBUILD_CFLAGS += $(call cc-option, -m32)
8080+ifdef CONSTIFY_PLUGIN
8081+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8082+endif
8083 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8084 GCOV_PROFILE := n
8085
8086diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8087index 878e4b9..20537ab 100644
8088--- a/arch/x86/boot/bitops.h
8089+++ b/arch/x86/boot/bitops.h
8090@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8091 u8 v;
8092 const u32 *p = (const u32 *)addr;
8093
8094- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8095+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8096 return v;
8097 }
8098
8099@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8100
8101 static inline void set_bit(int nr, void *addr)
8102 {
8103- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8104+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8105 }
8106
8107 #endif /* BOOT_BITOPS_H */
8108diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8109index 98239d2..f40214c 100644
8110--- a/arch/x86/boot/boot.h
8111+++ b/arch/x86/boot/boot.h
8112@@ -82,7 +82,7 @@ static inline void io_delay(void)
8113 static inline u16 ds(void)
8114 {
8115 u16 seg;
8116- asm("movw %%ds,%0" : "=rm" (seg));
8117+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8118 return seg;
8119 }
8120
8121@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8122 static inline int memcmp(const void *s1, const void *s2, size_t len)
8123 {
8124 u8 diff;
8125- asm("repe; cmpsb; setnz %0"
8126+ asm volatile("repe; cmpsb; setnz %0"
8127 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8128 return diff;
8129 }
8130diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8131index f8ed065..5bf5ff3 100644
8132--- a/arch/x86/boot/compressed/Makefile
8133+++ b/arch/x86/boot/compressed/Makefile
8134@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8135 KBUILD_CFLAGS += $(cflags-y)
8136 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8137 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8138+ifdef CONSTIFY_PLUGIN
8139+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8140+endif
8141
8142 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8143 GCOV_PROFILE := n
8144diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8145index f543b70..b60fba8 100644
8146--- a/arch/x86/boot/compressed/head_32.S
8147+++ b/arch/x86/boot/compressed/head_32.S
8148@@ -76,7 +76,7 @@ ENTRY(startup_32)
8149 notl %eax
8150 andl %eax, %ebx
8151 #else
8152- movl $LOAD_PHYSICAL_ADDR, %ebx
8153+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8154 #endif
8155
8156 /* Target address to relocate to for decompression */
8157@@ -149,7 +149,7 @@ relocated:
8158 * and where it was actually loaded.
8159 */
8160 movl %ebp, %ebx
8161- subl $LOAD_PHYSICAL_ADDR, %ebx
8162+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8163 jz 2f /* Nothing to be done if loaded at compiled addr. */
8164 /*
8165 * Process relocations.
8166@@ -157,8 +157,7 @@ relocated:
8167
8168 1: subl $4, %edi
8169 movl (%edi), %ecx
8170- testl %ecx, %ecx
8171- jz 2f
8172+ jecxz 2f
8173 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8174 jmp 1b
8175 2:
8176diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8177index 077e1b6..2c6b13b 100644
8178--- a/arch/x86/boot/compressed/head_64.S
8179+++ b/arch/x86/boot/compressed/head_64.S
8180@@ -91,7 +91,7 @@ ENTRY(startup_32)
8181 notl %eax
8182 andl %eax, %ebx
8183 #else
8184- movl $LOAD_PHYSICAL_ADDR, %ebx
8185+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8186 #endif
8187
8188 /* Target address to relocate to for decompression */
8189@@ -183,7 +183,7 @@ no_longmode:
8190 hlt
8191 jmp 1b
8192
8193-#include "../../kernel/verify_cpu_64.S"
8194+#include "../../kernel/verify_cpu.S"
8195
8196 /*
8197 * Be careful here startup_64 needs to be at a predictable
8198@@ -234,7 +234,7 @@ ENTRY(startup_64)
8199 notq %rax
8200 andq %rax, %rbp
8201 #else
8202- movq $LOAD_PHYSICAL_ADDR, %rbp
8203+ movq $____LOAD_PHYSICAL_ADDR, %rbp
8204 #endif
8205
8206 /* Target address to relocate to for decompression */
8207diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8208index 842b2a3..f00178b 100644
8209--- a/arch/x86/boot/compressed/misc.c
8210+++ b/arch/x86/boot/compressed/misc.c
8211@@ -288,7 +288,7 @@ static void parse_elf(void *output)
8212 case PT_LOAD:
8213 #ifdef CONFIG_RELOCATABLE
8214 dest = output;
8215- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8216+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8217 #else
8218 dest = (void *)(phdr->p_paddr);
8219 #endif
8220@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8221 error("Destination address too large");
8222 #endif
8223 #ifndef CONFIG_RELOCATABLE
8224- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8225+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8226 error("Wrong destination address");
8227 #endif
8228
8229diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
8230index bcbd36c..b1754af 100644
8231--- a/arch/x86/boot/compressed/mkpiggy.c
8232+++ b/arch/x86/boot/compressed/mkpiggy.c
8233@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
8234
8235 offs = (olen > ilen) ? olen - ilen : 0;
8236 offs += olen >> 12; /* Add 8 bytes for each 32K block */
8237- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
8238+ offs += 64*1024; /* Add 64K bytes slack */
8239 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
8240
8241 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
8242diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
8243index bbeb0c3..f5167ab 100644
8244--- a/arch/x86/boot/compressed/relocs.c
8245+++ b/arch/x86/boot/compressed/relocs.c
8246@@ -10,8 +10,11 @@
8247 #define USE_BSD
8248 #include <endian.h>
8249
8250+#include "../../../../include/linux/autoconf.h"
8251+
8252 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
8253 static Elf32_Ehdr ehdr;
8254+static Elf32_Phdr *phdr;
8255 static unsigned long reloc_count, reloc_idx;
8256 static unsigned long *relocs;
8257
8258@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
8259
8260 static int is_safe_abs_reloc(const char* sym_name)
8261 {
8262- int i;
8263+ unsigned int i;
8264
8265 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
8266 if (!strcmp(sym_name, safe_abs_relocs[i]))
8267@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
8268 }
8269 }
8270
8271+static void read_phdrs(FILE *fp)
8272+{
8273+ unsigned int i;
8274+
8275+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
8276+ if (!phdr) {
8277+ die("Unable to allocate %d program headers\n",
8278+ ehdr.e_phnum);
8279+ }
8280+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
8281+ die("Seek to %d failed: %s\n",
8282+ ehdr.e_phoff, strerror(errno));
8283+ }
8284+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
8285+ die("Cannot read ELF program headers: %s\n",
8286+ strerror(errno));
8287+ }
8288+ for(i = 0; i < ehdr.e_phnum; i++) {
8289+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
8290+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
8291+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
8292+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
8293+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
8294+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
8295+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
8296+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
8297+ }
8298+
8299+}
8300+
8301 static void read_shdrs(FILE *fp)
8302 {
8303- int i;
8304+ unsigned int i;
8305 Elf32_Shdr shdr;
8306
8307 secs = calloc(ehdr.e_shnum, sizeof(struct section));
8308@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
8309
8310 static void read_strtabs(FILE *fp)
8311 {
8312- int i;
8313+ unsigned int i;
8314 for (i = 0; i < ehdr.e_shnum; i++) {
8315 struct section *sec = &secs[i];
8316 if (sec->shdr.sh_type != SHT_STRTAB) {
8317@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
8318
8319 static void read_symtabs(FILE *fp)
8320 {
8321- int i,j;
8322+ unsigned int i,j;
8323 for (i = 0; i < ehdr.e_shnum; i++) {
8324 struct section *sec = &secs[i];
8325 if (sec->shdr.sh_type != SHT_SYMTAB) {
8326@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
8327
8328 static void read_relocs(FILE *fp)
8329 {
8330- int i,j;
8331+ unsigned int i,j;
8332+ uint32_t base;
8333+
8334 for (i = 0; i < ehdr.e_shnum; i++) {
8335 struct section *sec = &secs[i];
8336 if (sec->shdr.sh_type != SHT_REL) {
8337@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
8338 die("Cannot read symbol table: %s\n",
8339 strerror(errno));
8340 }
8341+ base = 0;
8342+ for (j = 0; j < ehdr.e_phnum; j++) {
8343+ if (phdr[j].p_type != PT_LOAD )
8344+ continue;
8345+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
8346+ continue;
8347+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
8348+ break;
8349+ }
8350 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
8351 Elf32_Rel *rel = &sec->reltab[j];
8352- rel->r_offset = elf32_to_cpu(rel->r_offset);
8353+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
8354 rel->r_info = elf32_to_cpu(rel->r_info);
8355 }
8356 }
8357@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
8358
8359 static void print_absolute_symbols(void)
8360 {
8361- int i;
8362+ unsigned int i;
8363 printf("Absolute symbols\n");
8364 printf(" Num: Value Size Type Bind Visibility Name\n");
8365 for (i = 0; i < ehdr.e_shnum; i++) {
8366 struct section *sec = &secs[i];
8367 char *sym_strtab;
8368 Elf32_Sym *sh_symtab;
8369- int j;
8370+ unsigned int j;
8371
8372 if (sec->shdr.sh_type != SHT_SYMTAB) {
8373 continue;
8374@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
8375
8376 static void print_absolute_relocs(void)
8377 {
8378- int i, printed = 0;
8379+ unsigned int i, printed = 0;
8380
8381 for (i = 0; i < ehdr.e_shnum; i++) {
8382 struct section *sec = &secs[i];
8383 struct section *sec_applies, *sec_symtab;
8384 char *sym_strtab;
8385 Elf32_Sym *sh_symtab;
8386- int j;
8387+ unsigned int j;
8388 if (sec->shdr.sh_type != SHT_REL) {
8389 continue;
8390 }
8391@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
8392
8393 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8394 {
8395- int i;
8396+ unsigned int i;
8397 /* Walk through the relocations */
8398 for (i = 0; i < ehdr.e_shnum; i++) {
8399 char *sym_strtab;
8400 Elf32_Sym *sh_symtab;
8401 struct section *sec_applies, *sec_symtab;
8402- int j;
8403+ unsigned int j;
8404 struct section *sec = &secs[i];
8405
8406 if (sec->shdr.sh_type != SHT_REL) {
8407@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8408 if (sym->st_shndx == SHN_ABS) {
8409 continue;
8410 }
8411+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8412+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8413+ continue;
8414+
8415+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8416+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8417+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8418+ continue;
8419+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8420+ continue;
8421+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8422+ continue;
8423+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8424+ continue;
8425+#endif
8426 if (r_type == R_386_NONE || r_type == R_386_PC32) {
8427 /*
8428 * NONE can be ignored and and PC relative
8429@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
8430
8431 static void emit_relocs(int as_text)
8432 {
8433- int i;
8434+ unsigned int i;
8435 /* Count how many relocations I have and allocate space for them. */
8436 reloc_count = 0;
8437 walk_relocs(count_reloc);
8438@@ -634,6 +693,7 @@ int main(int argc, char **argv)
8439 fname, strerror(errno));
8440 }
8441 read_ehdr(fp);
8442+ read_phdrs(fp);
8443 read_shdrs(fp);
8444 read_strtabs(fp);
8445 read_symtabs(fp);
8446diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8447index 4d3ff03..e4972ff 100644
8448--- a/arch/x86/boot/cpucheck.c
8449+++ b/arch/x86/boot/cpucheck.c
8450@@ -74,7 +74,7 @@ static int has_fpu(void)
8451 u16 fcw = -1, fsw = -1;
8452 u32 cr0;
8453
8454- asm("movl %%cr0,%0" : "=r" (cr0));
8455+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8456 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8457 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8458 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8459@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8460 {
8461 u32 f0, f1;
8462
8463- asm("pushfl ; "
8464+ asm volatile("pushfl ; "
8465 "pushfl ; "
8466 "popl %0 ; "
8467 "movl %0,%1 ; "
8468@@ -115,7 +115,7 @@ static void get_flags(void)
8469 set_bit(X86_FEATURE_FPU, cpu.flags);
8470
8471 if (has_eflag(X86_EFLAGS_ID)) {
8472- asm("cpuid"
8473+ asm volatile("cpuid"
8474 : "=a" (max_intel_level),
8475 "=b" (cpu_vendor[0]),
8476 "=d" (cpu_vendor[1]),
8477@@ -124,7 +124,7 @@ static void get_flags(void)
8478
8479 if (max_intel_level >= 0x00000001 &&
8480 max_intel_level <= 0x0000ffff) {
8481- asm("cpuid"
8482+ asm volatile("cpuid"
8483 : "=a" (tfms),
8484 "=c" (cpu.flags[4]),
8485 "=d" (cpu.flags[0])
8486@@ -136,7 +136,7 @@ static void get_flags(void)
8487 cpu.model += ((tfms >> 16) & 0xf) << 4;
8488 }
8489
8490- asm("cpuid"
8491+ asm volatile("cpuid"
8492 : "=a" (max_amd_level)
8493 : "a" (0x80000000)
8494 : "ebx", "ecx", "edx");
8495@@ -144,7 +144,7 @@ static void get_flags(void)
8496 if (max_amd_level >= 0x80000001 &&
8497 max_amd_level <= 0x8000ffff) {
8498 u32 eax = 0x80000001;
8499- asm("cpuid"
8500+ asm volatile("cpuid"
8501 : "+a" (eax),
8502 "=c" (cpu.flags[6]),
8503 "=d" (cpu.flags[1])
8504@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8505 u32 ecx = MSR_K7_HWCR;
8506 u32 eax, edx;
8507
8508- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8509+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8510 eax &= ~(1 << 15);
8511- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8512+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8513
8514 get_flags(); /* Make sure it really did something */
8515 err = check_flags();
8516@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8517 u32 ecx = MSR_VIA_FCR;
8518 u32 eax, edx;
8519
8520- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8521+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8522 eax |= (1<<1)|(1<<7);
8523- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8524+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8525
8526 set_bit(X86_FEATURE_CX8, cpu.flags);
8527 err = check_flags();
8528@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8529 u32 eax, edx;
8530 u32 level = 1;
8531
8532- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8533- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8534- asm("cpuid"
8535+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8536+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8537+ asm volatile("cpuid"
8538 : "+a" (level), "=d" (cpu.flags[0])
8539 : : "ecx", "ebx");
8540- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8541+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8542
8543 err = check_flags();
8544 }
8545diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8546index b31cc54..8d69237 100644
8547--- a/arch/x86/boot/header.S
8548+++ b/arch/x86/boot/header.S
8549@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8550 # single linked list of
8551 # struct setup_data
8552
8553-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8554+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8555
8556 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8557 #define VO_INIT_SIZE (VO__end - VO__text)
8558diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8559index cae3feb..ff8ff2a 100644
8560--- a/arch/x86/boot/memory.c
8561+++ b/arch/x86/boot/memory.c
8562@@ -19,7 +19,7 @@
8563
8564 static int detect_memory_e820(void)
8565 {
8566- int count = 0;
8567+ unsigned int count = 0;
8568 struct biosregs ireg, oreg;
8569 struct e820entry *desc = boot_params.e820_map;
8570 static struct e820entry buf; /* static so it is zeroed */
8571diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8572index 11e8c6e..fdbb1ed 100644
8573--- a/arch/x86/boot/video-vesa.c
8574+++ b/arch/x86/boot/video-vesa.c
8575@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8576
8577 boot_params.screen_info.vesapm_seg = oreg.es;
8578 boot_params.screen_info.vesapm_off = oreg.di;
8579+ boot_params.screen_info.vesapm_size = oreg.cx;
8580 }
8581
8582 /*
8583diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8584index d42da38..787cdf3 100644
8585--- a/arch/x86/boot/video.c
8586+++ b/arch/x86/boot/video.c
8587@@ -90,7 +90,7 @@ static void store_mode_params(void)
8588 static unsigned int get_entry(void)
8589 {
8590 char entry_buf[4];
8591- int i, len = 0;
8592+ unsigned int i, len = 0;
8593 int key;
8594 unsigned int v;
8595
8596diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8597index 5b577d5..3c1fed4 100644
8598--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8599+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8600@@ -8,6 +8,8 @@
8601 * including this sentence is retained in full.
8602 */
8603
8604+#include <asm/alternative-asm.h>
8605+
8606 .extern crypto_ft_tab
8607 .extern crypto_it_tab
8608 .extern crypto_fl_tab
8609@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8610 je B192; \
8611 leaq 32(r9),r9;
8612
8613+#define ret pax_force_retaddr 0, 1; ret
8614+
8615 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8616 movq r1,r2; \
8617 movq r3,r4; \
8618diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8619index eb0566e..e3ebad8 100644
8620--- a/arch/x86/crypto/aesni-intel_asm.S
8621+++ b/arch/x86/crypto/aesni-intel_asm.S
8622@@ -16,6 +16,7 @@
8623 */
8624
8625 #include <linux/linkage.h>
8626+#include <asm/alternative-asm.h>
8627
8628 .text
8629
8630@@ -52,6 +53,7 @@ _key_expansion_256a:
8631 pxor %xmm1, %xmm0
8632 movaps %xmm0, (%rcx)
8633 add $0x10, %rcx
8634+ pax_force_retaddr_bts
8635 ret
8636
8637 _key_expansion_192a:
8638@@ -75,6 +77,7 @@ _key_expansion_192a:
8639 shufps $0b01001110, %xmm2, %xmm1
8640 movaps %xmm1, 16(%rcx)
8641 add $0x20, %rcx
8642+ pax_force_retaddr_bts
8643 ret
8644
8645 _key_expansion_192b:
8646@@ -93,6 +96,7 @@ _key_expansion_192b:
8647
8648 movaps %xmm0, (%rcx)
8649 add $0x10, %rcx
8650+ pax_force_retaddr_bts
8651 ret
8652
8653 _key_expansion_256b:
8654@@ -104,6 +108,7 @@ _key_expansion_256b:
8655 pxor %xmm1, %xmm2
8656 movaps %xmm2, (%rcx)
8657 add $0x10, %rcx
8658+ pax_force_retaddr_bts
8659 ret
8660
8661 /*
8662@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
8663 cmp %rcx, %rdi
8664 jb .Ldec_key_loop
8665 xor %rax, %rax
8666+ pax_force_retaddr 0, 1
8667 ret
8668+ENDPROC(aesni_set_key)
8669
8670 /*
8671 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8672@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
8673 movups (INP), STATE # input
8674 call _aesni_enc1
8675 movups STATE, (OUTP) # output
8676+ pax_force_retaddr 0, 1
8677 ret
8678+ENDPROC(aesni_enc)
8679
8680 /*
8681 * _aesni_enc1: internal ABI
8682@@ -319,6 +328,7 @@ _aesni_enc1:
8683 movaps 0x70(TKEYP), KEY
8684 # aesenclast KEY, STATE # last round
8685 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
8686+ pax_force_retaddr_bts
8687 ret
8688
8689 /*
8690@@ -482,6 +492,7 @@ _aesni_enc4:
8691 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
8692 # aesenclast KEY, STATE4
8693 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
8694+ pax_force_retaddr_bts
8695 ret
8696
8697 /*
8698@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
8699 movups (INP), STATE # input
8700 call _aesni_dec1
8701 movups STATE, (OUTP) #output
8702+ pax_force_retaddr 0, 1
8703 ret
8704+ENDPROC(aesni_dec)
8705
8706 /*
8707 * _aesni_dec1: internal ABI
8708@@ -563,6 +576,7 @@ _aesni_dec1:
8709 movaps 0x70(TKEYP), KEY
8710 # aesdeclast KEY, STATE # last round
8711 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
8712+ pax_force_retaddr_bts
8713 ret
8714
8715 /*
8716@@ -726,6 +740,7 @@ _aesni_dec4:
8717 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
8718 # aesdeclast KEY, STATE4
8719 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
8720+ pax_force_retaddr_bts
8721 ret
8722
8723 /*
8724@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
8725 cmp $16, LEN
8726 jge .Lecb_enc_loop1
8727 .Lecb_enc_ret:
8728+ pax_force_retaddr 0, 1
8729 ret
8730+ENDPROC(aesni_ecb_enc)
8731
8732 /*
8733 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8734@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
8735 cmp $16, LEN
8736 jge .Lecb_dec_loop1
8737 .Lecb_dec_ret:
8738+ pax_force_retaddr 0, 1
8739 ret
8740+ENDPROC(aesni_ecb_dec)
8741
8742 /*
8743 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8744@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
8745 jge .Lcbc_enc_loop
8746 movups STATE, (IVP)
8747 .Lcbc_enc_ret:
8748+ pax_force_retaddr 0, 1
8749 ret
8750+ENDPROC(aesni_cbc_enc)
8751
8752 /*
8753 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8754@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
8755 .Lcbc_dec_ret:
8756 movups IV, (IVP)
8757 .Lcbc_dec_just_ret:
8758+ pax_force_retaddr 0, 1
8759 ret
8760+ENDPROC(aesni_cbc_dec)
8761diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8762index 6214a9b..1f4fc9a 100644
8763--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8764+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8765@@ -1,3 +1,5 @@
8766+#include <asm/alternative-asm.h>
8767+
8768 # enter ECRYPT_encrypt_bytes
8769 .text
8770 .p2align 5
8771@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8772 add %r11,%rsp
8773 mov %rdi,%rax
8774 mov %rsi,%rdx
8775+ pax_force_retaddr 0, 1
8776 ret
8777 # bytesatleast65:
8778 ._bytesatleast65:
8779@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8780 add %r11,%rsp
8781 mov %rdi,%rax
8782 mov %rsi,%rdx
8783+ pax_force_retaddr
8784 ret
8785 # enter ECRYPT_ivsetup
8786 .text
8787@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8788 add %r11,%rsp
8789 mov %rdi,%rax
8790 mov %rsi,%rdx
8791+ pax_force_retaddr
8792 ret
8793diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8794index 35974a5..5662ae2 100644
8795--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8796+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8797@@ -21,6 +21,7 @@
8798 .text
8799
8800 #include <asm/asm-offsets.h>
8801+#include <asm/alternative-asm.h>
8802
8803 #define a_offset 0
8804 #define b_offset 4
8805@@ -269,6 +270,7 @@ twofish_enc_blk:
8806
8807 popq R1
8808 movq $1,%rax
8809+ pax_force_retaddr 0, 1
8810 ret
8811
8812 twofish_dec_blk:
8813@@ -321,4 +323,5 @@ twofish_dec_blk:
8814
8815 popq R1
8816 movq $1,%rax
8817+ pax_force_retaddr 0, 1
8818 ret
8819diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8820index 14531ab..bc68a7b 100644
8821--- a/arch/x86/ia32/ia32_aout.c
8822+++ b/arch/x86/ia32/ia32_aout.c
8823@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8824 unsigned long dump_start, dump_size;
8825 struct user32 dump;
8826
8827+ memset(&dump, 0, sizeof(dump));
8828+
8829 fs = get_fs();
8830 set_fs(KERNEL_DS);
8831 has_dumped = 1;
8832@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8833 dump_size = dump.u_ssize << PAGE_SHIFT;
8834 DUMP_WRITE(dump_start, dump_size);
8835 }
8836- /*
8837- * Finally dump the task struct. Not be used by gdb, but
8838- * could be useful
8839- */
8840- set_fs(KERNEL_DS);
8841- DUMP_WRITE(current, sizeof(*current));
8842 end_coredump:
8843 set_fs(fs);
8844 return has_dumped;
8845@@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8846 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8847 current->mm->cached_hole_size = 0;
8848
8849+ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8850+ if (retval < 0) {
8851+ /* Someone check-me: is this error path enough? */
8852+ send_sig(SIGKILL, current, 0);
8853+ return retval;
8854+ }
8855+
8856 install_exec_creds(bprm);
8857 current->flags &= ~PF_FORKNOEXEC;
8858
8859@@ -422,13 +425,6 @@ beyond_if:
8860
8861 set_brk(current->mm->start_brk, current->mm->brk);
8862
8863- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8864- if (retval < 0) {
8865- /* Someone check-me: is this error path enough? */
8866- send_sig(SIGKILL, current, 0);
8867- return retval;
8868- }
8869-
8870 current->mm->start_stack =
8871 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8872 /* start thread */
8873diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8874index 588a7aa..a3468b0 100644
8875--- a/arch/x86/ia32/ia32_signal.c
8876+++ b/arch/x86/ia32/ia32_signal.c
8877@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8878 }
8879 seg = get_fs();
8880 set_fs(KERNEL_DS);
8881- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8882+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8883 set_fs(seg);
8884 if (ret >= 0 && uoss_ptr) {
8885 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8886@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8887 */
8888 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8889 size_t frame_size,
8890- void **fpstate)
8891+ void __user **fpstate)
8892 {
8893 unsigned long sp;
8894
8895@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8896
8897 if (used_math()) {
8898 sp = sp - sig_xstate_ia32_size;
8899- *fpstate = (struct _fpstate_ia32 *) sp;
8900+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8901 if (save_i387_xstate_ia32(*fpstate) < 0)
8902 return (void __user *) -1L;
8903 }
8904@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8905 sp -= frame_size;
8906 /* Align the stack pointer according to the i386 ABI,
8907 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8908- sp = ((sp + 4) & -16ul) - 4;
8909+ sp = ((sp - 12) & -16ul) - 4;
8910 return (void __user *) sp;
8911 }
8912
8913@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8914 * These are actually not used anymore, but left because some
8915 * gdb versions depend on them as a marker.
8916 */
8917- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8918+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8919 } put_user_catch(err);
8920
8921 if (err)
8922@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8923 0xb8,
8924 __NR_ia32_rt_sigreturn,
8925 0x80cd,
8926- 0,
8927+ 0
8928 };
8929
8930 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8931@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8932
8933 if (ka->sa.sa_flags & SA_RESTORER)
8934 restorer = ka->sa.sa_restorer;
8935+ else if (current->mm->context.vdso)
8936+ /* Return stub is in 32bit vsyscall page */
8937+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8938 else
8939- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8940- rt_sigreturn);
8941+ restorer = &frame->retcode;
8942 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8943
8944 /*
8945 * Not actually used anymore, but left because some gdb
8946 * versions need it.
8947 */
8948- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8949+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8950 } put_user_catch(err);
8951
8952 if (err)
8953diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8954index 4edd8eb..29124b4 100644
8955--- a/arch/x86/ia32/ia32entry.S
8956+++ b/arch/x86/ia32/ia32entry.S
8957@@ -13,7 +13,9 @@
8958 #include <asm/thread_info.h>
8959 #include <asm/segment.h>
8960 #include <asm/irqflags.h>
8961+#include <asm/pgtable.h>
8962 #include <linux/linkage.h>
8963+#include <asm/alternative-asm.h>
8964
8965 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8966 #include <linux/elf-em.h>
8967@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8968 ENDPROC(native_irq_enable_sysexit)
8969 #endif
8970
8971+ .macro pax_enter_kernel_user
8972+ pax_set_fptr_mask
8973+#ifdef CONFIG_PAX_MEMORY_UDEREF
8974+ call pax_enter_kernel_user
8975+#endif
8976+ .endm
8977+
8978+ .macro pax_exit_kernel_user
8979+#ifdef CONFIG_PAX_MEMORY_UDEREF
8980+ call pax_exit_kernel_user
8981+#endif
8982+#ifdef CONFIG_PAX_RANDKSTACK
8983+ pushq %rax
8984+ pushq %r11
8985+ call pax_randomize_kstack
8986+ popq %r11
8987+ popq %rax
8988+#endif
8989+ .endm
8990+
8991+.macro pax_erase_kstack
8992+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8993+ call pax_erase_kstack
8994+#endif
8995+.endm
8996+
8997 /*
8998 * 32bit SYSENTER instruction entry.
8999 *
9000@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
9001 CFI_REGISTER rsp,rbp
9002 SWAPGS_UNSAFE_STACK
9003 movq PER_CPU_VAR(kernel_stack), %rsp
9004- addq $(KERNEL_STACK_OFFSET),%rsp
9005- /*
9006- * No need to follow this irqs on/off section: the syscall
9007- * disabled irqs, here we enable it straight after entry:
9008- */
9009- ENABLE_INTERRUPTS(CLBR_NONE)
9010 movl %ebp,%ebp /* zero extension */
9011 pushq $__USER32_DS
9012 CFI_ADJUST_CFA_OFFSET 8
9013@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
9014 pushfq
9015 CFI_ADJUST_CFA_OFFSET 8
9016 /*CFI_REL_OFFSET rflags,0*/
9017- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
9018- CFI_REGISTER rip,r10
9019+ orl $X86_EFLAGS_IF,(%rsp)
9020+ GET_THREAD_INFO(%r11)
9021+ movl TI_sysenter_return(%r11), %r11d
9022+ CFI_REGISTER rip,r11
9023 pushq $__USER32_CS
9024 CFI_ADJUST_CFA_OFFSET 8
9025 /*CFI_REL_OFFSET cs,0*/
9026 movl %eax, %eax
9027- pushq %r10
9028+ pushq %r11
9029 CFI_ADJUST_CFA_OFFSET 8
9030 CFI_REL_OFFSET rip,0
9031 pushq %rax
9032 CFI_ADJUST_CFA_OFFSET 8
9033 cld
9034 SAVE_ARGS 0,0,1
9035+ pax_enter_kernel_user
9036+ /*
9037+ * No need to follow this irqs on/off section: the syscall
9038+ * disabled irqs, here we enable it straight after entry:
9039+ */
9040+ ENABLE_INTERRUPTS(CLBR_NONE)
9041 /* no need to do an access_ok check here because rbp has been
9042 32bit zero extended */
9043+
9044+#ifdef CONFIG_PAX_MEMORY_UDEREF
9045+ mov $PAX_USER_SHADOW_BASE,%r11
9046+ add %r11,%rbp
9047+#endif
9048+
9049 1: movl (%rbp),%ebp
9050 .section __ex_table,"a"
9051 .quad 1b,ia32_badarg
9052 .previous
9053- GET_THREAD_INFO(%r10)
9054- orl $TS_COMPAT,TI_status(%r10)
9055- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9056+ GET_THREAD_INFO(%r11)
9057+ orl $TS_COMPAT,TI_status(%r11)
9058+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9059 CFI_REMEMBER_STATE
9060 jnz sysenter_tracesys
9061 cmpq $(IA32_NR_syscalls-1),%rax
9062@@ -166,13 +202,15 @@ sysenter_do_call:
9063 sysenter_dispatch:
9064 call *ia32_sys_call_table(,%rax,8)
9065 movq %rax,RAX-ARGOFFSET(%rsp)
9066- GET_THREAD_INFO(%r10)
9067+ GET_THREAD_INFO(%r11)
9068 DISABLE_INTERRUPTS(CLBR_NONE)
9069 TRACE_IRQS_OFF
9070- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9071+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9072 jnz sysexit_audit
9073 sysexit_from_sys_call:
9074- andl $~TS_COMPAT,TI_status(%r10)
9075+ pax_exit_kernel_user
9076+ pax_erase_kstack
9077+ andl $~TS_COMPAT,TI_status(%r11)
9078 /* clear IF, that popfq doesn't enable interrupts early */
9079 andl $~0x200,EFLAGS-R11(%rsp)
9080 movl RIP-R11(%rsp),%edx /* User %eip */
9081@@ -200,6 +238,9 @@ sysexit_from_sys_call:
9082 movl %eax,%esi /* 2nd arg: syscall number */
9083 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9084 call audit_syscall_entry
9085+
9086+ pax_erase_kstack
9087+
9088 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9089 cmpq $(IA32_NR_syscalls-1),%rax
9090 ja ia32_badsys
9091@@ -211,7 +252,7 @@ sysexit_from_sys_call:
9092 .endm
9093
9094 .macro auditsys_exit exit
9095- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9096+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9097 jnz ia32_ret_from_sys_call
9098 TRACE_IRQS_ON
9099 sti
9100@@ -221,12 +262,12 @@ sysexit_from_sys_call:
9101 movzbl %al,%edi /* zero-extend that into %edi */
9102 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
9103 call audit_syscall_exit
9104- GET_THREAD_INFO(%r10)
9105+ GET_THREAD_INFO(%r11)
9106 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
9107 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9108 cli
9109 TRACE_IRQS_OFF
9110- testl %edi,TI_flags(%r10)
9111+ testl %edi,TI_flags(%r11)
9112 jz \exit
9113 CLEAR_RREGS -ARGOFFSET
9114 jmp int_with_check
9115@@ -244,7 +285,7 @@ sysexit_audit:
9116
9117 sysenter_tracesys:
9118 #ifdef CONFIG_AUDITSYSCALL
9119- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9120+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9121 jz sysenter_auditsys
9122 #endif
9123 SAVE_REST
9124@@ -252,6 +293,9 @@ sysenter_tracesys:
9125 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
9126 movq %rsp,%rdi /* &pt_regs -> arg1 */
9127 call syscall_trace_enter
9128+
9129+ pax_erase_kstack
9130+
9131 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9132 RESTORE_REST
9133 cmpq $(IA32_NR_syscalls-1),%rax
9134@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
9135 ENTRY(ia32_cstar_target)
9136 CFI_STARTPROC32 simple
9137 CFI_SIGNAL_FRAME
9138- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9139+ CFI_DEF_CFA rsp,0
9140 CFI_REGISTER rip,rcx
9141 /*CFI_REGISTER rflags,r11*/
9142 SWAPGS_UNSAFE_STACK
9143 movl %esp,%r8d
9144 CFI_REGISTER rsp,r8
9145 movq PER_CPU_VAR(kernel_stack),%rsp
9146+ SAVE_ARGS 8*6,1,1
9147+ pax_enter_kernel_user
9148 /*
9149 * No need to follow this irqs on/off section: the syscall
9150 * disabled irqs and here we enable it straight after entry:
9151 */
9152 ENABLE_INTERRUPTS(CLBR_NONE)
9153- SAVE_ARGS 8,1,1
9154 movl %eax,%eax /* zero extension */
9155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9156 movq %rcx,RIP-ARGOFFSET(%rsp)
9157@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
9158 /* no need to do an access_ok check here because r8 has been
9159 32bit zero extended */
9160 /* hardware stack frame is complete now */
9161+
9162+#ifdef CONFIG_PAX_MEMORY_UDEREF
9163+ mov $PAX_USER_SHADOW_BASE,%r11
9164+ add %r11,%r8
9165+#endif
9166+
9167 1: movl (%r8),%r9d
9168 .section __ex_table,"a"
9169 .quad 1b,ia32_badarg
9170 .previous
9171- GET_THREAD_INFO(%r10)
9172- orl $TS_COMPAT,TI_status(%r10)
9173- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9174+ GET_THREAD_INFO(%r11)
9175+ orl $TS_COMPAT,TI_status(%r11)
9176+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9177 CFI_REMEMBER_STATE
9178 jnz cstar_tracesys
9179 cmpq $IA32_NR_syscalls-1,%rax
9180@@ -327,13 +378,15 @@ cstar_do_call:
9181 cstar_dispatch:
9182 call *ia32_sys_call_table(,%rax,8)
9183 movq %rax,RAX-ARGOFFSET(%rsp)
9184- GET_THREAD_INFO(%r10)
9185+ GET_THREAD_INFO(%r11)
9186 DISABLE_INTERRUPTS(CLBR_NONE)
9187 TRACE_IRQS_OFF
9188- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9189+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9190 jnz sysretl_audit
9191 sysretl_from_sys_call:
9192- andl $~TS_COMPAT,TI_status(%r10)
9193+ pax_exit_kernel_user
9194+ pax_erase_kstack
9195+ andl $~TS_COMPAT,TI_status(%r11)
9196 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
9197 movl RIP-ARGOFFSET(%rsp),%ecx
9198 CFI_REGISTER rip,rcx
9199@@ -361,7 +414,7 @@ sysretl_audit:
9200
9201 cstar_tracesys:
9202 #ifdef CONFIG_AUDITSYSCALL
9203- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9204+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9205 jz cstar_auditsys
9206 #endif
9207 xchgl %r9d,%ebp
9208@@ -370,6 +423,9 @@ cstar_tracesys:
9209 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9210 movq %rsp,%rdi /* &pt_regs -> arg1 */
9211 call syscall_trace_enter
9212+
9213+ pax_erase_kstack
9214+
9215 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9216 RESTORE_REST
9217 xchgl %ebp,%r9d
9218@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
9219 CFI_REL_OFFSET rip,RIP-RIP
9220 PARAVIRT_ADJUST_EXCEPTION_FRAME
9221 SWAPGS
9222- /*
9223- * No need to follow this irqs on/off section: the syscall
9224- * disabled irqs and here we enable it straight after entry:
9225- */
9226- ENABLE_INTERRUPTS(CLBR_NONE)
9227 movl %eax,%eax
9228 pushq %rax
9229 CFI_ADJUST_CFA_OFFSET 8
9230@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
9231 /* note the registers are not zero extended to the sf.
9232 this could be a problem. */
9233 SAVE_ARGS 0,0,1
9234- GET_THREAD_INFO(%r10)
9235- orl $TS_COMPAT,TI_status(%r10)
9236- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9237+ pax_enter_kernel_user
9238+ /*
9239+ * No need to follow this irqs on/off section: the syscall
9240+ * disabled irqs and here we enable it straight after entry:
9241+ */
9242+ ENABLE_INTERRUPTS(CLBR_NONE)
9243+ GET_THREAD_INFO(%r11)
9244+ orl $TS_COMPAT,TI_status(%r11)
9245+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9246 jnz ia32_tracesys
9247 cmpq $(IA32_NR_syscalls-1),%rax
9248 ja ia32_badsys
9249@@ -448,6 +505,9 @@ ia32_tracesys:
9250 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9251 movq %rsp,%rdi /* &pt_regs -> arg1 */
9252 call syscall_trace_enter
9253+
9254+ pax_erase_kstack
9255+
9256 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9257 RESTORE_REST
9258 cmpq $(IA32_NR_syscalls-1),%rax
9259@@ -462,6 +522,7 @@ ia32_badsys:
9260
9261 quiet_ni_syscall:
9262 movq $-ENOSYS,%rax
9263+ pax_force_retaddr
9264 ret
9265 CFI_ENDPROC
9266
9267diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9268index 016218c..47ccbdd 100644
9269--- a/arch/x86/ia32/sys_ia32.c
9270+++ b/arch/x86/ia32/sys_ia32.c
9271@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9272 */
9273 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9274 {
9275- typeof(ubuf->st_uid) uid = 0;
9276- typeof(ubuf->st_gid) gid = 0;
9277+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9278+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9279 SET_UID(uid, stat->uid);
9280 SET_GID(gid, stat->gid);
9281 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9282@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9283 }
9284 set_fs(KERNEL_DS);
9285 ret = sys_rt_sigprocmask(how,
9286- set ? (sigset_t __user *)&s : NULL,
9287- oset ? (sigset_t __user *)&s : NULL,
9288+ set ? (sigset_t __force_user *)&s : NULL,
9289+ oset ? (sigset_t __force_user *)&s : NULL,
9290 sigsetsize);
9291 set_fs(old_fs);
9292 if (ret)
9293@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9294 mm_segment_t old_fs = get_fs();
9295
9296 set_fs(KERNEL_DS);
9297- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9298+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9299 set_fs(old_fs);
9300 if (put_compat_timespec(&t, interval))
9301 return -EFAULT;
9302@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9303 mm_segment_t old_fs = get_fs();
9304
9305 set_fs(KERNEL_DS);
9306- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9307+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9308 set_fs(old_fs);
9309 if (!ret) {
9310 switch (_NSIG_WORDS) {
9311@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9312 if (copy_siginfo_from_user32(&info, uinfo))
9313 return -EFAULT;
9314 set_fs(KERNEL_DS);
9315- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9316+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9317 set_fs(old_fs);
9318 return ret;
9319 }
9320@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9321 return -EFAULT;
9322
9323 set_fs(KERNEL_DS);
9324- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9325+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9326 count);
9327 set_fs(old_fs);
9328
9329diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9330index e2077d3..17d07ad 100644
9331--- a/arch/x86/include/asm/alternative-asm.h
9332+++ b/arch/x86/include/asm/alternative-asm.h
9333@@ -8,10 +8,10 @@
9334
9335 #ifdef CONFIG_SMP
9336 .macro LOCK_PREFIX
9337-1: lock
9338+672: lock
9339 .section .smp_locks,"a"
9340 .align 4
9341- X86_ALIGN 1b
9342+ X86_ALIGN 672b
9343 .previous
9344 .endm
9345 #else
9346@@ -19,4 +19,43 @@
9347 .endm
9348 #endif
9349
9350+#ifdef KERNEXEC_PLUGIN
9351+ .macro pax_force_retaddr_bts rip=0
9352+ btsq $63,\rip(%rsp)
9353+ .endm
9354+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9355+ .macro pax_force_retaddr rip=0, reload=0
9356+ btsq $63,\rip(%rsp)
9357+ .endm
9358+ .macro pax_force_fptr ptr
9359+ btsq $63,\ptr
9360+ .endm
9361+ .macro pax_set_fptr_mask
9362+ .endm
9363+#endif
9364+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9365+ .macro pax_force_retaddr rip=0, reload=0
9366+ .if \reload
9367+ pax_set_fptr_mask
9368+ .endif
9369+ orq %r10,\rip(%rsp)
9370+ .endm
9371+ .macro pax_force_fptr ptr
9372+ orq %r10,\ptr
9373+ .endm
9374+ .macro pax_set_fptr_mask
9375+ movabs $0x8000000000000000,%r10
9376+ .endm
9377+#endif
9378+#else
9379+ .macro pax_force_retaddr rip=0, reload=0
9380+ .endm
9381+ .macro pax_force_fptr ptr
9382+ .endm
9383+ .macro pax_force_retaddr_bts rip=0
9384+ .endm
9385+ .macro pax_set_fptr_mask
9386+ .endm
9387+#endif
9388+
9389 #endif /* __ASSEMBLY__ */
9390diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9391index c240efc..fdfadf3 100644
9392--- a/arch/x86/include/asm/alternative.h
9393+++ b/arch/x86/include/asm/alternative.h
9394@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
9395 " .byte 662b-661b\n" /* sourcelen */ \
9396 " .byte 664f-663f\n" /* replacementlen */ \
9397 ".previous\n" \
9398- ".section .altinstr_replacement, \"ax\"\n" \
9399+ ".section .altinstr_replacement, \"a\"\n" \
9400 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9401 ".previous"
9402
9403diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9404index 474d80d..1f97d58 100644
9405--- a/arch/x86/include/asm/apic.h
9406+++ b/arch/x86/include/asm/apic.h
9407@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
9408
9409 #ifdef CONFIG_X86_LOCAL_APIC
9410
9411-extern unsigned int apic_verbosity;
9412+extern int apic_verbosity;
9413 extern int local_apic_timer_c2_ok;
9414
9415 extern int disable_apic;
9416diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9417index 20370c6..a2eb9b0 100644
9418--- a/arch/x86/include/asm/apm.h
9419+++ b/arch/x86/include/asm/apm.h
9420@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9421 __asm__ __volatile__(APM_DO_ZERO_SEGS
9422 "pushl %%edi\n\t"
9423 "pushl %%ebp\n\t"
9424- "lcall *%%cs:apm_bios_entry\n\t"
9425+ "lcall *%%ss:apm_bios_entry\n\t"
9426 "setc %%al\n\t"
9427 "popl %%ebp\n\t"
9428 "popl %%edi\n\t"
9429@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9430 __asm__ __volatile__(APM_DO_ZERO_SEGS
9431 "pushl %%edi\n\t"
9432 "pushl %%ebp\n\t"
9433- "lcall *%%cs:apm_bios_entry\n\t"
9434+ "lcall *%%ss:apm_bios_entry\n\t"
9435 "setc %%bl\n\t"
9436 "popl %%ebp\n\t"
9437 "popl %%edi\n\t"
9438diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
9439index dc5a667..939040c 100644
9440--- a/arch/x86/include/asm/atomic_32.h
9441+++ b/arch/x86/include/asm/atomic_32.h
9442@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
9443 }
9444
9445 /**
9446+ * atomic_read_unchecked - read atomic variable
9447+ * @v: pointer of type atomic_unchecked_t
9448+ *
9449+ * Atomically reads the value of @v.
9450+ */
9451+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9452+{
9453+ return v->counter;
9454+}
9455+
9456+/**
9457 * atomic_set - set atomic variable
9458 * @v: pointer of type atomic_t
9459 * @i: required value
9460@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
9461 }
9462
9463 /**
9464+ * atomic_set_unchecked - set atomic variable
9465+ * @v: pointer of type atomic_unchecked_t
9466+ * @i: required value
9467+ *
9468+ * Atomically sets the value of @v to @i.
9469+ */
9470+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9471+{
9472+ v->counter = i;
9473+}
9474+
9475+/**
9476 * atomic_add - add integer to atomic variable
9477 * @i: integer value to add
9478 * @v: pointer of type atomic_t
9479@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
9480 */
9481 static inline void atomic_add(int i, atomic_t *v)
9482 {
9483- asm volatile(LOCK_PREFIX "addl %1,%0"
9484+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9485+
9486+#ifdef CONFIG_PAX_REFCOUNT
9487+ "jno 0f\n"
9488+ LOCK_PREFIX "subl %1,%0\n"
9489+ "int $4\n0:\n"
9490+ _ASM_EXTABLE(0b, 0b)
9491+#endif
9492+
9493+ : "+m" (v->counter)
9494+ : "ir" (i));
9495+}
9496+
9497+/**
9498+ * atomic_add_unchecked - add integer to atomic variable
9499+ * @i: integer value to add
9500+ * @v: pointer of type atomic_unchecked_t
9501+ *
9502+ * Atomically adds @i to @v.
9503+ */
9504+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9505+{
9506+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9507 : "+m" (v->counter)
9508 : "ir" (i));
9509 }
9510@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
9511 */
9512 static inline void atomic_sub(int i, atomic_t *v)
9513 {
9514- asm volatile(LOCK_PREFIX "subl %1,%0"
9515+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9516+
9517+#ifdef CONFIG_PAX_REFCOUNT
9518+ "jno 0f\n"
9519+ LOCK_PREFIX "addl %1,%0\n"
9520+ "int $4\n0:\n"
9521+ _ASM_EXTABLE(0b, 0b)
9522+#endif
9523+
9524+ : "+m" (v->counter)
9525+ : "ir" (i));
9526+}
9527+
9528+/**
9529+ * atomic_sub_unchecked - subtract integer from atomic variable
9530+ * @i: integer value to subtract
9531+ * @v: pointer of type atomic_unchecked_t
9532+ *
9533+ * Atomically subtracts @i from @v.
9534+ */
9535+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9536+{
9537+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9538 : "+m" (v->counter)
9539 : "ir" (i));
9540 }
9541@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9542 {
9543 unsigned char c;
9544
9545- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9546+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9547+
9548+#ifdef CONFIG_PAX_REFCOUNT
9549+ "jno 0f\n"
9550+ LOCK_PREFIX "addl %2,%0\n"
9551+ "int $4\n0:\n"
9552+ _ASM_EXTABLE(0b, 0b)
9553+#endif
9554+
9555+ "sete %1\n"
9556 : "+m" (v->counter), "=qm" (c)
9557 : "ir" (i) : "memory");
9558 return c;
9559@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9560 */
9561 static inline void atomic_inc(atomic_t *v)
9562 {
9563- asm volatile(LOCK_PREFIX "incl %0"
9564+ asm volatile(LOCK_PREFIX "incl %0\n"
9565+
9566+#ifdef CONFIG_PAX_REFCOUNT
9567+ "jno 0f\n"
9568+ LOCK_PREFIX "decl %0\n"
9569+ "int $4\n0:\n"
9570+ _ASM_EXTABLE(0b, 0b)
9571+#endif
9572+
9573+ : "+m" (v->counter));
9574+}
9575+
9576+/**
9577+ * atomic_inc_unchecked - increment atomic variable
9578+ * @v: pointer of type atomic_unchecked_t
9579+ *
9580+ * Atomically increments @v by 1.
9581+ */
9582+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9583+{
9584+ asm volatile(LOCK_PREFIX "incl %0\n"
9585 : "+m" (v->counter));
9586 }
9587
9588@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
9589 */
9590 static inline void atomic_dec(atomic_t *v)
9591 {
9592- asm volatile(LOCK_PREFIX "decl %0"
9593+ asm volatile(LOCK_PREFIX "decl %0\n"
9594+
9595+#ifdef CONFIG_PAX_REFCOUNT
9596+ "jno 0f\n"
9597+ LOCK_PREFIX "incl %0\n"
9598+ "int $4\n0:\n"
9599+ _ASM_EXTABLE(0b, 0b)
9600+#endif
9601+
9602+ : "+m" (v->counter));
9603+}
9604+
9605+/**
9606+ * atomic_dec_unchecked - decrement atomic variable
9607+ * @v: pointer of type atomic_unchecked_t
9608+ *
9609+ * Atomically decrements @v by 1.
9610+ */
9611+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9612+{
9613+ asm volatile(LOCK_PREFIX "decl %0\n"
9614 : "+m" (v->counter));
9615 }
9616
9617@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9618 {
9619 unsigned char c;
9620
9621- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9622+ asm volatile(LOCK_PREFIX "decl %0\n"
9623+
9624+#ifdef CONFIG_PAX_REFCOUNT
9625+ "jno 0f\n"
9626+ LOCK_PREFIX "incl %0\n"
9627+ "int $4\n0:\n"
9628+ _ASM_EXTABLE(0b, 0b)
9629+#endif
9630+
9631+ "sete %1\n"
9632 : "+m" (v->counter), "=qm" (c)
9633 : : "memory");
9634 return c != 0;
9635@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9636 {
9637 unsigned char c;
9638
9639- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9640+ asm volatile(LOCK_PREFIX "incl %0\n"
9641+
9642+#ifdef CONFIG_PAX_REFCOUNT
9643+ "jno 0f\n"
9644+ LOCK_PREFIX "decl %0\n"
9645+ "into\n0:\n"
9646+ _ASM_EXTABLE(0b, 0b)
9647+#endif
9648+
9649+ "sete %1\n"
9650+ : "+m" (v->counter), "=qm" (c)
9651+ : : "memory");
9652+ return c != 0;
9653+}
9654+
9655+/**
9656+ * atomic_inc_and_test_unchecked - increment and test
9657+ * @v: pointer of type atomic_unchecked_t
9658+ *
9659+ * Atomically increments @v by 1
9660+ * and returns true if the result is zero, or false for all
9661+ * other cases.
9662+ */
9663+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9664+{
9665+ unsigned char c;
9666+
9667+ asm volatile(LOCK_PREFIX "incl %0\n"
9668+ "sete %1\n"
9669 : "+m" (v->counter), "=qm" (c)
9670 : : "memory");
9671 return c != 0;
9672@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9673 {
9674 unsigned char c;
9675
9676- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9677+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9678+
9679+#ifdef CONFIG_PAX_REFCOUNT
9680+ "jno 0f\n"
9681+ LOCK_PREFIX "subl %2,%0\n"
9682+ "int $4\n0:\n"
9683+ _ASM_EXTABLE(0b, 0b)
9684+#endif
9685+
9686+ "sets %1\n"
9687 : "+m" (v->counter), "=qm" (c)
9688 : "ir" (i) : "memory");
9689 return c;
9690@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
9691 #endif
9692 /* Modern 486+ processor */
9693 __i = i;
9694- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9695+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9696+
9697+#ifdef CONFIG_PAX_REFCOUNT
9698+ "jno 0f\n"
9699+ "movl %0, %1\n"
9700+ "int $4\n0:\n"
9701+ _ASM_EXTABLE(0b, 0b)
9702+#endif
9703+
9704 : "+r" (i), "+m" (v->counter)
9705 : : "memory");
9706 return i + __i;
9707@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
9708 }
9709
9710 /**
9711+ * atomic_add_return_unchecked - add integer and return
9712+ * @v: pointer of type atomic_unchecked_t
9713+ * @i: integer value to add
9714+ *
9715+ * Atomically adds @i to @v and returns @i + @v
9716+ */
9717+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9718+{
9719+ int __i;
9720+#ifdef CONFIG_M386
9721+ unsigned long flags;
9722+ if (unlikely(boot_cpu_data.x86 <= 3))
9723+ goto no_xadd;
9724+#endif
9725+ /* Modern 486+ processor */
9726+ __i = i;
9727+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
9728+ : "+r" (i), "+m" (v->counter)
9729+ : : "memory");
9730+ return i + __i;
9731+
9732+#ifdef CONFIG_M386
9733+no_xadd: /* Legacy 386 processor */
9734+ local_irq_save(flags);
9735+ __i = atomic_read_unchecked(v);
9736+ atomic_set_unchecked(v, i + __i);
9737+ local_irq_restore(flags);
9738+ return i + __i;
9739+#endif
9740+}
9741+
9742+/**
9743 * atomic_sub_return - subtract integer and return
9744 * @v: pointer of type atomic_t
9745 * @i: integer value to subtract
9746@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9747 return cmpxchg(&v->counter, old, new);
9748 }
9749
9750+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9751+{
9752+ return cmpxchg(&v->counter, old, new);
9753+}
9754+
9755 static inline int atomic_xchg(atomic_t *v, int new)
9756 {
9757 return xchg(&v->counter, new);
9758 }
9759
9760+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9761+{
9762+ return xchg(&v->counter, new);
9763+}
9764+
9765 /**
9766 * atomic_add_unless - add unless the number is already a given value
9767 * @v: pointer of type atomic_t
9768@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
9769 */
9770 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9771 {
9772- int c, old;
9773+ int c, old, new;
9774 c = atomic_read(v);
9775 for (;;) {
9776- if (unlikely(c == (u)))
9777+ if (unlikely(c == u))
9778 break;
9779- old = atomic_cmpxchg((v), c, c + (a));
9780+
9781+ asm volatile("addl %2,%0\n"
9782+
9783+#ifdef CONFIG_PAX_REFCOUNT
9784+ "jno 0f\n"
9785+ "subl %2,%0\n"
9786+ "int $4\n0:\n"
9787+ _ASM_EXTABLE(0b, 0b)
9788+#endif
9789+
9790+ : "=r" (new)
9791+ : "0" (c), "ir" (a));
9792+
9793+ old = atomic_cmpxchg(v, c, new);
9794 if (likely(old == c))
9795 break;
9796 c = old;
9797 }
9798- return c != (u);
9799+ return c != u;
9800 }
9801
9802 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9803
9804 #define atomic_inc_return(v) (atomic_add_return(1, v))
9805+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9806+{
9807+ return atomic_add_return_unchecked(1, v);
9808+}
9809 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9810
9811 /* These are x86-specific, used by some header files */
9812@@ -266,9 +495,18 @@ typedef struct {
9813 u64 __aligned(8) counter;
9814 } atomic64_t;
9815
9816+#ifdef CONFIG_PAX_REFCOUNT
9817+typedef struct {
9818+ u64 __aligned(8) counter;
9819+} atomic64_unchecked_t;
9820+#else
9821+typedef atomic64_t atomic64_unchecked_t;
9822+#endif
9823+
9824 #define ATOMIC64_INIT(val) { (val) }
9825
9826 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9827+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
9828
9829 /**
9830 * atomic64_xchg - xchg atomic64 variable
9831@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9832 * the old value.
9833 */
9834 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9835+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9836
9837 /**
9838 * atomic64_set - set atomic64 variable
9839@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9840 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
9841
9842 /**
9843+ * atomic64_unchecked_set - set atomic64 variable
9844+ * @ptr: pointer to type atomic64_unchecked_t
9845+ * @new_val: value to assign
9846+ *
9847+ * Atomically sets the value of @ptr to @new_val.
9848+ */
9849+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9850+
9851+/**
9852 * atomic64_read - read atomic64 variable
9853 * @ptr: pointer to type atomic64_t
9854 *
9855@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9856 return res;
9857 }
9858
9859-extern u64 atomic64_read(atomic64_t *ptr);
9860+/**
9861+ * atomic64_read_unchecked - read atomic64 variable
9862+ * @ptr: pointer to type atomic64_unchecked_t
9863+ *
9864+ * Atomically reads the value of @ptr and returns it.
9865+ */
9866+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9867+{
9868+ u64 res;
9869+
9870+ /*
9871+ * Note, we inline this atomic64_unchecked_t primitive because
9872+ * it only clobbers EAX/EDX and leaves the others
9873+ * untouched. We also (somewhat subtly) rely on the
9874+ * fact that cmpxchg8b returns the current 64-bit value
9875+ * of the memory location we are touching:
9876+ */
9877+ asm volatile(
9878+ "mov %%ebx, %%eax\n\t"
9879+ "mov %%ecx, %%edx\n\t"
9880+ LOCK_PREFIX "cmpxchg8b %1\n"
9881+ : "=&A" (res)
9882+ : "m" (*ptr)
9883+ );
9884+
9885+ return res;
9886+}
9887
9888 /**
9889 * atomic64_add_return - add and return
9890@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9891 * Other variants with different arithmetic operators:
9892 */
9893 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9894+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9895 extern u64 atomic64_inc_return(atomic64_t *ptr);
9896+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9897 extern u64 atomic64_dec_return(atomic64_t *ptr);
9898+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9899
9900 /**
9901 * atomic64_add - add integer to atomic64 variable
9902@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9903 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9904
9905 /**
9906+ * atomic64_add_unchecked - add integer to atomic64 variable
9907+ * @delta: integer value to add
9908+ * @ptr: pointer to type atomic64_unchecked_t
9909+ *
9910+ * Atomically adds @delta to @ptr.
9911+ */
9912+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9913+
9914+/**
9915 * atomic64_sub - subtract the atomic64 variable
9916 * @delta: integer value to subtract
9917 * @ptr: pointer to type atomic64_t
9918@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9919 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9920
9921 /**
9922+ * atomic64_sub_unchecked - subtract the atomic64 variable
9923+ * @delta: integer value to subtract
9924+ * @ptr: pointer to type atomic64_unchecked_t
9925+ *
9926+ * Atomically subtracts @delta from @ptr.
9927+ */
9928+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9929+
9930+/**
9931 * atomic64_sub_and_test - subtract value from variable and test result
9932 * @delta: integer value to subtract
9933 * @ptr: pointer to type atomic64_t
9934@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9935 extern void atomic64_inc(atomic64_t *ptr);
9936
9937 /**
9938+ * atomic64_inc_unchecked - increment atomic64 variable
9939+ * @ptr: pointer to type atomic64_unchecked_t
9940+ *
9941+ * Atomically increments @ptr by 1.
9942+ */
9943+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9944+
9945+/**
9946 * atomic64_dec - decrement atomic64 variable
9947 * @ptr: pointer to type atomic64_t
9948 *
9949@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9950 extern void atomic64_dec(atomic64_t *ptr);
9951
9952 /**
9953+ * atomic64_dec_unchecked - decrement atomic64 variable
9954+ * @ptr: pointer to type atomic64_unchecked_t
9955+ *
9956+ * Atomically decrements @ptr by 1.
9957+ */
9958+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9959+
9960+/**
9961 * atomic64_dec_and_test - decrement and test
9962 * @ptr: pointer to type atomic64_t
9963 *
9964diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9965index d605dc2..fafd7bd 100644
9966--- a/arch/x86/include/asm/atomic_64.h
9967+++ b/arch/x86/include/asm/atomic_64.h
9968@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9969 }
9970
9971 /**
9972+ * atomic_read_unchecked - read atomic variable
9973+ * @v: pointer of type atomic_unchecked_t
9974+ *
9975+ * Atomically reads the value of @v.
9976+ */
9977+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9978+{
9979+ return v->counter;
9980+}
9981+
9982+/**
9983 * atomic_set - set atomic variable
9984 * @v: pointer of type atomic_t
9985 * @i: required value
9986@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9987 }
9988
9989 /**
9990+ * atomic_set_unchecked - set atomic variable
9991+ * @v: pointer of type atomic_unchecked_t
9992+ * @i: required value
9993+ *
9994+ * Atomically sets the value of @v to @i.
9995+ */
9996+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9997+{
9998+ v->counter = i;
9999+}
10000+
10001+/**
10002 * atomic_add - add integer to atomic variable
10003 * @i: integer value to add
10004 * @v: pointer of type atomic_t
10005@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
10006 */
10007 static inline void atomic_add(int i, atomic_t *v)
10008 {
10009- asm volatile(LOCK_PREFIX "addl %1,%0"
10010+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10011+
10012+#ifdef CONFIG_PAX_REFCOUNT
10013+ "jno 0f\n"
10014+ LOCK_PREFIX "subl %1,%0\n"
10015+ "int $4\n0:\n"
10016+ _ASM_EXTABLE(0b, 0b)
10017+#endif
10018+
10019+ : "=m" (v->counter)
10020+ : "ir" (i), "m" (v->counter));
10021+}
10022+
10023+/**
10024+ * atomic_add_unchecked - add integer to atomic variable
10025+ * @i: integer value to add
10026+ * @v: pointer of type atomic_unchecked_t
10027+ *
10028+ * Atomically adds @i to @v.
10029+ */
10030+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10031+{
10032+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10033 : "=m" (v->counter)
10034 : "ir" (i), "m" (v->counter));
10035 }
10036@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
10037 */
10038 static inline void atomic_sub(int i, atomic_t *v)
10039 {
10040- asm volatile(LOCK_PREFIX "subl %1,%0"
10041+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10042+
10043+#ifdef CONFIG_PAX_REFCOUNT
10044+ "jno 0f\n"
10045+ LOCK_PREFIX "addl %1,%0\n"
10046+ "int $4\n0:\n"
10047+ _ASM_EXTABLE(0b, 0b)
10048+#endif
10049+
10050+ : "=m" (v->counter)
10051+ : "ir" (i), "m" (v->counter));
10052+}
10053+
10054+/**
10055+ * atomic_sub_unchecked - subtract the atomic variable
10056+ * @i: integer value to subtract
10057+ * @v: pointer of type atomic_unchecked_t
10058+ *
10059+ * Atomically subtracts @i from @v.
10060+ */
10061+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10062+{
10063+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10064 : "=m" (v->counter)
10065 : "ir" (i), "m" (v->counter));
10066 }
10067@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10068 {
10069 unsigned char c;
10070
10071- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10072+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10073+
10074+#ifdef CONFIG_PAX_REFCOUNT
10075+ "jno 0f\n"
10076+ LOCK_PREFIX "addl %2,%0\n"
10077+ "int $4\n0:\n"
10078+ _ASM_EXTABLE(0b, 0b)
10079+#endif
10080+
10081+ "sete %1\n"
10082 : "=m" (v->counter), "=qm" (c)
10083 : "ir" (i), "m" (v->counter) : "memory");
10084 return c;
10085@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10086 */
10087 static inline void atomic_inc(atomic_t *v)
10088 {
10089- asm volatile(LOCK_PREFIX "incl %0"
10090+ asm volatile(LOCK_PREFIX "incl %0\n"
10091+
10092+#ifdef CONFIG_PAX_REFCOUNT
10093+ "jno 0f\n"
10094+ LOCK_PREFIX "decl %0\n"
10095+ "int $4\n0:\n"
10096+ _ASM_EXTABLE(0b, 0b)
10097+#endif
10098+
10099+ : "=m" (v->counter)
10100+ : "m" (v->counter));
10101+}
10102+
10103+/**
10104+ * atomic_inc_unchecked - increment atomic variable
10105+ * @v: pointer of type atomic_unchecked_t
10106+ *
10107+ * Atomically increments @v by 1.
10108+ */
10109+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10110+{
10111+ asm volatile(LOCK_PREFIX "incl %0\n"
10112 : "=m" (v->counter)
10113 : "m" (v->counter));
10114 }
10115@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
10116 */
10117 static inline void atomic_dec(atomic_t *v)
10118 {
10119- asm volatile(LOCK_PREFIX "decl %0"
10120+ asm volatile(LOCK_PREFIX "decl %0\n"
10121+
10122+#ifdef CONFIG_PAX_REFCOUNT
10123+ "jno 0f\n"
10124+ LOCK_PREFIX "incl %0\n"
10125+ "int $4\n0:\n"
10126+ _ASM_EXTABLE(0b, 0b)
10127+#endif
10128+
10129+ : "=m" (v->counter)
10130+ : "m" (v->counter));
10131+}
10132+
10133+/**
10134+ * atomic_dec_unchecked - decrement atomic variable
10135+ * @v: pointer of type atomic_unchecked_t
10136+ *
10137+ * Atomically decrements @v by 1.
10138+ */
10139+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10140+{
10141+ asm volatile(LOCK_PREFIX "decl %0\n"
10142 : "=m" (v->counter)
10143 : "m" (v->counter));
10144 }
10145@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10146 {
10147 unsigned char c;
10148
10149- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10150+ asm volatile(LOCK_PREFIX "decl %0\n"
10151+
10152+#ifdef CONFIG_PAX_REFCOUNT
10153+ "jno 0f\n"
10154+ LOCK_PREFIX "incl %0\n"
10155+ "int $4\n0:\n"
10156+ _ASM_EXTABLE(0b, 0b)
10157+#endif
10158+
10159+ "sete %1\n"
10160 : "=m" (v->counter), "=qm" (c)
10161 : "m" (v->counter) : "memory");
10162 return c != 0;
10163@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10164 {
10165 unsigned char c;
10166
10167- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10168+ asm volatile(LOCK_PREFIX "incl %0\n"
10169+
10170+#ifdef CONFIG_PAX_REFCOUNT
10171+ "jno 0f\n"
10172+ LOCK_PREFIX "decl %0\n"
10173+ "int $4\n0:\n"
10174+ _ASM_EXTABLE(0b, 0b)
10175+#endif
10176+
10177+ "sete %1\n"
10178+ : "=m" (v->counter), "=qm" (c)
10179+ : "m" (v->counter) : "memory");
10180+ return c != 0;
10181+}
10182+
10183+/**
10184+ * atomic_inc_and_test_unchecked - increment and test
10185+ * @v: pointer of type atomic_unchecked_t
10186+ *
10187+ * Atomically increments @v by 1
10188+ * and returns true if the result is zero, or false for all
10189+ * other cases.
10190+ */
10191+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10192+{
10193+ unsigned char c;
10194+
10195+ asm volatile(LOCK_PREFIX "incl %0\n"
10196+ "sete %1\n"
10197 : "=m" (v->counter), "=qm" (c)
10198 : "m" (v->counter) : "memory");
10199 return c != 0;
10200@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10201 {
10202 unsigned char c;
10203
10204- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10205+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10206+
10207+#ifdef CONFIG_PAX_REFCOUNT
10208+ "jno 0f\n"
10209+ LOCK_PREFIX "subl %2,%0\n"
10210+ "int $4\n0:\n"
10211+ _ASM_EXTABLE(0b, 0b)
10212+#endif
10213+
10214+ "sets %1\n"
10215 : "=m" (v->counter), "=qm" (c)
10216 : "ir" (i), "m" (v->counter) : "memory");
10217 return c;
10218@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10219 static inline int atomic_add_return(int i, atomic_t *v)
10220 {
10221 int __i = i;
10222- asm volatile(LOCK_PREFIX "xaddl %0, %1"
10223+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10224+
10225+#ifdef CONFIG_PAX_REFCOUNT
10226+ "jno 0f\n"
10227+ "movl %0, %1\n"
10228+ "int $4\n0:\n"
10229+ _ASM_EXTABLE(0b, 0b)
10230+#endif
10231+
10232+ : "+r" (i), "+m" (v->counter)
10233+ : : "memory");
10234+ return i + __i;
10235+}
10236+
10237+/**
10238+ * atomic_add_return_unchecked - add and return
10239+ * @i: integer value to add
10240+ * @v: pointer of type atomic_unchecked_t
10241+ *
10242+ * Atomically adds @i to @v and returns @i + @v
10243+ */
10244+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10245+{
10246+ int __i = i;
10247+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10248 : "+r" (i), "+m" (v->counter)
10249 : : "memory");
10250 return i + __i;
10251@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10252 }
10253
10254 #define atomic_inc_return(v) (atomic_add_return(1, v))
10255+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10256+{
10257+ return atomic_add_return_unchecked(1, v);
10258+}
10259 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10260
10261 /* The 64-bit atomic type */
10262@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
10263 }
10264
10265 /**
10266+ * atomic64_read_unchecked - read atomic64 variable
10267+ * @v: pointer of type atomic64_unchecked_t
10268+ *
10269+ * Atomically reads the value of @v.
10270+ * Doesn't imply a read memory barrier.
10271+ */
10272+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10273+{
10274+ return v->counter;
10275+}
10276+
10277+/**
10278 * atomic64_set - set atomic64 variable
10279 * @v: pointer to type atomic64_t
10280 * @i: required value
10281@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10282 }
10283
10284 /**
10285+ * atomic64_set_unchecked - set atomic64 variable
10286+ * @v: pointer to type atomic64_unchecked_t
10287+ * @i: required value
10288+ *
10289+ * Atomically sets the value of @v to @i.
10290+ */
10291+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10292+{
10293+ v->counter = i;
10294+}
10295+
10296+/**
10297 * atomic64_add - add integer to atomic64 variable
10298 * @i: integer value to add
10299 * @v: pointer to type atomic64_t
10300@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10301 */
10302 static inline void atomic64_add(long i, atomic64_t *v)
10303 {
10304+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
10305+
10306+#ifdef CONFIG_PAX_REFCOUNT
10307+ "jno 0f\n"
10308+ LOCK_PREFIX "subq %1,%0\n"
10309+ "int $4\n0:\n"
10310+ _ASM_EXTABLE(0b, 0b)
10311+#endif
10312+
10313+ : "=m" (v->counter)
10314+ : "er" (i), "m" (v->counter));
10315+}
10316+
10317+/**
10318+ * atomic64_add_unchecked - add integer to atomic64 variable
10319+ * @i: integer value to add
10320+ * @v: pointer to type atomic64_unchecked_t
10321+ *
10322+ * Atomically adds @i to @v.
10323+ */
10324+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10325+{
10326 asm volatile(LOCK_PREFIX "addq %1,%0"
10327 : "=m" (v->counter)
10328 : "er" (i), "m" (v->counter));
10329@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
10330 */
10331 static inline void atomic64_sub(long i, atomic64_t *v)
10332 {
10333- asm volatile(LOCK_PREFIX "subq %1,%0"
10334+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10335+
10336+#ifdef CONFIG_PAX_REFCOUNT
10337+ "jno 0f\n"
10338+ LOCK_PREFIX "addq %1,%0\n"
10339+ "int $4\n0:\n"
10340+ _ASM_EXTABLE(0b, 0b)
10341+#endif
10342+
10343 : "=m" (v->counter)
10344 : "er" (i), "m" (v->counter));
10345 }
10346@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10347 {
10348 unsigned char c;
10349
10350- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10351+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
10352+
10353+#ifdef CONFIG_PAX_REFCOUNT
10354+ "jno 0f\n"
10355+ LOCK_PREFIX "addq %2,%0\n"
10356+ "int $4\n0:\n"
10357+ _ASM_EXTABLE(0b, 0b)
10358+#endif
10359+
10360+ "sete %1\n"
10361 : "=m" (v->counter), "=qm" (c)
10362 : "er" (i), "m" (v->counter) : "memory");
10363 return c;
10364@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10365 */
10366 static inline void atomic64_inc(atomic64_t *v)
10367 {
10368+ asm volatile(LOCK_PREFIX "incq %0\n"
10369+
10370+#ifdef CONFIG_PAX_REFCOUNT
10371+ "jno 0f\n"
10372+ LOCK_PREFIX "decq %0\n"
10373+ "int $4\n0:\n"
10374+ _ASM_EXTABLE(0b, 0b)
10375+#endif
10376+
10377+ : "=m" (v->counter)
10378+ : "m" (v->counter));
10379+}
10380+
10381+/**
10382+ * atomic64_inc_unchecked - increment atomic64 variable
10383+ * @v: pointer to type atomic64_unchecked_t
10384+ *
10385+ * Atomically increments @v by 1.
10386+ */
10387+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10388+{
10389 asm volatile(LOCK_PREFIX "incq %0"
10390 : "=m" (v->counter)
10391 : "m" (v->counter));
10392@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
10393 */
10394 static inline void atomic64_dec(atomic64_t *v)
10395 {
10396- asm volatile(LOCK_PREFIX "decq %0"
10397+ asm volatile(LOCK_PREFIX "decq %0\n"
10398+
10399+#ifdef CONFIG_PAX_REFCOUNT
10400+ "jno 0f\n"
10401+ LOCK_PREFIX "incq %0\n"
10402+ "int $4\n0:\n"
10403+ _ASM_EXTABLE(0b, 0b)
10404+#endif
10405+
10406+ : "=m" (v->counter)
10407+ : "m" (v->counter));
10408+}
10409+
10410+/**
10411+ * atomic64_dec_unchecked - decrement atomic64 variable
10412+ * @v: pointer to type atomic64_unchecked_t
10413+ *
10414+ * Atomically decrements @v by 1.
10415+ */
10416+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10417+{
10418+ asm volatile(LOCK_PREFIX "decq %0\n"
10419 : "=m" (v->counter)
10420 : "m" (v->counter));
10421 }
10422@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
10423 {
10424 unsigned char c;
10425
10426- asm volatile(LOCK_PREFIX "decq %0; sete %1"
10427+ asm volatile(LOCK_PREFIX "decq %0\n"
10428+
10429+#ifdef CONFIG_PAX_REFCOUNT
10430+ "jno 0f\n"
10431+ LOCK_PREFIX "incq %0\n"
10432+ "int $4\n0:\n"
10433+ _ASM_EXTABLE(0b, 0b)
10434+#endif
10435+
10436+ "sete %1\n"
10437 : "=m" (v->counter), "=qm" (c)
10438 : "m" (v->counter) : "memory");
10439 return c != 0;
10440@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10441 {
10442 unsigned char c;
10443
10444- asm volatile(LOCK_PREFIX "incq %0; sete %1"
10445+ asm volatile(LOCK_PREFIX "incq %0\n"
10446+
10447+#ifdef CONFIG_PAX_REFCOUNT
10448+ "jno 0f\n"
10449+ LOCK_PREFIX "decq %0\n"
10450+ "int $4\n0:\n"
10451+ _ASM_EXTABLE(0b, 0b)
10452+#endif
10453+
10454+ "sete %1\n"
10455 : "=m" (v->counter), "=qm" (c)
10456 : "m" (v->counter) : "memory");
10457 return c != 0;
10458@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10459 {
10460 unsigned char c;
10461
10462- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10463+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
10464+
10465+#ifdef CONFIG_PAX_REFCOUNT
10466+ "jno 0f\n"
10467+ LOCK_PREFIX "subq %2,%0\n"
10468+ "int $4\n0:\n"
10469+ _ASM_EXTABLE(0b, 0b)
10470+#endif
10471+
10472+ "sets %1\n"
10473 : "=m" (v->counter), "=qm" (c)
10474 : "er" (i), "m" (v->counter) : "memory");
10475 return c;
10476@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10477 static inline long atomic64_add_return(long i, atomic64_t *v)
10478 {
10479 long __i = i;
10480- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
10481+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
10482+
10483+#ifdef CONFIG_PAX_REFCOUNT
10484+ "jno 0f\n"
10485+ "movq %0, %1\n"
10486+ "int $4\n0:\n"
10487+ _ASM_EXTABLE(0b, 0b)
10488+#endif
10489+
10490+ : "+r" (i), "+m" (v->counter)
10491+ : : "memory");
10492+ return i + __i;
10493+}
10494+
10495+/**
10496+ * atomic64_add_return_unchecked - add and return
10497+ * @i: integer value to add
10498+ * @v: pointer to type atomic64_unchecked_t
10499+ *
10500+ * Atomically adds @i to @v and returns @i + @v
10501+ */
10502+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10503+{
10504+ long __i = i;
10505+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
10506 : "+r" (i), "+m" (v->counter)
10507 : : "memory");
10508 return i + __i;
10509@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10510 }
10511
10512 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10513+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10514+{
10515+ return atomic64_add_return_unchecked(1, v);
10516+}
10517 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10518
10519 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10520@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10521 return cmpxchg(&v->counter, old, new);
10522 }
10523
10524+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10525+{
10526+ return cmpxchg(&v->counter, old, new);
10527+}
10528+
10529 static inline long atomic64_xchg(atomic64_t *v, long new)
10530 {
10531 return xchg(&v->counter, new);
10532 }
10533
10534+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10535+{
10536+ return xchg(&v->counter, new);
10537+}
10538+
10539 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
10540 {
10541 return cmpxchg(&v->counter, old, new);
10542 }
10543
10544+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10545+{
10546+ return cmpxchg(&v->counter, old, new);
10547+}
10548+
10549 static inline long atomic_xchg(atomic_t *v, int new)
10550 {
10551 return xchg(&v->counter, new);
10552 }
10553
10554+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10555+{
10556+ return xchg(&v->counter, new);
10557+}
10558+
10559 /**
10560 * atomic_add_unless - add unless the number is a given value
10561 * @v: pointer of type atomic_t
10562@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
10563 */
10564 static inline int atomic_add_unless(atomic_t *v, int a, int u)
10565 {
10566- int c, old;
10567+ int c, old, new;
10568 c = atomic_read(v);
10569 for (;;) {
10570- if (unlikely(c == (u)))
10571+ if (unlikely(c == u))
10572 break;
10573- old = atomic_cmpxchg((v), c, c + (a));
10574+
10575+ asm volatile("addl %2,%0\n"
10576+
10577+#ifdef CONFIG_PAX_REFCOUNT
10578+ "jno 0f\n"
10579+ "subl %2,%0\n"
10580+ "int $4\n0:\n"
10581+ _ASM_EXTABLE(0b, 0b)
10582+#endif
10583+
10584+ : "=r" (new)
10585+ : "0" (c), "ir" (a));
10586+
10587+ old = atomic_cmpxchg(v, c, new);
10588 if (likely(old == c))
10589 break;
10590 c = old;
10591 }
10592- return c != (u);
10593+ return c != u;
10594 }
10595
10596 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
10597@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10598 */
10599 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10600 {
10601- long c, old;
10602+ long c, old, new;
10603 c = atomic64_read(v);
10604 for (;;) {
10605- if (unlikely(c == (u)))
10606+ if (unlikely(c == u))
10607 break;
10608- old = atomic64_cmpxchg((v), c, c + (a));
10609+
10610+ asm volatile("addq %2,%0\n"
10611+
10612+#ifdef CONFIG_PAX_REFCOUNT
10613+ "jno 0f\n"
10614+ "subq %2,%0\n"
10615+ "int $4\n0:\n"
10616+ _ASM_EXTABLE(0b, 0b)
10617+#endif
10618+
10619+ : "=r" (new)
10620+ : "0" (c), "er" (a));
10621+
10622+ old = atomic64_cmpxchg(v, c, new);
10623 if (likely(old == c))
10624 break;
10625 c = old;
10626 }
10627- return c != (u);
10628+ return c != u;
10629 }
10630
10631 /**
10632diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10633index 02b47a6..d5c4b15 100644
10634--- a/arch/x86/include/asm/bitops.h
10635+++ b/arch/x86/include/asm/bitops.h
10636@@ -38,7 +38,7 @@
10637 * a mask operation on a byte.
10638 */
10639 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10640-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10641+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10642 #define CONST_MASK(nr) (1 << ((nr) & 7))
10643
10644 /**
10645diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10646index 7a10659..8bbf355 100644
10647--- a/arch/x86/include/asm/boot.h
10648+++ b/arch/x86/include/asm/boot.h
10649@@ -11,10 +11,15 @@
10650 #include <asm/pgtable_types.h>
10651
10652 /* Physical address where kernel should be loaded. */
10653-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10654+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10655 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10656 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10657
10658+#ifndef __ASSEMBLY__
10659+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10660+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10661+#endif
10662+
10663 /* Minimum kernel alignment, as a power of two */
10664 #ifdef CONFIG_X86_64
10665 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10666diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10667index 549860d..7d45f68 100644
10668--- a/arch/x86/include/asm/cache.h
10669+++ b/arch/x86/include/asm/cache.h
10670@@ -5,9 +5,10 @@
10671
10672 /* L1 cache line size */
10673 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10674-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10675+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10676
10677 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
10678+#define __read_only __attribute__((__section__(".data.read_only")))
10679
10680 #ifdef CONFIG_X86_VSMP
10681 /* vSMP Internode cacheline shift */
10682diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10683index b54f6af..5b376a6 100644
10684--- a/arch/x86/include/asm/cacheflush.h
10685+++ b/arch/x86/include/asm/cacheflush.h
10686@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
10687 static inline unsigned long get_page_memtype(struct page *pg)
10688 {
10689 if (!PageUncached(pg) && !PageWC(pg))
10690- return -1;
10691+ return ~0UL;
10692 else if (!PageUncached(pg) && PageWC(pg))
10693 return _PAGE_CACHE_WC;
10694 else if (PageUncached(pg) && !PageWC(pg))
10695@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
10696 SetPageWC(pg);
10697 break;
10698 default:
10699- case -1:
10700+ case ~0UL:
10701 ClearPageUncached(pg);
10702 ClearPageWC(pg);
10703 break;
10704diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
10705index 0e63c9a..ab8d972 100644
10706--- a/arch/x86/include/asm/calling.h
10707+++ b/arch/x86/include/asm/calling.h
10708@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
10709 * for assembly code:
10710 */
10711
10712-#define R15 0
10713-#define R14 8
10714-#define R13 16
10715-#define R12 24
10716-#define RBP 32
10717-#define RBX 40
10718+#define R15 (0)
10719+#define R14 (8)
10720+#define R13 (16)
10721+#define R12 (24)
10722+#define RBP (32)
10723+#define RBX (40)
10724
10725 /* arguments: interrupts/non tracing syscalls only save up to here: */
10726-#define R11 48
10727-#define R10 56
10728-#define R9 64
10729-#define R8 72
10730-#define RAX 80
10731-#define RCX 88
10732-#define RDX 96
10733-#define RSI 104
10734-#define RDI 112
10735-#define ORIG_RAX 120 /* + error_code */
10736+#define R11 (48)
10737+#define R10 (56)
10738+#define R9 (64)
10739+#define R8 (72)
10740+#define RAX (80)
10741+#define RCX (88)
10742+#define RDX (96)
10743+#define RSI (104)
10744+#define RDI (112)
10745+#define ORIG_RAX (120) /* + error_code */
10746 /* end of arguments */
10747
10748 /* cpu exception frame or undefined in case of fast syscall: */
10749-#define RIP 128
10750-#define CS 136
10751-#define EFLAGS 144
10752-#define RSP 152
10753-#define SS 160
10754+#define RIP (128)
10755+#define CS (136)
10756+#define EFLAGS (144)
10757+#define RSP (152)
10758+#define SS (160)
10759
10760 #define ARGOFFSET R11
10761 #define SWFRAME ORIG_RAX
10762diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10763index 46fc474..b02b0f9 100644
10764--- a/arch/x86/include/asm/checksum_32.h
10765+++ b/arch/x86/include/asm/checksum_32.h
10766@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10767 int len, __wsum sum,
10768 int *src_err_ptr, int *dst_err_ptr);
10769
10770+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10771+ int len, __wsum sum,
10772+ int *src_err_ptr, int *dst_err_ptr);
10773+
10774+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10775+ int len, __wsum sum,
10776+ int *src_err_ptr, int *dst_err_ptr);
10777+
10778 /*
10779 * Note: when you get a NULL pointer exception here this means someone
10780 * passed in an incorrect kernel address to one of these functions.
10781@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10782 int *err_ptr)
10783 {
10784 might_sleep();
10785- return csum_partial_copy_generic((__force void *)src, dst,
10786+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10787 len, sum, err_ptr, NULL);
10788 }
10789
10790@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10791 {
10792 might_sleep();
10793 if (access_ok(VERIFY_WRITE, dst, len))
10794- return csum_partial_copy_generic(src, (__force void *)dst,
10795+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10796 len, sum, NULL, err_ptr);
10797
10798 if (len)
10799diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10800index 617bd56..7b047a1 100644
10801--- a/arch/x86/include/asm/desc.h
10802+++ b/arch/x86/include/asm/desc.h
10803@@ -4,6 +4,7 @@
10804 #include <asm/desc_defs.h>
10805 #include <asm/ldt.h>
10806 #include <asm/mmu.h>
10807+#include <asm/pgtable.h>
10808 #include <linux/smp.h>
10809
10810 static inline void fill_ldt(struct desc_struct *desc,
10811@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
10812 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10813 desc->type = (info->read_exec_only ^ 1) << 1;
10814 desc->type |= info->contents << 2;
10815+ desc->type |= info->seg_not_present ^ 1;
10816 desc->s = 1;
10817 desc->dpl = 0x3;
10818 desc->p = info->seg_not_present ^ 1;
10819@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
10820 }
10821
10822 extern struct desc_ptr idt_descr;
10823-extern gate_desc idt_table[];
10824-
10825-struct gdt_page {
10826- struct desc_struct gdt[GDT_ENTRIES];
10827-} __attribute__((aligned(PAGE_SIZE)));
10828-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10829+extern gate_desc idt_table[256];
10830
10831+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10832 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10833 {
10834- return per_cpu(gdt_page, cpu).gdt;
10835+ return cpu_gdt_table[cpu];
10836 }
10837
10838 #ifdef CONFIG_X86_64
10839@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10840 unsigned long base, unsigned dpl, unsigned flags,
10841 unsigned short seg)
10842 {
10843- gate->a = (seg << 16) | (base & 0xffff);
10844- gate->b = (base & 0xffff0000) |
10845- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10846+ gate->gate.offset_low = base;
10847+ gate->gate.seg = seg;
10848+ gate->gate.reserved = 0;
10849+ gate->gate.type = type;
10850+ gate->gate.s = 0;
10851+ gate->gate.dpl = dpl;
10852+ gate->gate.p = 1;
10853+ gate->gate.offset_high = base >> 16;
10854 }
10855
10856 #endif
10857@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10858 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10859 const gate_desc *gate)
10860 {
10861+ pax_open_kernel();
10862 memcpy(&idt[entry], gate, sizeof(*gate));
10863+ pax_close_kernel();
10864 }
10865
10866 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10867 const void *desc)
10868 {
10869+ pax_open_kernel();
10870 memcpy(&ldt[entry], desc, 8);
10871+ pax_close_kernel();
10872 }
10873
10874 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10875@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10876 size = sizeof(struct desc_struct);
10877 break;
10878 }
10879+
10880+ pax_open_kernel();
10881 memcpy(&gdt[entry], desc, size);
10882+ pax_close_kernel();
10883 }
10884
10885 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10886@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10887
10888 static inline void native_load_tr_desc(void)
10889 {
10890+ pax_open_kernel();
10891 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10892+ pax_close_kernel();
10893 }
10894
10895 static inline void native_load_gdt(const struct desc_ptr *dtr)
10896@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10897 unsigned int i;
10898 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10899
10900+ pax_open_kernel();
10901 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10902 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10903+ pax_close_kernel();
10904 }
10905
10906 #define _LDT_empty(info) \
10907@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10908 desc->limit = (limit >> 16) & 0xf;
10909 }
10910
10911-static inline void _set_gate(int gate, unsigned type, void *addr,
10912+static inline void _set_gate(int gate, unsigned type, const void *addr,
10913 unsigned dpl, unsigned ist, unsigned seg)
10914 {
10915 gate_desc s;
10916@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10917 * Pentium F0 0F bugfix can have resulted in the mapped
10918 * IDT being write-protected.
10919 */
10920-static inline void set_intr_gate(unsigned int n, void *addr)
10921+static inline void set_intr_gate(unsigned int n, const void *addr)
10922 {
10923 BUG_ON((unsigned)n > 0xFF);
10924 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10925@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10926 /*
10927 * This routine sets up an interrupt gate at directory privilege level 3.
10928 */
10929-static inline void set_system_intr_gate(unsigned int n, void *addr)
10930+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10931 {
10932 BUG_ON((unsigned)n > 0xFF);
10933 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10934 }
10935
10936-static inline void set_system_trap_gate(unsigned int n, void *addr)
10937+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10938 {
10939 BUG_ON((unsigned)n > 0xFF);
10940 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10941 }
10942
10943-static inline void set_trap_gate(unsigned int n, void *addr)
10944+static inline void set_trap_gate(unsigned int n, const void *addr)
10945 {
10946 BUG_ON((unsigned)n > 0xFF);
10947 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10948@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10949 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10950 {
10951 BUG_ON((unsigned)n > 0xFF);
10952- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10953+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10954 }
10955
10956-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10957+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10958 {
10959 BUG_ON((unsigned)n > 0xFF);
10960 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10961 }
10962
10963-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10964+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10965 {
10966 BUG_ON((unsigned)n > 0xFF);
10967 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10968 }
10969
10970+#ifdef CONFIG_X86_32
10971+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10972+{
10973+ struct desc_struct d;
10974+
10975+ if (likely(limit))
10976+ limit = (limit - 1UL) >> PAGE_SHIFT;
10977+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10978+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10979+}
10980+#endif
10981+
10982 #endif /* _ASM_X86_DESC_H */
10983diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10984index 9d66848..6b4a691 100644
10985--- a/arch/x86/include/asm/desc_defs.h
10986+++ b/arch/x86/include/asm/desc_defs.h
10987@@ -31,6 +31,12 @@ struct desc_struct {
10988 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10989 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10990 };
10991+ struct {
10992+ u16 offset_low;
10993+ u16 seg;
10994+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10995+ unsigned offset_high: 16;
10996+ } gate;
10997 };
10998 } __attribute__((packed));
10999
11000diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
11001index cee34e9..a7c3fa2 100644
11002--- a/arch/x86/include/asm/device.h
11003+++ b/arch/x86/include/asm/device.h
11004@@ -6,7 +6,7 @@ struct dev_archdata {
11005 void *acpi_handle;
11006 #endif
11007 #ifdef CONFIG_X86_64
11008-struct dma_map_ops *dma_ops;
11009+ const struct dma_map_ops *dma_ops;
11010 #endif
11011 #ifdef CONFIG_DMAR
11012 void *iommu; /* hook for IOMMU specific extension */
11013diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
11014index 6a25d5d..786b202 100644
11015--- a/arch/x86/include/asm/dma-mapping.h
11016+++ b/arch/x86/include/asm/dma-mapping.h
11017@@ -25,9 +25,9 @@ extern int iommu_merge;
11018 extern struct device x86_dma_fallback_dev;
11019 extern int panic_on_overflow;
11020
11021-extern struct dma_map_ops *dma_ops;
11022+extern const struct dma_map_ops *dma_ops;
11023
11024-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11025+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
11026 {
11027 #ifdef CONFIG_X86_32
11028 return dma_ops;
11029@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11030 /* Make sure we keep the same behaviour */
11031 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
11032 {
11033- struct dma_map_ops *ops = get_dma_ops(dev);
11034+ const struct dma_map_ops *ops = get_dma_ops(dev);
11035 if (ops->mapping_error)
11036 return ops->mapping_error(dev, dma_addr);
11037
11038@@ -122,7 +122,7 @@ static inline void *
11039 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11040 gfp_t gfp)
11041 {
11042- struct dma_map_ops *ops = get_dma_ops(dev);
11043+ const struct dma_map_ops *ops = get_dma_ops(dev);
11044 void *memory;
11045
11046 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
11047@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11048 static inline void dma_free_coherent(struct device *dev, size_t size,
11049 void *vaddr, dma_addr_t bus)
11050 {
11051- struct dma_map_ops *ops = get_dma_ops(dev);
11052+ const struct dma_map_ops *ops = get_dma_ops(dev);
11053
11054 WARN_ON(irqs_disabled()); /* for portability */
11055
11056diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11057index 40b4e61..40d8133 100644
11058--- a/arch/x86/include/asm/e820.h
11059+++ b/arch/x86/include/asm/e820.h
11060@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
11061 #define ISA_END_ADDRESS 0x100000
11062 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
11063
11064-#define BIOS_BEGIN 0x000a0000
11065+#define BIOS_BEGIN 0x000c0000
11066 #define BIOS_END 0x00100000
11067
11068 #ifdef __KERNEL__
11069diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11070index 8ac9d9a..0a6c96e 100644
11071--- a/arch/x86/include/asm/elf.h
11072+++ b/arch/x86/include/asm/elf.h
11073@@ -257,7 +257,25 @@ extern int force_personality32;
11074 the loader. We need to make sure that it is out of the way of the program
11075 that it will "exec", and that there is sufficient room for the brk. */
11076
11077+#ifdef CONFIG_PAX_SEGMEXEC
11078+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11079+#else
11080 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11081+#endif
11082+
11083+#ifdef CONFIG_PAX_ASLR
11084+#ifdef CONFIG_X86_32
11085+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11086+
11087+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11088+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11089+#else
11090+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11091+
11092+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11093+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11094+#endif
11095+#endif
11096
11097 /* This yields a mask that user programs can use to figure out what
11098 instruction set this CPU supports. This could be done in user space,
11099@@ -310,9 +328,7 @@ do { \
11100
11101 #define ARCH_DLINFO \
11102 do { \
11103- if (vdso_enabled) \
11104- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11105- (unsigned long)current->mm->context.vdso); \
11106+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11107 } while (0)
11108
11109 #define AT_SYSINFO 32
11110@@ -323,7 +339,7 @@ do { \
11111
11112 #endif /* !CONFIG_X86_32 */
11113
11114-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11115+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11116
11117 #define VDSO_ENTRY \
11118 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11119@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
11120 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11121 #define compat_arch_setup_additional_pages syscall32_setup_pages
11122
11123-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11124-#define arch_randomize_brk arch_randomize_brk
11125-
11126 #endif /* _ASM_X86_ELF_H */
11127diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11128index cc70c1c..d96d011 100644
11129--- a/arch/x86/include/asm/emergency-restart.h
11130+++ b/arch/x86/include/asm/emergency-restart.h
11131@@ -15,6 +15,6 @@ enum reboot_type {
11132
11133 extern enum reboot_type reboot_type;
11134
11135-extern void machine_emergency_restart(void);
11136+extern void machine_emergency_restart(void) __noreturn;
11137
11138 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11139diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
11140index dbe82a5..c6d8a00 100644
11141--- a/arch/x86/include/asm/floppy.h
11142+++ b/arch/x86/include/asm/floppy.h
11143@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
11144 }
11145
11146
11147+static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
11148 static unsigned long vdma_mem_alloc(unsigned long size)
11149 {
11150 return (unsigned long)vmalloc(size);
11151diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11152index 1f11ce4..7caabd1 100644
11153--- a/arch/x86/include/asm/futex.h
11154+++ b/arch/x86/include/asm/futex.h
11155@@ -12,16 +12,18 @@
11156 #include <asm/system.h>
11157
11158 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11159+ typecheck(u32 __user *, uaddr); \
11160 asm volatile("1:\t" insn "\n" \
11161 "2:\t.section .fixup,\"ax\"\n" \
11162 "3:\tmov\t%3, %1\n" \
11163 "\tjmp\t2b\n" \
11164 "\t.previous\n" \
11165 _ASM_EXTABLE(1b, 3b) \
11166- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11167+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
11168 : "i" (-EFAULT), "0" (oparg), "1" (0))
11169
11170 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11171+ typecheck(u32 __user *, uaddr); \
11172 asm volatile("1:\tmovl %2, %0\n" \
11173 "\tmovl\t%0, %3\n" \
11174 "\t" insn "\n" \
11175@@ -34,10 +36,10 @@
11176 _ASM_EXTABLE(1b, 4b) \
11177 _ASM_EXTABLE(2b, 4b) \
11178 : "=&a" (oldval), "=&r" (ret), \
11179- "+m" (*uaddr), "=&r" (tem) \
11180+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11181 : "r" (oparg), "i" (-EFAULT), "1" (0))
11182
11183-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11184+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11185 {
11186 int op = (encoded_op >> 28) & 7;
11187 int cmp = (encoded_op >> 24) & 15;
11188@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11189
11190 switch (op) {
11191 case FUTEX_OP_SET:
11192- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11193+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11194 break;
11195 case FUTEX_OP_ADD:
11196- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11197+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11198 uaddr, oparg);
11199 break;
11200 case FUTEX_OP_OR:
11201@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11202 return ret;
11203 }
11204
11205-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11206+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
11207 int newval)
11208 {
11209
11210@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11211 return -ENOSYS;
11212 #endif
11213
11214- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
11215+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
11216 return -EFAULT;
11217
11218- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
11219+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
11220 "2:\t.section .fixup, \"ax\"\n"
11221 "3:\tmov %2, %0\n"
11222 "\tjmp 2b\n"
11223 "\t.previous\n"
11224 _ASM_EXTABLE(1b, 3b)
11225- : "=a" (oldval), "+m" (*uaddr)
11226+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
11227 : "i" (-EFAULT), "r" (newval), "0" (oldval)
11228 : "memory"
11229 );
11230diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11231index ba180d9..3bad351 100644
11232--- a/arch/x86/include/asm/hw_irq.h
11233+++ b/arch/x86/include/asm/hw_irq.h
11234@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
11235 extern void enable_IO_APIC(void);
11236
11237 /* Statistics */
11238-extern atomic_t irq_err_count;
11239-extern atomic_t irq_mis_count;
11240+extern atomic_unchecked_t irq_err_count;
11241+extern atomic_unchecked_t irq_mis_count;
11242
11243 /* EISA */
11244 extern void eisa_set_level_irq(unsigned int irq);
11245diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
11246index 0b20bbb..4cb1396 100644
11247--- a/arch/x86/include/asm/i387.h
11248+++ b/arch/x86/include/asm/i387.h
11249@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11250 {
11251 int err;
11252
11253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11255+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
11256+#endif
11257+
11258 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
11259 "2:\n"
11260 ".section .fixup,\"ax\"\n"
11261@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
11262 {
11263 int err;
11264
11265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11266+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11267+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
11268+#endif
11269+
11270 asm volatile("1: rex64/fxsave (%[fx])\n\t"
11271 "2:\n"
11272 ".section .fixup,\"ax\"\n"
11273@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11274 }
11275
11276 /* We need a safe address that is cheap to find and that is already
11277- in L1 during context switch. The best choices are unfortunately
11278- different for UP and SMP */
11279-#ifdef CONFIG_SMP
11280-#define safe_address (__per_cpu_offset[0])
11281-#else
11282-#define safe_address (kstat_cpu(0).cpustat.user)
11283-#endif
11284+ in L1 during context switch. */
11285+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
11286
11287 /*
11288 * These must be called with preempt disabled
11289@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
11290 struct thread_info *me = current_thread_info();
11291 preempt_disable();
11292 if (me->status & TS_USEDFPU)
11293- __save_init_fpu(me->task);
11294+ __save_init_fpu(current);
11295 else
11296 clts();
11297 }
11298diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
11299index a299900..15c5410 100644
11300--- a/arch/x86/include/asm/io_32.h
11301+++ b/arch/x86/include/asm/io_32.h
11302@@ -3,6 +3,7 @@
11303
11304 #include <linux/string.h>
11305 #include <linux/compiler.h>
11306+#include <asm/processor.h>
11307
11308 /*
11309 * This file contains the definitions for the x86 IO instructions
11310@@ -42,6 +43,17 @@
11311
11312 #ifdef __KERNEL__
11313
11314+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11315+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11316+{
11317+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11318+}
11319+
11320+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11321+{
11322+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11323+}
11324+
11325 #include <asm-generic/iomap.h>
11326
11327 #include <linux/vmalloc.h>
11328diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
11329index 2440678..c158b88 100644
11330--- a/arch/x86/include/asm/io_64.h
11331+++ b/arch/x86/include/asm/io_64.h
11332@@ -140,6 +140,17 @@ __OUTS(l)
11333
11334 #include <linux/vmalloc.h>
11335
11336+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11337+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11338+{
11339+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11340+}
11341+
11342+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11343+{
11344+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11345+}
11346+
11347 #include <asm-generic/iomap.h>
11348
11349 void __memcpy_fromio(void *, unsigned long, unsigned);
11350diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
11351index fd6d21b..8b13915 100644
11352--- a/arch/x86/include/asm/iommu.h
11353+++ b/arch/x86/include/asm/iommu.h
11354@@ -3,7 +3,7 @@
11355
11356 extern void pci_iommu_shutdown(void);
11357 extern void no_iommu_init(void);
11358-extern struct dma_map_ops nommu_dma_ops;
11359+extern const struct dma_map_ops nommu_dma_ops;
11360 extern int force_iommu, no_iommu;
11361 extern int iommu_detected;
11362 extern int iommu_pass_through;
11363diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11364index 9e2b952..557206e 100644
11365--- a/arch/x86/include/asm/irqflags.h
11366+++ b/arch/x86/include/asm/irqflags.h
11367@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
11368 sti; \
11369 sysexit
11370
11371+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11372+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11373+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11374+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11375+
11376 #else
11377 #define INTERRUPT_RETURN iret
11378 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11379diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11380index 4fe681d..bb6d40c 100644
11381--- a/arch/x86/include/asm/kprobes.h
11382+++ b/arch/x86/include/asm/kprobes.h
11383@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
11384 #define BREAKPOINT_INSTRUCTION 0xcc
11385 #define RELATIVEJUMP_INSTRUCTION 0xe9
11386 #define MAX_INSN_SIZE 16
11387-#define MAX_STACK_SIZE 64
11388-#define MIN_STACK_SIZE(ADDR) \
11389- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11390- THREAD_SIZE - (unsigned long)(ADDR))) \
11391- ? (MAX_STACK_SIZE) \
11392- : (((unsigned long)current_thread_info()) + \
11393- THREAD_SIZE - (unsigned long)(ADDR)))
11394+#define MAX_STACK_SIZE 64UL
11395+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11396
11397 #define flush_insn_slot(p) do { } while (0)
11398
11399diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11400index 08bc2ff..acafd8f 100644
11401--- a/arch/x86/include/asm/kvm_host.h
11402+++ b/arch/x86/include/asm/kvm_host.h
11403@@ -534,9 +534,9 @@ struct kvm_x86_ops {
11404 bool (*gb_page_enable)(void);
11405
11406 const struct trace_print_flags *exit_reasons_str;
11407-};
11408+} __do_const;
11409
11410-extern struct kvm_x86_ops *kvm_x86_ops;
11411+extern const struct kvm_x86_ops *kvm_x86_ops;
11412
11413 int kvm_mmu_module_init(void);
11414 void kvm_mmu_module_exit(void);
11415@@ -558,9 +558,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
11416 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
11417
11418 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
11419- const void *val, int bytes);
11420+ const void *val, int bytes) __size_overflow(2);
11421 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
11422- gpa_t addr, unsigned long *ret);
11423+ gpa_t addr, unsigned long *ret) __size_overflow(2,3);
11424 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
11425
11426 extern bool tdp_enabled;
11427@@ -619,7 +619,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
11428 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
11429
11430 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
11431-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
11432+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) __size_overflow(3);
11433
11434 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
11435 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
11436@@ -643,7 +643,7 @@ unsigned long segment_base(u16 selector);
11437 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
11438 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
11439 const u8 *new, int bytes,
11440- bool guest_initiated);
11441+ bool guest_initiated) __size_overflow(2);
11442 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
11443 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
11444 int kvm_mmu_load(struct kvm_vcpu *vcpu);
11445diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11446index 47b9b6f..815aaa1 100644
11447--- a/arch/x86/include/asm/local.h
11448+++ b/arch/x86/include/asm/local.h
11449@@ -18,26 +18,58 @@ typedef struct {
11450
11451 static inline void local_inc(local_t *l)
11452 {
11453- asm volatile(_ASM_INC "%0"
11454+ asm volatile(_ASM_INC "%0\n"
11455+
11456+#ifdef CONFIG_PAX_REFCOUNT
11457+ "jno 0f\n"
11458+ _ASM_DEC "%0\n"
11459+ "int $4\n0:\n"
11460+ _ASM_EXTABLE(0b, 0b)
11461+#endif
11462+
11463 : "+m" (l->a.counter));
11464 }
11465
11466 static inline void local_dec(local_t *l)
11467 {
11468- asm volatile(_ASM_DEC "%0"
11469+ asm volatile(_ASM_DEC "%0\n"
11470+
11471+#ifdef CONFIG_PAX_REFCOUNT
11472+ "jno 0f\n"
11473+ _ASM_INC "%0\n"
11474+ "int $4\n0:\n"
11475+ _ASM_EXTABLE(0b, 0b)
11476+#endif
11477+
11478 : "+m" (l->a.counter));
11479 }
11480
11481 static inline void local_add(long i, local_t *l)
11482 {
11483- asm volatile(_ASM_ADD "%1,%0"
11484+ asm volatile(_ASM_ADD "%1,%0\n"
11485+
11486+#ifdef CONFIG_PAX_REFCOUNT
11487+ "jno 0f\n"
11488+ _ASM_SUB "%1,%0\n"
11489+ "int $4\n0:\n"
11490+ _ASM_EXTABLE(0b, 0b)
11491+#endif
11492+
11493 : "+m" (l->a.counter)
11494 : "ir" (i));
11495 }
11496
11497 static inline void local_sub(long i, local_t *l)
11498 {
11499- asm volatile(_ASM_SUB "%1,%0"
11500+ asm volatile(_ASM_SUB "%1,%0\n"
11501+
11502+#ifdef CONFIG_PAX_REFCOUNT
11503+ "jno 0f\n"
11504+ _ASM_ADD "%1,%0\n"
11505+ "int $4\n0:\n"
11506+ _ASM_EXTABLE(0b, 0b)
11507+#endif
11508+
11509 : "+m" (l->a.counter)
11510 : "ir" (i));
11511 }
11512@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11513 {
11514 unsigned char c;
11515
11516- asm volatile(_ASM_SUB "%2,%0; sete %1"
11517+ asm volatile(_ASM_SUB "%2,%0\n"
11518+
11519+#ifdef CONFIG_PAX_REFCOUNT
11520+ "jno 0f\n"
11521+ _ASM_ADD "%2,%0\n"
11522+ "int $4\n0:\n"
11523+ _ASM_EXTABLE(0b, 0b)
11524+#endif
11525+
11526+ "sete %1\n"
11527 : "+m" (l->a.counter), "=qm" (c)
11528 : "ir" (i) : "memory");
11529 return c;
11530@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
11531 {
11532 unsigned char c;
11533
11534- asm volatile(_ASM_DEC "%0; sete %1"
11535+ asm volatile(_ASM_DEC "%0\n"
11536+
11537+#ifdef CONFIG_PAX_REFCOUNT
11538+ "jno 0f\n"
11539+ _ASM_INC "%0\n"
11540+ "int $4\n0:\n"
11541+ _ASM_EXTABLE(0b, 0b)
11542+#endif
11543+
11544+ "sete %1\n"
11545 : "+m" (l->a.counter), "=qm" (c)
11546 : : "memory");
11547 return c != 0;
11548@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
11549 {
11550 unsigned char c;
11551
11552- asm volatile(_ASM_INC "%0; sete %1"
11553+ asm volatile(_ASM_INC "%0\n"
11554+
11555+#ifdef CONFIG_PAX_REFCOUNT
11556+ "jno 0f\n"
11557+ _ASM_DEC "%0\n"
11558+ "int $4\n0:\n"
11559+ _ASM_EXTABLE(0b, 0b)
11560+#endif
11561+
11562+ "sete %1\n"
11563 : "+m" (l->a.counter), "=qm" (c)
11564 : : "memory");
11565 return c != 0;
11566@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
11567 {
11568 unsigned char c;
11569
11570- asm volatile(_ASM_ADD "%2,%0; sets %1"
11571+ asm volatile(_ASM_ADD "%2,%0\n"
11572+
11573+#ifdef CONFIG_PAX_REFCOUNT
11574+ "jno 0f\n"
11575+ _ASM_SUB "%2,%0\n"
11576+ "int $4\n0:\n"
11577+ _ASM_EXTABLE(0b, 0b)
11578+#endif
11579+
11580+ "sets %1\n"
11581 : "+m" (l->a.counter), "=qm" (c)
11582 : "ir" (i) : "memory");
11583 return c;
11584@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
11585 #endif
11586 /* Modern 486+ processor */
11587 __i = i;
11588- asm volatile(_ASM_XADD "%0, %1;"
11589+ asm volatile(_ASM_XADD "%0, %1\n"
11590+
11591+#ifdef CONFIG_PAX_REFCOUNT
11592+ "jno 0f\n"
11593+ _ASM_MOV "%0,%1\n"
11594+ "int $4\n0:\n"
11595+ _ASM_EXTABLE(0b, 0b)
11596+#endif
11597+
11598 : "+r" (i), "+m" (l->a.counter)
11599 : : "memory");
11600 return i + __i;
11601diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
11602index ef51b50..514ba37 100644
11603--- a/arch/x86/include/asm/microcode.h
11604+++ b/arch/x86/include/asm/microcode.h
11605@@ -12,13 +12,13 @@ struct device;
11606 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
11607
11608 struct microcode_ops {
11609- enum ucode_state (*request_microcode_user) (int cpu,
11610+ enum ucode_state (* const request_microcode_user) (int cpu,
11611 const void __user *buf, size_t size);
11612
11613- enum ucode_state (*request_microcode_fw) (int cpu,
11614+ enum ucode_state (* const request_microcode_fw) (int cpu,
11615 struct device *device);
11616
11617- void (*microcode_fini_cpu) (int cpu);
11618+ void (* const microcode_fini_cpu) (int cpu);
11619
11620 /*
11621 * The generic 'microcode_core' part guarantees that
11622@@ -38,18 +38,18 @@ struct ucode_cpu_info {
11623 extern struct ucode_cpu_info ucode_cpu_info[];
11624
11625 #ifdef CONFIG_MICROCODE_INTEL
11626-extern struct microcode_ops * __init init_intel_microcode(void);
11627+extern const struct microcode_ops * __init init_intel_microcode(void);
11628 #else
11629-static inline struct microcode_ops * __init init_intel_microcode(void)
11630+static inline const struct microcode_ops * __init init_intel_microcode(void)
11631 {
11632 return NULL;
11633 }
11634 #endif /* CONFIG_MICROCODE_INTEL */
11635
11636 #ifdef CONFIG_MICROCODE_AMD
11637-extern struct microcode_ops * __init init_amd_microcode(void);
11638+extern const struct microcode_ops * __init init_amd_microcode(void);
11639 #else
11640-static inline struct microcode_ops * __init init_amd_microcode(void)
11641+static inline const struct microcode_ops * __init init_amd_microcode(void)
11642 {
11643 return NULL;
11644 }
11645diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11646index 593e51d..fa69c9a 100644
11647--- a/arch/x86/include/asm/mman.h
11648+++ b/arch/x86/include/asm/mman.h
11649@@ -5,4 +5,14 @@
11650
11651 #include <asm-generic/mman.h>
11652
11653+#ifdef __KERNEL__
11654+#ifndef __ASSEMBLY__
11655+#ifdef CONFIG_X86_32
11656+#define arch_mmap_check i386_mmap_check
11657+int i386_mmap_check(unsigned long addr, unsigned long len,
11658+ unsigned long flags);
11659+#endif
11660+#endif
11661+#endif
11662+
11663 #endif /* _ASM_X86_MMAN_H */
11664diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11665index 80a1dee..239c67d 100644
11666--- a/arch/x86/include/asm/mmu.h
11667+++ b/arch/x86/include/asm/mmu.h
11668@@ -9,10 +9,23 @@
11669 * we put the segment information here.
11670 */
11671 typedef struct {
11672- void *ldt;
11673+ struct desc_struct *ldt;
11674 int size;
11675 struct mutex lock;
11676- void *vdso;
11677+ unsigned long vdso;
11678+
11679+#ifdef CONFIG_X86_32
11680+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11681+ unsigned long user_cs_base;
11682+ unsigned long user_cs_limit;
11683+
11684+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11685+ cpumask_t cpu_user_cs_mask;
11686+#endif
11687+
11688+#endif
11689+#endif
11690+
11691 } mm_context_t;
11692
11693 #ifdef CONFIG_SMP
11694diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11695index 8b5393e..8143173 100644
11696--- a/arch/x86/include/asm/mmu_context.h
11697+++ b/arch/x86/include/asm/mmu_context.h
11698@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11699
11700 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11701 {
11702+
11703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11704+ unsigned int i;
11705+ pgd_t *pgd;
11706+
11707+ pax_open_kernel();
11708+ pgd = get_cpu_pgd(smp_processor_id());
11709+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11710+ set_pgd_batched(pgd+i, native_make_pgd(0));
11711+ pax_close_kernel();
11712+#endif
11713+
11714 #ifdef CONFIG_SMP
11715 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11716 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11717@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11718 struct task_struct *tsk)
11719 {
11720 unsigned cpu = smp_processor_id();
11721+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
11722+ int tlbstate = TLBSTATE_OK;
11723+#endif
11724
11725 if (likely(prev != next)) {
11726 #ifdef CONFIG_SMP
11727+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11728+ tlbstate = percpu_read(cpu_tlbstate.state);
11729+#endif
11730 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11731 percpu_write(cpu_tlbstate.active_mm, next);
11732 #endif
11733 cpumask_set_cpu(cpu, mm_cpumask(next));
11734
11735 /* Re-load page tables */
11736+#ifdef CONFIG_PAX_PER_CPU_PGD
11737+ pax_open_kernel();
11738+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11739+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11740+ pax_close_kernel();
11741+ load_cr3(get_cpu_pgd(cpu));
11742+#else
11743 load_cr3(next->pgd);
11744+#endif
11745
11746 /* stop flush ipis for the previous mm */
11747 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11748@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11749 */
11750 if (unlikely(prev->context.ldt != next->context.ldt))
11751 load_LDT_nolock(&next->context);
11752- }
11753+
11754+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11755+ if (!nx_enabled) {
11756+ smp_mb__before_clear_bit();
11757+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11758+ smp_mb__after_clear_bit();
11759+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11760+ }
11761+#endif
11762+
11763+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11764+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11765+ prev->context.user_cs_limit != next->context.user_cs_limit))
11766+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11767 #ifdef CONFIG_SMP
11768+ else if (unlikely(tlbstate != TLBSTATE_OK))
11769+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11770+#endif
11771+#endif
11772+
11773+ }
11774 else {
11775+
11776+#ifdef CONFIG_PAX_PER_CPU_PGD
11777+ pax_open_kernel();
11778+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11779+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11780+ pax_close_kernel();
11781+ load_cr3(get_cpu_pgd(cpu));
11782+#endif
11783+
11784+#ifdef CONFIG_SMP
11785 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11786 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11787
11788@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11789 * tlb flush IPI delivery. We must reload CR3
11790 * to make sure to use no freed page tables.
11791 */
11792+
11793+#ifndef CONFIG_PAX_PER_CPU_PGD
11794 load_cr3(next->pgd);
11795+#endif
11796+
11797 load_LDT_nolock(&next->context);
11798+
11799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11800+ if (!nx_enabled)
11801+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11802+#endif
11803+
11804+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11805+#ifdef CONFIG_PAX_PAGEEXEC
11806+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
11807+#endif
11808+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11809+#endif
11810+
11811 }
11812+#endif
11813 }
11814-#endif
11815 }
11816
11817 #define activate_mm(prev, next) \
11818diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11819index 3e2ce58..caaf478 100644
11820--- a/arch/x86/include/asm/module.h
11821+++ b/arch/x86/include/asm/module.h
11822@@ -5,6 +5,7 @@
11823
11824 #ifdef CONFIG_X86_64
11825 /* X86_64 does not define MODULE_PROC_FAMILY */
11826+#define MODULE_PROC_FAMILY ""
11827 #elif defined CONFIG_M386
11828 #define MODULE_PROC_FAMILY "386 "
11829 #elif defined CONFIG_M486
11830@@ -59,13 +60,26 @@
11831 #error unknown processor family
11832 #endif
11833
11834-#ifdef CONFIG_X86_32
11835-# ifdef CONFIG_4KSTACKS
11836-# define MODULE_STACKSIZE "4KSTACKS "
11837-# else
11838-# define MODULE_STACKSIZE ""
11839-# endif
11840-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
11841+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
11842+#define MODULE_STACKSIZE "4KSTACKS "
11843+#else
11844+#define MODULE_STACKSIZE ""
11845 #endif
11846
11847+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11848+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11849+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11850+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11851+#else
11852+#define MODULE_PAX_KERNEXEC ""
11853+#endif
11854+
11855+#ifdef CONFIG_PAX_MEMORY_UDEREF
11856+#define MODULE_PAX_UDEREF "UDEREF "
11857+#else
11858+#define MODULE_PAX_UDEREF ""
11859+#endif
11860+
11861+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11862+
11863 #endif /* _ASM_X86_MODULE_H */
11864diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11865index 7639dbf..e08a58c 100644
11866--- a/arch/x86/include/asm/page_64_types.h
11867+++ b/arch/x86/include/asm/page_64_types.h
11868@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11869
11870 /* duplicated to the one in bootmem.h */
11871 extern unsigned long max_pfn;
11872-extern unsigned long phys_base;
11873+extern const unsigned long phys_base;
11874
11875 extern unsigned long __phys_addr(unsigned long);
11876 #define __phys_reloc_hide(x) (x)
11877diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11878index efb3899..ef30687 100644
11879--- a/arch/x86/include/asm/paravirt.h
11880+++ b/arch/x86/include/asm/paravirt.h
11881@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11882 val);
11883 }
11884
11885+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11886+{
11887+ pgdval_t val = native_pgd_val(pgd);
11888+
11889+ if (sizeof(pgdval_t) > sizeof(long))
11890+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11891+ val, (u64)val >> 32);
11892+ else
11893+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11894+ val);
11895+}
11896+
11897 static inline void pgd_clear(pgd_t *pgdp)
11898 {
11899 set_pgd(pgdp, __pgd(0));
11900@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11901 pv_mmu_ops.set_fixmap(idx, phys, flags);
11902 }
11903
11904+#ifdef CONFIG_PAX_KERNEXEC
11905+static inline unsigned long pax_open_kernel(void)
11906+{
11907+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11908+}
11909+
11910+static inline unsigned long pax_close_kernel(void)
11911+{
11912+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11913+}
11914+#else
11915+static inline unsigned long pax_open_kernel(void) { return 0; }
11916+static inline unsigned long pax_close_kernel(void) { return 0; }
11917+#endif
11918+
11919 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11920
11921 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11922@@ -945,7 +972,7 @@ extern void default_banner(void);
11923
11924 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11925 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11926-#define PARA_INDIRECT(addr) *%cs:addr
11927+#define PARA_INDIRECT(addr) *%ss:addr
11928 #endif
11929
11930 #define INTERRUPT_RETURN \
11931@@ -1022,6 +1049,21 @@ extern void default_banner(void);
11932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11933 CLBR_NONE, \
11934 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11935+
11936+#define GET_CR0_INTO_RDI \
11937+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11938+ mov %rax,%rdi
11939+
11940+#define SET_RDI_INTO_CR0 \
11941+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11942+
11943+#define GET_CR3_INTO_RDI \
11944+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11945+ mov %rax,%rdi
11946+
11947+#define SET_RDI_INTO_CR3 \
11948+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11949+
11950 #endif /* CONFIG_X86_32 */
11951
11952 #endif /* __ASSEMBLY__ */
11953diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11954index 9357473..aeb2de5 100644
11955--- a/arch/x86/include/asm/paravirt_types.h
11956+++ b/arch/x86/include/asm/paravirt_types.h
11957@@ -78,19 +78,19 @@ struct pv_init_ops {
11958 */
11959 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11960 unsigned long addr, unsigned len);
11961-};
11962+} __no_const;
11963
11964
11965 struct pv_lazy_ops {
11966 /* Set deferred update mode, used for batching operations. */
11967 void (*enter)(void);
11968 void (*leave)(void);
11969-};
11970+} __no_const;
11971
11972 struct pv_time_ops {
11973 unsigned long long (*sched_clock)(void);
11974 unsigned long (*get_tsc_khz)(void);
11975-};
11976+} __no_const;
11977
11978 struct pv_cpu_ops {
11979 /* hooks for various privileged instructions */
11980@@ -186,7 +186,7 @@ struct pv_cpu_ops {
11981
11982 void (*start_context_switch)(struct task_struct *prev);
11983 void (*end_context_switch)(struct task_struct *next);
11984-};
11985+} __no_const;
11986
11987 struct pv_irq_ops {
11988 /*
11989@@ -217,7 +217,7 @@ struct pv_apic_ops {
11990 unsigned long start_eip,
11991 unsigned long start_esp);
11992 #endif
11993-};
11994+} __no_const;
11995
11996 struct pv_mmu_ops {
11997 unsigned long (*read_cr2)(void);
11998@@ -301,6 +301,7 @@ struct pv_mmu_ops {
11999 struct paravirt_callee_save make_pud;
12000
12001 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12002+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12003 #endif /* PAGETABLE_LEVELS == 4 */
12004 #endif /* PAGETABLE_LEVELS >= 3 */
12005
12006@@ -316,6 +317,12 @@ struct pv_mmu_ops {
12007 an mfn. We can tell which is which from the index. */
12008 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12009 phys_addr_t phys, pgprot_t flags);
12010+
12011+#ifdef CONFIG_PAX_KERNEXEC
12012+ unsigned long (*pax_open_kernel)(void);
12013+ unsigned long (*pax_close_kernel)(void);
12014+#endif
12015+
12016 };
12017
12018 struct raw_spinlock;
12019@@ -326,7 +333,7 @@ struct pv_lock_ops {
12020 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
12021 int (*spin_trylock)(struct raw_spinlock *lock);
12022 void (*spin_unlock)(struct raw_spinlock *lock);
12023-};
12024+} __no_const;
12025
12026 /* This contains all the paravirt structures: we get a convenient
12027 * number for each function using the offset which we use to indicate
12028diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
12029index b399988..3f47c38 100644
12030--- a/arch/x86/include/asm/pci_x86.h
12031+++ b/arch/x86/include/asm/pci_x86.h
12032@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
12033 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
12034
12035 struct pci_raw_ops {
12036- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12037+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12038 int reg, int len, u32 *val);
12039- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12040+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12041 int reg, int len, u32 val);
12042 };
12043
12044-extern struct pci_raw_ops *raw_pci_ops;
12045-extern struct pci_raw_ops *raw_pci_ext_ops;
12046+extern const struct pci_raw_ops *raw_pci_ops;
12047+extern const struct pci_raw_ops *raw_pci_ext_ops;
12048
12049-extern struct pci_raw_ops pci_direct_conf1;
12050+extern const struct pci_raw_ops pci_direct_conf1;
12051 extern bool port_cf9_safe;
12052
12053 /* arch_initcall level */
12054diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
12055index b65a36d..50345a4 100644
12056--- a/arch/x86/include/asm/percpu.h
12057+++ b/arch/x86/include/asm/percpu.h
12058@@ -78,6 +78,7 @@ do { \
12059 if (0) { \
12060 T__ tmp__; \
12061 tmp__ = (val); \
12062+ (void)tmp__; \
12063 } \
12064 switch (sizeof(var)) { \
12065 case 1: \
12066diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12067index 271de94..ef944d6 100644
12068--- a/arch/x86/include/asm/pgalloc.h
12069+++ b/arch/x86/include/asm/pgalloc.h
12070@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12071 pmd_t *pmd, pte_t *pte)
12072 {
12073 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12074+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12075+}
12076+
12077+static inline void pmd_populate_user(struct mm_struct *mm,
12078+ pmd_t *pmd, pte_t *pte)
12079+{
12080+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12081 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12082 }
12083
12084diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12085index 2334982..70bc412 100644
12086--- a/arch/x86/include/asm/pgtable-2level.h
12087+++ b/arch/x86/include/asm/pgtable-2level.h
12088@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12089
12090 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12091 {
12092+ pax_open_kernel();
12093 *pmdp = pmd;
12094+ pax_close_kernel();
12095 }
12096
12097 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12098diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12099index 33927d2..ccde329 100644
12100--- a/arch/x86/include/asm/pgtable-3level.h
12101+++ b/arch/x86/include/asm/pgtable-3level.h
12102@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12103
12104 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12105 {
12106+ pax_open_kernel();
12107 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12108+ pax_close_kernel();
12109 }
12110
12111 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12112 {
12113+ pax_open_kernel();
12114 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12115+ pax_close_kernel();
12116 }
12117
12118 /*
12119diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12120index af6fd36..867ff74 100644
12121--- a/arch/x86/include/asm/pgtable.h
12122+++ b/arch/x86/include/asm/pgtable.h
12123@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
12124
12125 #ifndef __PAGETABLE_PUD_FOLDED
12126 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12127+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12128 #define pgd_clear(pgd) native_pgd_clear(pgd)
12129 #endif
12130
12131@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
12132
12133 #define arch_end_context_switch(prev) do {} while(0)
12134
12135+#define pax_open_kernel() native_pax_open_kernel()
12136+#define pax_close_kernel() native_pax_close_kernel()
12137 #endif /* CONFIG_PARAVIRT */
12138
12139+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12140+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12141+
12142+#ifdef CONFIG_PAX_KERNEXEC
12143+static inline unsigned long native_pax_open_kernel(void)
12144+{
12145+ unsigned long cr0;
12146+
12147+ preempt_disable();
12148+ barrier();
12149+ cr0 = read_cr0() ^ X86_CR0_WP;
12150+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
12151+ write_cr0(cr0);
12152+ return cr0 ^ X86_CR0_WP;
12153+}
12154+
12155+static inline unsigned long native_pax_close_kernel(void)
12156+{
12157+ unsigned long cr0;
12158+
12159+ cr0 = read_cr0() ^ X86_CR0_WP;
12160+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12161+ write_cr0(cr0);
12162+ barrier();
12163+ preempt_enable_no_resched();
12164+ return cr0 ^ X86_CR0_WP;
12165+}
12166+#else
12167+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12168+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12169+#endif
12170+
12171 /*
12172 * The following only work if pte_present() is true.
12173 * Undefined behaviour if not..
12174 */
12175+static inline int pte_user(pte_t pte)
12176+{
12177+ return pte_val(pte) & _PAGE_USER;
12178+}
12179+
12180 static inline int pte_dirty(pte_t pte)
12181 {
12182 return pte_flags(pte) & _PAGE_DIRTY;
12183@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12184 return pte_clear_flags(pte, _PAGE_RW);
12185 }
12186
12187+static inline pte_t pte_mkread(pte_t pte)
12188+{
12189+ return __pte(pte_val(pte) | _PAGE_USER);
12190+}
12191+
12192 static inline pte_t pte_mkexec(pte_t pte)
12193 {
12194- return pte_clear_flags(pte, _PAGE_NX);
12195+#ifdef CONFIG_X86_PAE
12196+ if (__supported_pte_mask & _PAGE_NX)
12197+ return pte_clear_flags(pte, _PAGE_NX);
12198+ else
12199+#endif
12200+ return pte_set_flags(pte, _PAGE_USER);
12201+}
12202+
12203+static inline pte_t pte_exprotect(pte_t pte)
12204+{
12205+#ifdef CONFIG_X86_PAE
12206+ if (__supported_pte_mask & _PAGE_NX)
12207+ return pte_set_flags(pte, _PAGE_NX);
12208+ else
12209+#endif
12210+ return pte_clear_flags(pte, _PAGE_USER);
12211 }
12212
12213 static inline pte_t pte_mkdirty(pte_t pte)
12214@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12215 #endif
12216
12217 #ifndef __ASSEMBLY__
12218+
12219+#ifdef CONFIG_PAX_PER_CPU_PGD
12220+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12221+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12222+{
12223+ return cpu_pgd[cpu];
12224+}
12225+#endif
12226+
12227 #include <linux/mm_types.h>
12228
12229 static inline int pte_none(pte_t pte)
12230@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12231
12232 static inline int pgd_bad(pgd_t pgd)
12233 {
12234- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12235+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12236 }
12237
12238 static inline int pgd_none(pgd_t pgd)
12239@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
12240 * pgd_offset() returns a (pgd_t *)
12241 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12242 */
12243-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12244+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12245+
12246+#ifdef CONFIG_PAX_PER_CPU_PGD
12247+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12248+#endif
12249+
12250 /*
12251 * a shortcut which implies the use of the kernel's pgd, instead
12252 * of a process's
12253@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
12254 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12255 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12256
12257+#ifdef CONFIG_X86_32
12258+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12259+#else
12260+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12261+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12262+
12263+#ifdef CONFIG_PAX_MEMORY_UDEREF
12264+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12265+#else
12266+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12267+#endif
12268+
12269+#endif
12270+
12271 #ifndef __ASSEMBLY__
12272
12273 extern int direct_gbpages;
12274@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
12275 * dst and src can be on the same page, but the range must not overlap,
12276 * and must not cross a page boundary.
12277 */
12278-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12279+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12280 {
12281- memcpy(dst, src, count * sizeof(pgd_t));
12282+ pax_open_kernel();
12283+ while (count--)
12284+ *dst++ = *src++;
12285+ pax_close_kernel();
12286 }
12287
12288+#ifdef CONFIG_PAX_PER_CPU_PGD
12289+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12290+#endif
12291+
12292+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12293+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12294+#else
12295+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
12296+#endif
12297
12298 #include <asm-generic/pgtable.h>
12299 #endif /* __ASSEMBLY__ */
12300diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12301index 750f1bf..971e8394 100644
12302--- a/arch/x86/include/asm/pgtable_32.h
12303+++ b/arch/x86/include/asm/pgtable_32.h
12304@@ -26,9 +26,6 @@
12305 struct mm_struct;
12306 struct vm_area_struct;
12307
12308-extern pgd_t swapper_pg_dir[1024];
12309-extern pgd_t trampoline_pg_dir[1024];
12310-
12311 static inline void pgtable_cache_init(void) { }
12312 static inline void check_pgt_cache(void) { }
12313 void paging_init(void);
12314@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12315 # include <asm/pgtable-2level.h>
12316 #endif
12317
12318+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12319+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
12320+#ifdef CONFIG_X86_PAE
12321+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12322+#endif
12323+
12324 #if defined(CONFIG_HIGHPTE)
12325 #define __KM_PTE \
12326 (in_nmi() ? KM_NMI_PTE : \
12327@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12328 /* Clear a kernel PTE and flush it from the TLB */
12329 #define kpte_clear_flush(ptep, vaddr) \
12330 do { \
12331+ pax_open_kernel(); \
12332 pte_clear(&init_mm, (vaddr), (ptep)); \
12333+ pax_close_kernel(); \
12334 __flush_tlb_one((vaddr)); \
12335 } while (0)
12336
12337@@ -85,6 +90,9 @@ do { \
12338
12339 #endif /* !__ASSEMBLY__ */
12340
12341+#define HAVE_ARCH_UNMAPPED_AREA
12342+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12343+
12344 /*
12345 * kern_addr_valid() is (1) for FLATMEM and (0) for
12346 * SPARSEMEM and DISCONTIGMEM
12347diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12348index 5e67c15..12d5c47 100644
12349--- a/arch/x86/include/asm/pgtable_32_types.h
12350+++ b/arch/x86/include/asm/pgtable_32_types.h
12351@@ -8,7 +8,7 @@
12352 */
12353 #ifdef CONFIG_X86_PAE
12354 # include <asm/pgtable-3level_types.h>
12355-# define PMD_SIZE (1UL << PMD_SHIFT)
12356+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12357 # define PMD_MASK (~(PMD_SIZE - 1))
12358 #else
12359 # include <asm/pgtable-2level_types.h>
12360@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12361 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12362 #endif
12363
12364+#ifdef CONFIG_PAX_KERNEXEC
12365+#ifndef __ASSEMBLY__
12366+extern unsigned char MODULES_EXEC_VADDR[];
12367+extern unsigned char MODULES_EXEC_END[];
12368+#endif
12369+#include <asm/boot.h>
12370+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12371+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12372+#else
12373+#define ktla_ktva(addr) (addr)
12374+#define ktva_ktla(addr) (addr)
12375+#endif
12376+
12377 #define MODULES_VADDR VMALLOC_START
12378 #define MODULES_END VMALLOC_END
12379 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12380diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12381index c57a301..6b414ff 100644
12382--- a/arch/x86/include/asm/pgtable_64.h
12383+++ b/arch/x86/include/asm/pgtable_64.h
12384@@ -16,10 +16,14 @@
12385
12386 extern pud_t level3_kernel_pgt[512];
12387 extern pud_t level3_ident_pgt[512];
12388+extern pud_t level3_vmalloc_start_pgt[512];
12389+extern pud_t level3_vmalloc_end_pgt[512];
12390+extern pud_t level3_vmemmap_pgt[512];
12391+extern pud_t level2_vmemmap_pgt[512];
12392 extern pmd_t level2_kernel_pgt[512];
12393 extern pmd_t level2_fixmap_pgt[512];
12394-extern pmd_t level2_ident_pgt[512];
12395-extern pgd_t init_level4_pgt[];
12396+extern pmd_t level2_ident_pgt[512*2];
12397+extern pgd_t init_level4_pgt[512];
12398
12399 #define swapper_pg_dir init_level4_pgt
12400
12401@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
12402
12403 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12404 {
12405+ pax_open_kernel();
12406 *pmdp = pmd;
12407+ pax_close_kernel();
12408 }
12409
12410 static inline void native_pmd_clear(pmd_t *pmd)
12411@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
12412
12413 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12414 {
12415+ pax_open_kernel();
12416+ *pgdp = pgd;
12417+ pax_close_kernel();
12418+}
12419+
12420+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12421+{
12422 *pgdp = pgd;
12423 }
12424
12425diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12426index 766ea16..5b96cb3 100644
12427--- a/arch/x86/include/asm/pgtable_64_types.h
12428+++ b/arch/x86/include/asm/pgtable_64_types.h
12429@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12430 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12431 #define MODULES_END _AC(0xffffffffff000000, UL)
12432 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12433+#define MODULES_EXEC_VADDR MODULES_VADDR
12434+#define MODULES_EXEC_END MODULES_END
12435+
12436+#define ktla_ktva(addr) (addr)
12437+#define ktva_ktla(addr) (addr)
12438
12439 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12440diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12441index d1f4a76..2f46ba1 100644
12442--- a/arch/x86/include/asm/pgtable_types.h
12443+++ b/arch/x86/include/asm/pgtable_types.h
12444@@ -16,12 +16,11 @@
12445 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12446 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12447 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12448-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12449+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12450 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12451 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12452 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12453-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12454-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12455+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12456 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12457
12458 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12459@@ -39,7 +38,6 @@
12460 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12461 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12462 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12463-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12464 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12465 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12466 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12467@@ -55,8 +53,10 @@
12468
12469 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12470 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12471-#else
12472+#elif defined(CONFIG_KMEMCHECK)
12473 #define _PAGE_NX (_AT(pteval_t, 0))
12474+#else
12475+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12476 #endif
12477
12478 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12479@@ -93,6 +93,9 @@
12480 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12481 _PAGE_ACCESSED)
12482
12483+#define PAGE_READONLY_NOEXEC PAGE_READONLY
12484+#define PAGE_SHARED_NOEXEC PAGE_SHARED
12485+
12486 #define __PAGE_KERNEL_EXEC \
12487 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12488 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12489@@ -103,8 +106,8 @@
12490 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12491 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12492 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12493-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12494-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
12495+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12496+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
12497 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12498 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
12499 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
12500@@ -163,8 +166,8 @@
12501 * bits are combined, this will alow user to access the high address mapped
12502 * VDSO in the presence of CONFIG_COMPAT_VDSO
12503 */
12504-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12505-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12506+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12507+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12508 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12509 #endif
12510
12511@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12512 {
12513 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12514 }
12515+#endif
12516
12517+#if PAGETABLE_LEVELS == 3
12518+#include <asm-generic/pgtable-nopud.h>
12519+#endif
12520+
12521+#if PAGETABLE_LEVELS == 2
12522+#include <asm-generic/pgtable-nopmd.h>
12523+#endif
12524+
12525+#ifndef __ASSEMBLY__
12526 #if PAGETABLE_LEVELS > 3
12527 typedef struct { pudval_t pud; } pud_t;
12528
12529@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12530 return pud.pud;
12531 }
12532 #else
12533-#include <asm-generic/pgtable-nopud.h>
12534-
12535 static inline pudval_t native_pud_val(pud_t pud)
12536 {
12537 return native_pgd_val(pud.pgd);
12538@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12539 return pmd.pmd;
12540 }
12541 #else
12542-#include <asm-generic/pgtable-nopmd.h>
12543-
12544 static inline pmdval_t native_pmd_val(pmd_t pmd)
12545 {
12546 return native_pgd_val(pmd.pud.pgd);
12547@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
12548
12549 extern pteval_t __supported_pte_mask;
12550 extern void set_nx(void);
12551+
12552+#ifdef CONFIG_X86_32
12553+#ifdef CONFIG_X86_PAE
12554 extern int nx_enabled;
12555+#else
12556+#define nx_enabled (0)
12557+#endif
12558+#else
12559+#define nx_enabled (1)
12560+#endif
12561
12562 #define pgprot_writecombine pgprot_writecombine
12563 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12564diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12565index fa04dea..5f823fc 100644
12566--- a/arch/x86/include/asm/processor.h
12567+++ b/arch/x86/include/asm/processor.h
12568@@ -272,7 +272,7 @@ struct tss_struct {
12569
12570 } ____cacheline_aligned;
12571
12572-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12573+extern struct tss_struct init_tss[NR_CPUS];
12574
12575 /*
12576 * Save the original ist values for checking stack pointers during debugging
12577@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
12578 */
12579 #define TASK_SIZE PAGE_OFFSET
12580 #define TASK_SIZE_MAX TASK_SIZE
12581+
12582+#ifdef CONFIG_PAX_SEGMEXEC
12583+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12584+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12585+#else
12586 #define STACK_TOP TASK_SIZE
12587-#define STACK_TOP_MAX STACK_TOP
12588+#endif
12589+
12590+#define STACK_TOP_MAX TASK_SIZE
12591
12592 #define INIT_THREAD { \
12593- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12594+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12595 .vm86_info = NULL, \
12596 .sysenter_cs = __KERNEL_CS, \
12597 .io_bitmap_ptr = NULL, \
12598@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
12599 */
12600 #define INIT_TSS { \
12601 .x86_tss = { \
12602- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12603+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12604 .ss0 = __KERNEL_DS, \
12605 .ss1 = __KERNEL_CS, \
12606 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12607@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
12608 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12609
12610 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12611-#define KSTK_TOP(info) \
12612-({ \
12613- unsigned long *__ptr = (unsigned long *)(info); \
12614- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12615-})
12616+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12617
12618 /*
12619 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12620@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12621 #define task_pt_regs(task) \
12622 ({ \
12623 struct pt_regs *__regs__; \
12624- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12625+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12626 __regs__ - 1; \
12627 })
12628
12629@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12630 /*
12631 * User space process size. 47bits minus one guard page.
12632 */
12633-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12634+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12635
12636 /* This decides where the kernel will search for a free chunk of vm
12637 * space during mmap's.
12638 */
12639 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12640- 0xc0000000 : 0xFFFFe000)
12641+ 0xc0000000 : 0xFFFFf000)
12642
12643 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
12644 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12645@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12646 #define STACK_TOP_MAX TASK_SIZE_MAX
12647
12648 #define INIT_THREAD { \
12649- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12650+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12651 }
12652
12653 #define INIT_TSS { \
12654- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12655+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12656 }
12657
12658 /*
12659@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12660 */
12661 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12662
12663+#ifdef CONFIG_PAX_SEGMEXEC
12664+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12665+#endif
12666+
12667 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12668
12669 /* Get/set a process' ability to use the timestamp counter instruction */
12670diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12671index 0f0d908..f2e3da2 100644
12672--- a/arch/x86/include/asm/ptrace.h
12673+++ b/arch/x86/include/asm/ptrace.h
12674@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12675 }
12676
12677 /*
12678- * user_mode_vm(regs) determines whether a register set came from user mode.
12679+ * user_mode(regs) determines whether a register set came from user mode.
12680 * This is true if V8086 mode was enabled OR if the register set was from
12681 * protected mode with RPL-3 CS value. This tricky test checks that with
12682 * one comparison. Many places in the kernel can bypass this full check
12683- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12684+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12685+ * be used.
12686 */
12687-static inline int user_mode(struct pt_regs *regs)
12688+static inline int user_mode_novm(struct pt_regs *regs)
12689 {
12690 #ifdef CONFIG_X86_32
12691 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12692 #else
12693- return !!(regs->cs & 3);
12694+ return !!(regs->cs & SEGMENT_RPL_MASK);
12695 #endif
12696 }
12697
12698-static inline int user_mode_vm(struct pt_regs *regs)
12699+static inline int user_mode(struct pt_regs *regs)
12700 {
12701 #ifdef CONFIG_X86_32
12702 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12703 USER_RPL;
12704 #else
12705- return user_mode(regs);
12706+ return user_mode_novm(regs);
12707 #endif
12708 }
12709
12710diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12711index 562d4fd..6e39df1 100644
12712--- a/arch/x86/include/asm/reboot.h
12713+++ b/arch/x86/include/asm/reboot.h
12714@@ -6,19 +6,19 @@
12715 struct pt_regs;
12716
12717 struct machine_ops {
12718- void (*restart)(char *cmd);
12719- void (*halt)(void);
12720- void (*power_off)(void);
12721+ void (* __noreturn restart)(char *cmd);
12722+ void (* __noreturn halt)(void);
12723+ void (* __noreturn power_off)(void);
12724 void (*shutdown)(void);
12725 void (*crash_shutdown)(struct pt_regs *);
12726- void (*emergency_restart)(void);
12727-};
12728+ void (* __noreturn emergency_restart)(void);
12729+} __no_const;
12730
12731 extern struct machine_ops machine_ops;
12732
12733 void native_machine_crash_shutdown(struct pt_regs *regs);
12734 void native_machine_shutdown(void);
12735-void machine_real_restart(const unsigned char *code, int length);
12736+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
12737
12738 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
12739 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
12740diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12741index 606ede1..dbfff37 100644
12742--- a/arch/x86/include/asm/rwsem.h
12743+++ b/arch/x86/include/asm/rwsem.h
12744@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12745 {
12746 asm volatile("# beginning down_read\n\t"
12747 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12748+
12749+#ifdef CONFIG_PAX_REFCOUNT
12750+ "jno 0f\n"
12751+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
12752+ "int $4\n0:\n"
12753+ _ASM_EXTABLE(0b, 0b)
12754+#endif
12755+
12756 /* adds 0x00000001, returns the old value */
12757 " jns 1f\n"
12758 " call call_rwsem_down_read_failed\n"
12759@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12760 "1:\n\t"
12761 " mov %1,%2\n\t"
12762 " add %3,%2\n\t"
12763+
12764+#ifdef CONFIG_PAX_REFCOUNT
12765+ "jno 0f\n"
12766+ "sub %3,%2\n"
12767+ "int $4\n0:\n"
12768+ _ASM_EXTABLE(0b, 0b)
12769+#endif
12770+
12771 " jle 2f\n\t"
12772 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12773 " jnz 1b\n\t"
12774@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12775 tmp = RWSEM_ACTIVE_WRITE_BIAS;
12776 asm volatile("# beginning down_write\n\t"
12777 LOCK_PREFIX " xadd %1,(%2)\n\t"
12778+
12779+#ifdef CONFIG_PAX_REFCOUNT
12780+ "jno 0f\n"
12781+ "mov %1,(%2)\n"
12782+ "int $4\n0:\n"
12783+ _ASM_EXTABLE(0b, 0b)
12784+#endif
12785+
12786 /* subtract 0x0000ffff, returns the old value */
12787 " test %1,%1\n\t"
12788 /* was the count 0 before? */
12789@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12790 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
12791 asm volatile("# beginning __up_read\n\t"
12792 LOCK_PREFIX " xadd %1,(%2)\n\t"
12793+
12794+#ifdef CONFIG_PAX_REFCOUNT
12795+ "jno 0f\n"
12796+ "mov %1,(%2)\n"
12797+ "int $4\n0:\n"
12798+ _ASM_EXTABLE(0b, 0b)
12799+#endif
12800+
12801 /* subtracts 1, returns the old value */
12802 " jns 1f\n\t"
12803 " call call_rwsem_wake\n"
12804@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12805 rwsem_count_t tmp;
12806 asm volatile("# beginning __up_write\n\t"
12807 LOCK_PREFIX " xadd %1,(%2)\n\t"
12808+
12809+#ifdef CONFIG_PAX_REFCOUNT
12810+ "jno 0f\n"
12811+ "mov %1,(%2)\n"
12812+ "int $4\n0:\n"
12813+ _ASM_EXTABLE(0b, 0b)
12814+#endif
12815+
12816 /* tries to transition
12817 0xffff0001 -> 0x00000000 */
12818 " jz 1f\n"
12819@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12820 {
12821 asm volatile("# beginning __downgrade_write\n\t"
12822 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12823+
12824+#ifdef CONFIG_PAX_REFCOUNT
12825+ "jno 0f\n"
12826+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12827+ "int $4\n0:\n"
12828+ _ASM_EXTABLE(0b, 0b)
12829+#endif
12830+
12831 /*
12832 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12833 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12834@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12835 static inline void rwsem_atomic_add(rwsem_count_t delta,
12836 struct rw_semaphore *sem)
12837 {
12838- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12839+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12840+
12841+#ifdef CONFIG_PAX_REFCOUNT
12842+ "jno 0f\n"
12843+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12844+ "int $4\n0:\n"
12845+ _ASM_EXTABLE(0b, 0b)
12846+#endif
12847+
12848 : "+m" (sem->count)
12849 : "er" (delta));
12850 }
12851@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
12852 {
12853 rwsem_count_t tmp = delta;
12854
12855- asm volatile(LOCK_PREFIX "xadd %0,%1"
12856+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
12857+
12858+#ifdef CONFIG_PAX_REFCOUNT
12859+ "jno 0f\n"
12860+ "mov %0,%1\n"
12861+ "int $4\n0:\n"
12862+ _ASM_EXTABLE(0b, 0b)
12863+#endif
12864+
12865 : "+r" (tmp), "+m" (sem->count)
12866 : : "memory");
12867
12868diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12869index 14e0ed8..7f7dd5e 100644
12870--- a/arch/x86/include/asm/segment.h
12871+++ b/arch/x86/include/asm/segment.h
12872@@ -62,10 +62,15 @@
12873 * 26 - ESPFIX small SS
12874 * 27 - per-cpu [ offset to per-cpu data area ]
12875 * 28 - stack_canary-20 [ for stack protector ]
12876- * 29 - unused
12877- * 30 - unused
12878+ * 29 - PCI BIOS CS
12879+ * 30 - PCI BIOS DS
12880 * 31 - TSS for double fault handler
12881 */
12882+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12883+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12884+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12885+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12886+
12887 #define GDT_ENTRY_TLS_MIN 6
12888 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12889
12890@@ -77,6 +82,8 @@
12891
12892 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
12893
12894+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12895+
12896 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12897
12898 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12899@@ -88,7 +95,7 @@
12900 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12901 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12902
12903-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12904+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12905 #ifdef CONFIG_SMP
12906 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12907 #else
12908@@ -102,6 +109,12 @@
12909 #define __KERNEL_STACK_CANARY 0
12910 #endif
12911
12912+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12913+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12914+
12915+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12916+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12917+
12918 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12919
12920 /*
12921@@ -139,7 +152,7 @@
12922 */
12923
12924 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12925-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12926+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12927
12928
12929 #else
12930@@ -163,6 +176,8 @@
12931 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12932 #define __USER32_DS __USER_DS
12933
12934+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12935+
12936 #define GDT_ENTRY_TSS 8 /* needs two entries */
12937 #define GDT_ENTRY_LDT 10 /* needs two entries */
12938 #define GDT_ENTRY_TLS_MIN 12
12939@@ -183,6 +198,7 @@
12940 #endif
12941
12942 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12943+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12944 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12945 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12946 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12947diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12948index 4c2f63c..5685db2 100644
12949--- a/arch/x86/include/asm/smp.h
12950+++ b/arch/x86/include/asm/smp.h
12951@@ -24,7 +24,7 @@ extern unsigned int num_processors;
12952 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12953 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12954 DECLARE_PER_CPU(u16, cpu_llc_id);
12955-DECLARE_PER_CPU(int, cpu_number);
12956+DECLARE_PER_CPU(unsigned int, cpu_number);
12957
12958 static inline struct cpumask *cpu_sibling_mask(int cpu)
12959 {
12960@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12961 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12962
12963 /* Static state in head.S used to set up a CPU */
12964-extern struct {
12965- void *sp;
12966- unsigned short ss;
12967-} stack_start;
12968+extern unsigned long stack_start; /* Initial stack pointer address */
12969
12970 struct smp_ops {
12971 void (*smp_prepare_boot_cpu)(void);
12972@@ -60,7 +57,7 @@ struct smp_ops {
12973
12974 void (*send_call_func_ipi)(const struct cpumask *mask);
12975 void (*send_call_func_single_ipi)(int cpu);
12976-};
12977+} __no_const;
12978
12979 /* Globals due to paravirt */
12980 extern void set_cpu_sibling_map(int cpu);
12981@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12982 extern int safe_smp_processor_id(void);
12983
12984 #elif defined(CONFIG_X86_64_SMP)
12985-#define raw_smp_processor_id() (percpu_read(cpu_number))
12986-
12987-#define stack_smp_processor_id() \
12988-({ \
12989- struct thread_info *ti; \
12990- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12991- ti->cpu; \
12992-})
12993+#define raw_smp_processor_id() (percpu_read(cpu_number))
12994+#define stack_smp_processor_id() raw_smp_processor_id()
12995 #define safe_smp_processor_id() smp_processor_id()
12996
12997 #endif
12998diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12999index 4e77853..4359783 100644
13000--- a/arch/x86/include/asm/spinlock.h
13001+++ b/arch/x86/include/asm/spinlock.h
13002@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
13003 static inline void __raw_read_lock(raw_rwlock_t *rw)
13004 {
13005 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
13006+
13007+#ifdef CONFIG_PAX_REFCOUNT
13008+ "jno 0f\n"
13009+ LOCK_PREFIX " addl $1,(%0)\n"
13010+ "int $4\n0:\n"
13011+ _ASM_EXTABLE(0b, 0b)
13012+#endif
13013+
13014 "jns 1f\n"
13015 "call __read_lock_failed\n\t"
13016 "1:\n"
13017@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
13018 static inline void __raw_write_lock(raw_rwlock_t *rw)
13019 {
13020 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
13021+
13022+#ifdef CONFIG_PAX_REFCOUNT
13023+ "jno 0f\n"
13024+ LOCK_PREFIX " addl %1,(%0)\n"
13025+ "int $4\n0:\n"
13026+ _ASM_EXTABLE(0b, 0b)
13027+#endif
13028+
13029 "jz 1f\n"
13030 "call __write_lock_failed\n\t"
13031 "1:\n"
13032@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
13033
13034 static inline void __raw_read_unlock(raw_rwlock_t *rw)
13035 {
13036- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
13037+ asm volatile(LOCK_PREFIX "incl %0\n"
13038+
13039+#ifdef CONFIG_PAX_REFCOUNT
13040+ "jno 0f\n"
13041+ LOCK_PREFIX "decl %0\n"
13042+ "int $4\n0:\n"
13043+ _ASM_EXTABLE(0b, 0b)
13044+#endif
13045+
13046+ :"+m" (rw->lock) : : "memory");
13047 }
13048
13049 static inline void __raw_write_unlock(raw_rwlock_t *rw)
13050 {
13051- asm volatile(LOCK_PREFIX "addl %1, %0"
13052+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
13053+
13054+#ifdef CONFIG_PAX_REFCOUNT
13055+ "jno 0f\n"
13056+ LOCK_PREFIX "subl %1, %0\n"
13057+ "int $4\n0:\n"
13058+ _ASM_EXTABLE(0b, 0b)
13059+#endif
13060+
13061 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
13062 }
13063
13064diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13065index 1575177..cb23f52 100644
13066--- a/arch/x86/include/asm/stackprotector.h
13067+++ b/arch/x86/include/asm/stackprotector.h
13068@@ -48,7 +48,7 @@
13069 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13070 */
13071 #define GDT_STACK_CANARY_INIT \
13072- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13073+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13074
13075 /*
13076 * Initialize the stackprotector canary value.
13077@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
13078
13079 static inline void load_stack_canary_segment(void)
13080 {
13081-#ifdef CONFIG_X86_32
13082+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13083 asm volatile ("mov %0, %%gs" : : "r" (0));
13084 #endif
13085 }
13086diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
13087index 1bb6e39..234246f 100644
13088--- a/arch/x86/include/asm/syscalls.h
13089+++ b/arch/x86/include/asm/syscalls.h
13090@@ -24,7 +24,7 @@ int sys_fork(struct pt_regs *);
13091 int sys_vfork(struct pt_regs *);
13092
13093 /* kernel/ldt.c */
13094-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
13095+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
13096
13097 /* kernel/signal.c */
13098 long sys_rt_sigreturn(struct pt_regs *);
13099diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
13100index e0fbf29..858ef4a 100644
13101--- a/arch/x86/include/asm/system.h
13102+++ b/arch/x86/include/asm/system.h
13103@@ -132,7 +132,7 @@ do { \
13104 "thread_return:\n\t" \
13105 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13106 __switch_canary \
13107- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13108+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13109 "movq %%rax,%%rdi\n\t" \
13110 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13111 "jnz ret_from_fork\n\t" \
13112@@ -143,7 +143,7 @@ do { \
13113 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13114 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13115 [_tif_fork] "i" (_TIF_FORK), \
13116- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13117+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
13118 [current_task] "m" (per_cpu_var(current_task)) \
13119 __switch_canary_iparam \
13120 : "memory", "cc" __EXTRA_CLOBBER)
13121@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
13122 {
13123 unsigned long __limit;
13124 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13125- return __limit + 1;
13126+ return __limit;
13127 }
13128
13129 static inline void native_clts(void)
13130@@ -340,12 +340,12 @@ void enable_hlt(void);
13131
13132 void cpu_idle_wait(void);
13133
13134-extern unsigned long arch_align_stack(unsigned long sp);
13135+#define arch_align_stack(x) ((x) & ~0xfUL)
13136 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13137
13138 void default_idle(void);
13139
13140-void stop_this_cpu(void *dummy);
13141+void stop_this_cpu(void *dummy) __noreturn;
13142
13143 /*
13144 * Force strict CPU ordering.
13145diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13146index 19c3ce4..8962535 100644
13147--- a/arch/x86/include/asm/thread_info.h
13148+++ b/arch/x86/include/asm/thread_info.h
13149@@ -10,6 +10,7 @@
13150 #include <linux/compiler.h>
13151 #include <asm/page.h>
13152 #include <asm/types.h>
13153+#include <asm/percpu.h>
13154
13155 /*
13156 * low level task data that entry.S needs immediate access to
13157@@ -24,7 +25,6 @@ struct exec_domain;
13158 #include <asm/atomic.h>
13159
13160 struct thread_info {
13161- struct task_struct *task; /* main task structure */
13162 struct exec_domain *exec_domain; /* execution domain */
13163 __u32 flags; /* low level flags */
13164 __u32 status; /* thread synchronous flags */
13165@@ -34,18 +34,12 @@ struct thread_info {
13166 mm_segment_t addr_limit;
13167 struct restart_block restart_block;
13168 void __user *sysenter_return;
13169-#ifdef CONFIG_X86_32
13170- unsigned long previous_esp; /* ESP of the previous stack in
13171- case of nested (IRQ) stacks
13172- */
13173- __u8 supervisor_stack[0];
13174-#endif
13175+ unsigned long lowest_stack;
13176 int uaccess_err;
13177 };
13178
13179-#define INIT_THREAD_INFO(tsk) \
13180+#define INIT_THREAD_INFO \
13181 { \
13182- .task = &tsk, \
13183 .exec_domain = &default_exec_domain, \
13184 .flags = 0, \
13185 .cpu = 0, \
13186@@ -56,7 +50,7 @@ struct thread_info {
13187 }, \
13188 }
13189
13190-#define init_thread_info (init_thread_union.thread_info)
13191+#define init_thread_info (init_thread_union.stack)
13192 #define init_stack (init_thread_union.stack)
13193
13194 #else /* !__ASSEMBLY__ */
13195@@ -163,45 +157,40 @@ struct thread_info {
13196 #define alloc_thread_info(tsk) \
13197 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
13198
13199-#ifdef CONFIG_X86_32
13200-
13201-#define STACK_WARN (THREAD_SIZE/8)
13202-/*
13203- * macros/functions for gaining access to the thread information structure
13204- *
13205- * preempt_count needs to be 1 initially, until the scheduler is functional.
13206- */
13207-#ifndef __ASSEMBLY__
13208-
13209-
13210-/* how to get the current stack pointer from C */
13211-register unsigned long current_stack_pointer asm("esp") __used;
13212-
13213-/* how to get the thread information struct from C */
13214-static inline struct thread_info *current_thread_info(void)
13215-{
13216- return (struct thread_info *)
13217- (current_stack_pointer & ~(THREAD_SIZE - 1));
13218-}
13219-
13220-#else /* !__ASSEMBLY__ */
13221-
13222+#ifdef __ASSEMBLY__
13223 /* how to get the thread information struct from ASM */
13224 #define GET_THREAD_INFO(reg) \
13225- movl $-THREAD_SIZE, reg; \
13226- andl %esp, reg
13227+ mov PER_CPU_VAR(current_tinfo), reg
13228
13229 /* use this one if reg already contains %esp */
13230-#define GET_THREAD_INFO_WITH_ESP(reg) \
13231- andl $-THREAD_SIZE, reg
13232+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13233+#else
13234+/* how to get the thread information struct from C */
13235+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13236+
13237+static __always_inline struct thread_info *current_thread_info(void)
13238+{
13239+ return percpu_read_stable(current_tinfo);
13240+}
13241+#endif
13242+
13243+#ifdef CONFIG_X86_32
13244+
13245+#define STACK_WARN (THREAD_SIZE/8)
13246+/*
13247+ * macros/functions for gaining access to the thread information structure
13248+ *
13249+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13250+ */
13251+#ifndef __ASSEMBLY__
13252+
13253+/* how to get the current stack pointer from C */
13254+register unsigned long current_stack_pointer asm("esp") __used;
13255
13256 #endif
13257
13258 #else /* X86_32 */
13259
13260-#include <asm/percpu.h>
13261-#define KERNEL_STACK_OFFSET (5*8)
13262-
13263 /*
13264 * macros/functions for gaining access to the thread information structure
13265 * preempt_count needs to be 1 initially, until the scheduler is functional.
13266@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
13267 #ifndef __ASSEMBLY__
13268 DECLARE_PER_CPU(unsigned long, kernel_stack);
13269
13270-static inline struct thread_info *current_thread_info(void)
13271-{
13272- struct thread_info *ti;
13273- ti = (void *)(percpu_read_stable(kernel_stack) +
13274- KERNEL_STACK_OFFSET - THREAD_SIZE);
13275- return ti;
13276-}
13277-
13278-#else /* !__ASSEMBLY__ */
13279-
13280-/* how to get the thread information struct from ASM */
13281-#define GET_THREAD_INFO(reg) \
13282- movq PER_CPU_VAR(kernel_stack),reg ; \
13283- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13284-
13285+/* how to get the current stack pointer from C */
13286+register unsigned long current_stack_pointer asm("rsp") __used;
13287 #endif
13288
13289 #endif /* !X86_32 */
13290@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
13291 extern void free_thread_info(struct thread_info *ti);
13292 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13293 #define arch_task_cache_init arch_task_cache_init
13294+
13295+#define __HAVE_THREAD_FUNCTIONS
13296+#define task_thread_info(task) (&(task)->tinfo)
13297+#define task_stack_page(task) ((task)->stack)
13298+#define setup_thread_stack(p, org) do {} while (0)
13299+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13300+
13301+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
13302+extern struct task_struct *alloc_task_struct(void);
13303+extern void free_task_struct(struct task_struct *);
13304+
13305 #endif
13306 #endif /* _ASM_X86_THREAD_INFO_H */
13307diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13308index 61c5874..8a046e9 100644
13309--- a/arch/x86/include/asm/uaccess.h
13310+++ b/arch/x86/include/asm/uaccess.h
13311@@ -8,12 +8,15 @@
13312 #include <linux/thread_info.h>
13313 #include <linux/prefetch.h>
13314 #include <linux/string.h>
13315+#include <linux/sched.h>
13316 #include <asm/asm.h>
13317 #include <asm/page.h>
13318
13319 #define VERIFY_READ 0
13320 #define VERIFY_WRITE 1
13321
13322+extern void check_object_size(const void *ptr, unsigned long n, bool to);
13323+
13324 /*
13325 * The fs value determines whether argument validity checking should be
13326 * performed or not. If get_fs() == USER_DS, checking is performed, with
13327@@ -29,7 +32,12 @@
13328
13329 #define get_ds() (KERNEL_DS)
13330 #define get_fs() (current_thread_info()->addr_limit)
13331+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13332+void __set_fs(mm_segment_t x);
13333+void set_fs(mm_segment_t x);
13334+#else
13335 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13336+#endif
13337
13338 #define segment_eq(a, b) ((a).seg == (b).seg)
13339
13340@@ -77,7 +85,33 @@
13341 * checks that the pointer is in the user space range - after calling
13342 * this function, memory access functions may still return -EFAULT.
13343 */
13344-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13345+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13346+#define access_ok(type, addr, size) \
13347+({ \
13348+ long __size = size; \
13349+ unsigned long __addr = (unsigned long)addr; \
13350+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13351+ unsigned long __end_ao = __addr + __size - 1; \
13352+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
13353+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13354+ while(__addr_ao <= __end_ao) { \
13355+ char __c_ao; \
13356+ __addr_ao += PAGE_SIZE; \
13357+ if (__size > PAGE_SIZE) \
13358+ cond_resched(); \
13359+ if (__get_user(__c_ao, (char __user *)__addr)) \
13360+ break; \
13361+ if (type != VERIFY_WRITE) { \
13362+ __addr = __addr_ao; \
13363+ continue; \
13364+ } \
13365+ if (__put_user(__c_ao, (char __user *)__addr)) \
13366+ break; \
13367+ __addr = __addr_ao; \
13368+ } \
13369+ } \
13370+ __ret_ao; \
13371+})
13372
13373 /*
13374 * The exception table consists of pairs of addresses: the first is the
13375@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
13376 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13377 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13378
13379-
13380+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13381+#define __copyuser_seg "gs;"
13382+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13383+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13384+#else
13385+#define __copyuser_seg
13386+#define __COPYUSER_SET_ES
13387+#define __COPYUSER_RESTORE_ES
13388+#endif
13389
13390 #ifdef CONFIG_X86_32
13391 #define __put_user_asm_u64(x, addr, err, errret) \
13392- asm volatile("1: movl %%eax,0(%2)\n" \
13393- "2: movl %%edx,4(%2)\n" \
13394+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13395+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13396 "3:\n" \
13397 ".section .fixup,\"ax\"\n" \
13398 "4: movl %3,%0\n" \
13399@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
13400 : "A" (x), "r" (addr), "i" (errret), "0" (err))
13401
13402 #define __put_user_asm_ex_u64(x, addr) \
13403- asm volatile("1: movl %%eax,0(%1)\n" \
13404- "2: movl %%edx,4(%1)\n" \
13405+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13406+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13407 "3:\n" \
13408 _ASM_EXTABLE(1b, 2b - 1b) \
13409 _ASM_EXTABLE(2b, 3b - 2b) \
13410@@ -253,7 +295,7 @@ extern void __put_user_8(void);
13411 __typeof__(*(ptr)) __pu_val; \
13412 __chk_user_ptr(ptr); \
13413 might_fault(); \
13414- __pu_val = x; \
13415+ __pu_val = (x); \
13416 switch (sizeof(*(ptr))) { \
13417 case 1: \
13418 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13419@@ -374,7 +416,7 @@ do { \
13420 } while (0)
13421
13422 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13423- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
13424+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13425 "2:\n" \
13426 ".section .fixup,\"ax\"\n" \
13427 "3: mov %3,%0\n" \
13428@@ -382,7 +424,7 @@ do { \
13429 " jmp 2b\n" \
13430 ".previous\n" \
13431 _ASM_EXTABLE(1b, 3b) \
13432- : "=r" (err), ltype(x) \
13433+ : "=r" (err), ltype (x) \
13434 : "m" (__m(addr)), "i" (errret), "0" (err))
13435
13436 #define __get_user_size_ex(x, ptr, size) \
13437@@ -407,7 +449,7 @@ do { \
13438 } while (0)
13439
13440 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13441- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13442+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13443 "2:\n" \
13444 _ASM_EXTABLE(1b, 2b - 1b) \
13445 : ltype(x) : "m" (__m(addr)))
13446@@ -424,13 +466,24 @@ do { \
13447 int __gu_err; \
13448 unsigned long __gu_val; \
13449 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13450- (x) = (__force __typeof__(*(ptr)))__gu_val; \
13451+ (x) = (__typeof__(*(ptr)))__gu_val; \
13452 __gu_err; \
13453 })
13454
13455 /* FIXME: this hack is definitely wrong -AK */
13456 struct __large_struct { unsigned long buf[100]; };
13457-#define __m(x) (*(struct __large_struct __user *)(x))
13458+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13459+#define ____m(x) \
13460+({ \
13461+ unsigned long ____x = (unsigned long)(x); \
13462+ if (____x < PAX_USER_SHADOW_BASE) \
13463+ ____x += PAX_USER_SHADOW_BASE; \
13464+ (void __user *)____x; \
13465+})
13466+#else
13467+#define ____m(x) (x)
13468+#endif
13469+#define __m(x) (*(struct __large_struct __user *)____m(x))
13470
13471 /*
13472 * Tell gcc we read from memory instead of writing: this is because
13473@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
13474 * aliasing issues.
13475 */
13476 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13477- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
13478+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13479 "2:\n" \
13480 ".section .fixup,\"ax\"\n" \
13481 "3: mov %3,%0\n" \
13482@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
13483 ".previous\n" \
13484 _ASM_EXTABLE(1b, 3b) \
13485 : "=r"(err) \
13486- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13487+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13488
13489 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13490- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13491+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13492 "2:\n" \
13493 _ASM_EXTABLE(1b, 2b - 1b) \
13494 : : ltype(x), "m" (__m(addr)))
13495@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
13496 * On error, the variable @x is set to zero.
13497 */
13498
13499+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13500+#define __get_user(x, ptr) get_user((x), (ptr))
13501+#else
13502 #define __get_user(x, ptr) \
13503 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13504+#endif
13505
13506 /**
13507 * __put_user: - Write a simple value into user space, with less checking.
13508@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
13509 * Returns zero on success, or -EFAULT on error.
13510 */
13511
13512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13513+#define __put_user(x, ptr) put_user((x), (ptr))
13514+#else
13515 #define __put_user(x, ptr) \
13516 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13517+#endif
13518
13519 #define __get_user_unaligned __get_user
13520 #define __put_user_unaligned __put_user
13521@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
13522 #define get_user_ex(x, ptr) do { \
13523 unsigned long __gue_val; \
13524 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13525- (x) = (__force __typeof__(*(ptr)))__gue_val; \
13526+ (x) = (__typeof__(*(ptr)))__gue_val; \
13527 } while (0)
13528
13529 #ifdef CONFIG_X86_WP_WORKS_OK
13530@@ -567,6 +628,7 @@ extern struct movsl_mask {
13531
13532 #define ARCH_HAS_NOCACHE_UACCESS 1
13533
13534+#define ARCH_HAS_SORT_EXTABLE
13535 #ifdef CONFIG_X86_32
13536 # include "uaccess_32.h"
13537 #else
13538diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13539index 632fb44..bb15d3f 100644
13540--- a/arch/x86/include/asm/uaccess_32.h
13541+++ b/arch/x86/include/asm/uaccess_32.h
13542@@ -12,15 +12,15 @@
13543 #include <asm/page.h>
13544
13545 unsigned long __must_check __copy_to_user_ll
13546- (void __user *to, const void *from, unsigned long n);
13547+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
13548 unsigned long __must_check __copy_from_user_ll
13549- (void *to, const void __user *from, unsigned long n);
13550+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13551 unsigned long __must_check __copy_from_user_ll_nozero
13552- (void *to, const void __user *from, unsigned long n);
13553+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13554 unsigned long __must_check __copy_from_user_ll_nocache
13555- (void *to, const void __user *from, unsigned long n);
13556+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13557 unsigned long __must_check __copy_from_user_ll_nocache_nozero
13558- (void *to, const void __user *from, unsigned long n);
13559+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13560
13561 /**
13562 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
13563@@ -42,8 +42,15 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13564 */
13565
13566 static __always_inline unsigned long __must_check
13567+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13568+static __always_inline unsigned long __must_check
13569 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13570 {
13571+ pax_track_stack();
13572+
13573+ if ((long)n < 0)
13574+ return n;
13575+
13576 if (__builtin_constant_p(n)) {
13577 unsigned long ret;
13578
13579@@ -62,6 +69,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13580 return ret;
13581 }
13582 }
13583+ if (!__builtin_constant_p(n))
13584+ check_object_size(from, n, true);
13585 return __copy_to_user_ll(to, from, n);
13586 }
13587
13588@@ -80,15 +89,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13589 * On success, this will be zero.
13590 */
13591 static __always_inline unsigned long __must_check
13592+__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13593+static __always_inline unsigned long __must_check
13594 __copy_to_user(void __user *to, const void *from, unsigned long n)
13595 {
13596 might_fault();
13597+
13598 return __copy_to_user_inatomic(to, from, n);
13599 }
13600
13601 static __always_inline unsigned long
13602+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13603+static __always_inline unsigned long
13604 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13605 {
13606+ if ((long)n < 0)
13607+ return n;
13608+
13609 /* Avoid zeroing the tail if the copy fails..
13610 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13611 * but as the zeroing behaviour is only significant when n is not
13612@@ -135,9 +152,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13613 * for explanation of why this is needed.
13614 */
13615 static __always_inline unsigned long
13616+__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13617+static __always_inline unsigned long
13618 __copy_from_user(void *to, const void __user *from, unsigned long n)
13619 {
13620 might_fault();
13621+
13622+ pax_track_stack();
13623+
13624+ if ((long)n < 0)
13625+ return n;
13626+
13627 if (__builtin_constant_p(n)) {
13628 unsigned long ret;
13629
13630@@ -153,13 +178,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13631 return ret;
13632 }
13633 }
13634+ if (!__builtin_constant_p(n))
13635+ check_object_size(to, n, false);
13636 return __copy_from_user_ll(to, from, n);
13637 }
13638
13639 static __always_inline unsigned long __copy_from_user_nocache(void *to,
13640+ const void __user *from, unsigned long n) __size_overflow(3);
13641+static __always_inline unsigned long __copy_from_user_nocache(void *to,
13642 const void __user *from, unsigned long n)
13643 {
13644 might_fault();
13645+
13646+ if ((long)n < 0)
13647+ return n;
13648+
13649 if (__builtin_constant_p(n)) {
13650 unsigned long ret;
13651
13652@@ -180,20 +213,75 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13653
13654 static __always_inline unsigned long
13655 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13656+ unsigned long n) __size_overflow(3);
13657+static __always_inline unsigned long
13658+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
13659 unsigned long n)
13660 {
13661- return __copy_from_user_ll_nocache_nozero(to, from, n);
13662+ if ((long)n < 0)
13663+ return n;
13664+
13665+ return __copy_from_user_ll_nocache_nozero(to, from, n);
13666+}
13667+
13668+/**
13669+ * copy_to_user: - Copy a block of data into user space.
13670+ * @to: Destination address, in user space.
13671+ * @from: Source address, in kernel space.
13672+ * @n: Number of bytes to copy.
13673+ *
13674+ * Context: User context only. This function may sleep.
13675+ *
13676+ * Copy data from kernel space to user space.
13677+ *
13678+ * Returns number of bytes that could not be copied.
13679+ * On success, this will be zero.
13680+ */
13681+static __always_inline unsigned long __must_check
13682+copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13683+static __always_inline unsigned long __must_check
13684+copy_to_user(void __user *to, const void *from, unsigned long n)
13685+{
13686+ if (access_ok(VERIFY_WRITE, to, n))
13687+ n = __copy_to_user(to, from, n);
13688+ return n;
13689+}
13690+
13691+/**
13692+ * copy_from_user: - Copy a block of data from user space.
13693+ * @to: Destination address, in kernel space.
13694+ * @from: Source address, in user space.
13695+ * @n: Number of bytes to copy.
13696+ *
13697+ * Context: User context only. This function may sleep.
13698+ *
13699+ * Copy data from user space to kernel space.
13700+ *
13701+ * Returns number of bytes that could not be copied.
13702+ * On success, this will be zero.
13703+ *
13704+ * If some data could not be copied, this function will pad the copied
13705+ * data to the requested size using zero bytes.
13706+ */
13707+static __always_inline unsigned long __must_check
13708+copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13709+static __always_inline unsigned long __must_check
13710+copy_from_user(void *to, const void __user *from, unsigned long n)
13711+{
13712+ if (access_ok(VERIFY_READ, from, n))
13713+ n = __copy_from_user(to, from, n);
13714+ else if ((long)n > 0) {
13715+ if (!__builtin_constant_p(n))
13716+ check_object_size(to, n, false);
13717+ memset(to, 0, n);
13718+ }
13719+ return n;
13720 }
13721
13722-unsigned long __must_check copy_to_user(void __user *to,
13723- const void *from, unsigned long n);
13724-unsigned long __must_check copy_from_user(void *to,
13725- const void __user *from,
13726- unsigned long n);
13727 long __must_check strncpy_from_user(char *dst, const char __user *src,
13728- long count);
13729+ unsigned long count);
13730 long __must_check __strncpy_from_user(char *dst,
13731- const char __user *src, long count);
13732+ const char __user *src, unsigned long count);
13733
13734 /**
13735 * strlen_user: - Get the size of a string in user space.
13736@@ -211,8 +299,8 @@ long __must_check __strncpy_from_user(char *dst,
13737 */
13738 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13739
13740-long strnlen_user(const char __user *str, long n);
13741-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13742-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13743+long strnlen_user(const char __user *str, unsigned long n);
13744+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13745+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13746
13747 #endif /* _ASM_X86_UACCESS_32_H */
13748diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13749index db24b21..7cd829e 100644
13750--- a/arch/x86/include/asm/uaccess_64.h
13751+++ b/arch/x86/include/asm/uaccess_64.h
13752@@ -9,6 +9,9 @@
13753 #include <linux/prefetch.h>
13754 #include <linux/lockdep.h>
13755 #include <asm/page.h>
13756+#include <asm/pgtable.h>
13757+
13758+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13759
13760 /*
13761 * Copy To/From Userspace
13762@@ -16,116 +19,215 @@
13763
13764 /* Handles exceptions in both to and from, but doesn't do access_ok */
13765 __must_check unsigned long
13766-copy_user_generic(void *to, const void *from, unsigned len);
13767+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13768
13769 __must_check unsigned long
13770-copy_to_user(void __user *to, const void *from, unsigned len);
13771-__must_check unsigned long
13772-copy_from_user(void *to, const void __user *from, unsigned len);
13773-__must_check unsigned long
13774-copy_in_user(void __user *to, const void __user *from, unsigned len);
13775+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13776
13777 static __always_inline __must_check
13778-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13779+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13780+static __always_inline __must_check
13781+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13782 {
13783- int ret = 0;
13784+ unsigned ret = 0;
13785
13786 might_fault();
13787- if (!__builtin_constant_p(size))
13788- return copy_user_generic(dst, (__force void *)src, size);
13789+
13790+ if (size > INT_MAX)
13791+ return size;
13792+
13793+#ifdef CONFIG_PAX_MEMORY_UDEREF
13794+ if (!__access_ok(VERIFY_READ, src, size))
13795+ return size;
13796+#endif
13797+
13798+ if (!__builtin_constant_p(size)) {
13799+ check_object_size(dst, size, false);
13800+
13801+#ifdef CONFIG_PAX_MEMORY_UDEREF
13802+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13803+ src += PAX_USER_SHADOW_BASE;
13804+#endif
13805+
13806+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13807+ }
13808 switch (size) {
13809- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13810+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13811 ret, "b", "b", "=q", 1);
13812 return ret;
13813- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13814+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13815 ret, "w", "w", "=r", 2);
13816 return ret;
13817- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13818+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13819 ret, "l", "k", "=r", 4);
13820 return ret;
13821- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13822+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13823 ret, "q", "", "=r", 8);
13824 return ret;
13825 case 10:
13826- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13827+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13828 ret, "q", "", "=r", 10);
13829 if (unlikely(ret))
13830 return ret;
13831 __get_user_asm(*(u16 *)(8 + (char *)dst),
13832- (u16 __user *)(8 + (char __user *)src),
13833+ (const u16 __user *)(8 + (const char __user *)src),
13834 ret, "w", "w", "=r", 2);
13835 return ret;
13836 case 16:
13837- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13838+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13839 ret, "q", "", "=r", 16);
13840 if (unlikely(ret))
13841 return ret;
13842 __get_user_asm(*(u64 *)(8 + (char *)dst),
13843- (u64 __user *)(8 + (char __user *)src),
13844+ (const u64 __user *)(8 + (const char __user *)src),
13845 ret, "q", "", "=r", 8);
13846 return ret;
13847 default:
13848- return copy_user_generic(dst, (__force void *)src, size);
13849+
13850+#ifdef CONFIG_PAX_MEMORY_UDEREF
13851+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13852+ src += PAX_USER_SHADOW_BASE;
13853+#endif
13854+
13855+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13856 }
13857 }
13858
13859 static __always_inline __must_check
13860-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13861+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13862+static __always_inline __must_check
13863+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13864 {
13865- int ret = 0;
13866+ unsigned ret = 0;
13867
13868 might_fault();
13869- if (!__builtin_constant_p(size))
13870- return copy_user_generic((__force void *)dst, src, size);
13871+
13872+ pax_track_stack();
13873+
13874+ if (size > INT_MAX)
13875+ return size;
13876+
13877+#ifdef CONFIG_PAX_MEMORY_UDEREF
13878+ if (!__access_ok(VERIFY_WRITE, dst, size))
13879+ return size;
13880+#endif
13881+
13882+ if (!__builtin_constant_p(size)) {
13883+ check_object_size(src, size, true);
13884+
13885+#ifdef CONFIG_PAX_MEMORY_UDEREF
13886+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13887+ dst += PAX_USER_SHADOW_BASE;
13888+#endif
13889+
13890+ return copy_user_generic((__force_kernel void *)dst, src, size);
13891+ }
13892 switch (size) {
13893- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13894+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13895 ret, "b", "b", "iq", 1);
13896 return ret;
13897- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13898+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13899 ret, "w", "w", "ir", 2);
13900 return ret;
13901- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13902+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13903 ret, "l", "k", "ir", 4);
13904 return ret;
13905- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13906+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13907 ret, "q", "", "er", 8);
13908 return ret;
13909 case 10:
13910- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13911+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13912 ret, "q", "", "er", 10);
13913 if (unlikely(ret))
13914 return ret;
13915 asm("":::"memory");
13916- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13917+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13918 ret, "w", "w", "ir", 2);
13919 return ret;
13920 case 16:
13921- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13922+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13923 ret, "q", "", "er", 16);
13924 if (unlikely(ret))
13925 return ret;
13926 asm("":::"memory");
13927- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13928+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13929 ret, "q", "", "er", 8);
13930 return ret;
13931 default:
13932- return copy_user_generic((__force void *)dst, src, size);
13933+
13934+#ifdef CONFIG_PAX_MEMORY_UDEREF
13935+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13936+ dst += PAX_USER_SHADOW_BASE;
13937+#endif
13938+
13939+ return copy_user_generic((__force_kernel void *)dst, src, size);
13940 }
13941 }
13942
13943 static __always_inline __must_check
13944-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13945+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13946+static __always_inline __must_check
13947+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
13948 {
13949- int ret = 0;
13950+ if (access_ok(VERIFY_WRITE, to, len))
13951+ len = __copy_to_user(to, from, len);
13952+ return len;
13953+}
13954
13955+static __always_inline __must_check
13956+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13957+static __always_inline __must_check
13958+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
13959+{
13960 might_fault();
13961- if (!__builtin_constant_p(size))
13962- return copy_user_generic((__force void *)dst,
13963- (__force void *)src, size);
13964+
13965+ if (access_ok(VERIFY_READ, from, len))
13966+ len = __copy_from_user(to, from, len);
13967+ else if (len < INT_MAX) {
13968+ if (!__builtin_constant_p(len))
13969+ check_object_size(to, len, false);
13970+ memset(to, 0, len);
13971+ }
13972+ return len;
13973+}
13974+
13975+static __always_inline __must_check
13976+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13977+static __always_inline __must_check
13978+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13979+{
13980+ unsigned ret = 0;
13981+
13982+ might_fault();
13983+
13984+ pax_track_stack();
13985+
13986+ if (size > INT_MAX)
13987+ return size;
13988+
13989+#ifdef CONFIG_PAX_MEMORY_UDEREF
13990+ if (!__access_ok(VERIFY_READ, src, size))
13991+ return size;
13992+ if (!__access_ok(VERIFY_WRITE, dst, size))
13993+ return size;
13994+#endif
13995+
13996+ if (!__builtin_constant_p(size)) {
13997+
13998+#ifdef CONFIG_PAX_MEMORY_UDEREF
13999+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14000+ src += PAX_USER_SHADOW_BASE;
14001+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14002+ dst += PAX_USER_SHADOW_BASE;
14003+#endif
14004+
14005+ return copy_user_generic((__force_kernel void *)dst,
14006+ (__force_kernel const void *)src, size);
14007+ }
14008 switch (size) {
14009 case 1: {
14010 u8 tmp;
14011- __get_user_asm(tmp, (u8 __user *)src,
14012+ __get_user_asm(tmp, (const u8 __user *)src,
14013 ret, "b", "b", "=q", 1);
14014 if (likely(!ret))
14015 __put_user_asm(tmp, (u8 __user *)dst,
14016@@ -134,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14017 }
14018 case 2: {
14019 u16 tmp;
14020- __get_user_asm(tmp, (u16 __user *)src,
14021+ __get_user_asm(tmp, (const u16 __user *)src,
14022 ret, "w", "w", "=r", 2);
14023 if (likely(!ret))
14024 __put_user_asm(tmp, (u16 __user *)dst,
14025@@ -144,7 +246,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14026
14027 case 4: {
14028 u32 tmp;
14029- __get_user_asm(tmp, (u32 __user *)src,
14030+ __get_user_asm(tmp, (const u32 __user *)src,
14031 ret, "l", "k", "=r", 4);
14032 if (likely(!ret))
14033 __put_user_asm(tmp, (u32 __user *)dst,
14034@@ -153,7 +255,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14035 }
14036 case 8: {
14037 u64 tmp;
14038- __get_user_asm(tmp, (u64 __user *)src,
14039+ __get_user_asm(tmp, (const u64 __user *)src,
14040 ret, "q", "", "=r", 8);
14041 if (likely(!ret))
14042 __put_user_asm(tmp, (u64 __user *)dst,
14043@@ -161,48 +263,105 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14044 return ret;
14045 }
14046 default:
14047- return copy_user_generic((__force void *)dst,
14048- (__force void *)src, size);
14049+
14050+#ifdef CONFIG_PAX_MEMORY_UDEREF
14051+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14052+ src += PAX_USER_SHADOW_BASE;
14053+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14054+ dst += PAX_USER_SHADOW_BASE;
14055+#endif
14056+
14057+ return copy_user_generic((__force_kernel void *)dst,
14058+ (__force_kernel const void *)src, size);
14059 }
14060 }
14061
14062 __must_check long
14063-strncpy_from_user(char *dst, const char __user *src, long count);
14064+strncpy_from_user(char *dst, const char __user *src, unsigned long count);
14065 __must_check long
14066-__strncpy_from_user(char *dst, const char __user *src, long count);
14067-__must_check long strnlen_user(const char __user *str, long n);
14068-__must_check long __strnlen_user(const char __user *str, long n);
14069+__strncpy_from_user(char *dst, const char __user *src, unsigned long count);
14070+__must_check long strnlen_user(const char __user *str, unsigned long n);
14071+__must_check long __strnlen_user(const char __user *str, unsigned long n);
14072 __must_check long strlen_user(const char __user *str);
14073-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
14074-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
14075+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14076+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14077
14078-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
14079- unsigned size);
14080+static __must_check __always_inline unsigned long
14081+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14082+static __must_check __always_inline unsigned long
14083+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14084+{
14085+ pax_track_stack();
14086+
14087+ if (size > INT_MAX)
14088+ return size;
14089+
14090+#ifdef CONFIG_PAX_MEMORY_UDEREF
14091+ if (!__access_ok(VERIFY_READ, src, size))
14092+ return size;
14093
14094-static __must_check __always_inline int
14095-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14096+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14097+ src += PAX_USER_SHADOW_BASE;
14098+#endif
14099+
14100+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
14101+}
14102+
14103+static __must_check __always_inline unsigned long
14104+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
14105+static __must_check __always_inline unsigned long
14106+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14107 {
14108- return copy_user_generic((__force void *)dst, src, size);
14109+ if (size > INT_MAX)
14110+ return size;
14111+
14112+#ifdef CONFIG_PAX_MEMORY_UDEREF
14113+ if (!__access_ok(VERIFY_WRITE, dst, size))
14114+ return size;
14115+
14116+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14117+ dst += PAX_USER_SHADOW_BASE;
14118+#endif
14119+
14120+ return copy_user_generic((__force_kernel void *)dst, src, size);
14121 }
14122
14123-extern long __copy_user_nocache(void *dst, const void __user *src,
14124- unsigned size, int zerorest);
14125+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14126+ unsigned long size, int zerorest) __size_overflow(3);
14127
14128-static inline int
14129-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14130+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14131+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14132 {
14133 might_sleep();
14134+
14135+ if (size > INT_MAX)
14136+ return size;
14137+
14138+#ifdef CONFIG_PAX_MEMORY_UDEREF
14139+ if (!__access_ok(VERIFY_READ, src, size))
14140+ return size;
14141+#endif
14142+
14143 return __copy_user_nocache(dst, src, size, 1);
14144 }
14145
14146-static inline int
14147-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14148- unsigned size)
14149+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14150+ unsigned long size) __size_overflow(3);
14151+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14152+ unsigned long size)
14153 {
14154+ if (size > INT_MAX)
14155+ return size;
14156+
14157+#ifdef CONFIG_PAX_MEMORY_UDEREF
14158+ if (!__access_ok(VERIFY_READ, src, size))
14159+ return size;
14160+#endif
14161+
14162 return __copy_user_nocache(dst, src, size, 0);
14163 }
14164
14165-unsigned long
14166-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14167+extern unsigned long
14168+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14169
14170 #endif /* _ASM_X86_UACCESS_64_H */
14171diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
14172index 9064052..786cfbc 100644
14173--- a/arch/x86/include/asm/vdso.h
14174+++ b/arch/x86/include/asm/vdso.h
14175@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
14176 #define VDSO32_SYMBOL(base, name) \
14177 ({ \
14178 extern const char VDSO32_##name[]; \
14179- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14180+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14181 })
14182 #endif
14183
14184diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
14185index 3d61e20..9507180 100644
14186--- a/arch/x86/include/asm/vgtod.h
14187+++ b/arch/x86/include/asm/vgtod.h
14188@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
14189 int sysctl_enabled;
14190 struct timezone sys_tz;
14191 struct { /* extract of a clocksource struct */
14192+ char name[8];
14193 cycle_t (*vread)(void);
14194 cycle_t cycle_last;
14195 cycle_t mask;
14196diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
14197index 61e08c0..b0da582 100644
14198--- a/arch/x86/include/asm/vmi.h
14199+++ b/arch/x86/include/asm/vmi.h
14200@@ -191,6 +191,7 @@ struct vrom_header {
14201 u8 reserved[96]; /* Reserved for headers */
14202 char vmi_init[8]; /* VMI_Init jump point */
14203 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
14204+ char rom_data[8048]; /* rest of the option ROM */
14205 } __attribute__((packed));
14206
14207 struct pnp_header {
14208diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
14209index c6e0bee..fcb9f74 100644
14210--- a/arch/x86/include/asm/vmi_time.h
14211+++ b/arch/x86/include/asm/vmi_time.h
14212@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
14213 int (*wallclock_updated)(void);
14214 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
14215 void (*cancel_alarm)(u32 flags);
14216-} vmi_timer_ops;
14217+} __no_const vmi_timer_ops;
14218
14219 /* Prototypes */
14220 extern void __init vmi_time_init(void);
14221diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
14222index d0983d2..1f7c9e9 100644
14223--- a/arch/x86/include/asm/vsyscall.h
14224+++ b/arch/x86/include/asm/vsyscall.h
14225@@ -15,9 +15,10 @@ enum vsyscall_num {
14226
14227 #ifdef __KERNEL__
14228 #include <linux/seqlock.h>
14229+#include <linux/getcpu.h>
14230+#include <linux/time.h>
14231
14232 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
14233-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
14234
14235 /* Definitions for CONFIG_GENERIC_TIME definitions */
14236 #define __section_vsyscall_gtod_data __attribute__ \
14237@@ -31,7 +32,6 @@ enum vsyscall_num {
14238 #define VGETCPU_LSL 2
14239
14240 extern int __vgetcpu_mode;
14241-extern volatile unsigned long __jiffies;
14242
14243 /* kernel space (writeable) */
14244 extern int vgetcpu_mode;
14245@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
14246
14247 extern void map_vsyscall(void);
14248
14249+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
14250+extern time_t vtime(time_t *t);
14251+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
14252 #endif /* __KERNEL__ */
14253
14254 #endif /* _ASM_X86_VSYSCALL_H */
14255diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14256index 2c756fd..3377e37 100644
14257--- a/arch/x86/include/asm/x86_init.h
14258+++ b/arch/x86/include/asm/x86_init.h
14259@@ -28,7 +28,7 @@ struct x86_init_mpparse {
14260 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
14261 void (*find_smp_config)(unsigned int reserve);
14262 void (*get_smp_config)(unsigned int early);
14263-};
14264+} __no_const;
14265
14266 /**
14267 * struct x86_init_resources - platform specific resource related ops
14268@@ -42,7 +42,7 @@ struct x86_init_resources {
14269 void (*probe_roms)(void);
14270 void (*reserve_resources)(void);
14271 char *(*memory_setup)(void);
14272-};
14273+} __no_const;
14274
14275 /**
14276 * struct x86_init_irqs - platform specific interrupt setup
14277@@ -55,7 +55,7 @@ struct x86_init_irqs {
14278 void (*pre_vector_init)(void);
14279 void (*intr_init)(void);
14280 void (*trap_init)(void);
14281-};
14282+} __no_const;
14283
14284 /**
14285 * struct x86_init_oem - oem platform specific customizing functions
14286@@ -65,7 +65,7 @@ struct x86_init_irqs {
14287 struct x86_init_oem {
14288 void (*arch_setup)(void);
14289 void (*banner)(void);
14290-};
14291+} __no_const;
14292
14293 /**
14294 * struct x86_init_paging - platform specific paging functions
14295@@ -75,7 +75,7 @@ struct x86_init_oem {
14296 struct x86_init_paging {
14297 void (*pagetable_setup_start)(pgd_t *base);
14298 void (*pagetable_setup_done)(pgd_t *base);
14299-};
14300+} __no_const;
14301
14302 /**
14303 * struct x86_init_timers - platform specific timer setup
14304@@ -88,7 +88,7 @@ struct x86_init_timers {
14305 void (*setup_percpu_clockev)(void);
14306 void (*tsc_pre_init)(void);
14307 void (*timer_init)(void);
14308-};
14309+} __no_const;
14310
14311 /**
14312 * struct x86_init_ops - functions for platform specific setup
14313@@ -101,7 +101,7 @@ struct x86_init_ops {
14314 struct x86_init_oem oem;
14315 struct x86_init_paging paging;
14316 struct x86_init_timers timers;
14317-};
14318+} __no_const;
14319
14320 /**
14321 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14322@@ -109,7 +109,7 @@ struct x86_init_ops {
14323 */
14324 struct x86_cpuinit_ops {
14325 void (*setup_percpu_clockev)(void);
14326-};
14327+} __no_const;
14328
14329 /**
14330 * struct x86_platform_ops - platform specific runtime functions
14331@@ -121,7 +121,7 @@ struct x86_platform_ops {
14332 unsigned long (*calibrate_tsc)(void);
14333 unsigned long (*get_wallclock)(void);
14334 int (*set_wallclock)(unsigned long nowtime);
14335-};
14336+} __no_const;
14337
14338 extern struct x86_init_ops x86_init;
14339 extern struct x86_cpuinit_ops x86_cpuinit;
14340diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14341index 727acc1..554f3eb 100644
14342--- a/arch/x86/include/asm/xsave.h
14343+++ b/arch/x86/include/asm/xsave.h
14344@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
14345 static inline int xsave_user(struct xsave_struct __user *buf)
14346 {
14347 int err;
14348+
14349+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14350+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
14351+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
14352+#endif
14353+
14354 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14355 "2:\n"
14356 ".section .fixup,\"ax\"\n"
14357@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14358 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14359 {
14360 int err;
14361- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14362+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14363 u32 lmask = mask;
14364 u32 hmask = mask >> 32;
14365
14366+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14367+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
14368+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
14369+#endif
14370+
14371 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14372 "2:\n"
14373 ".section .fixup,\"ax\"\n"
14374diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
14375index 6a564ac..9b1340c 100644
14376--- a/arch/x86/kernel/acpi/realmode/Makefile
14377+++ b/arch/x86/kernel/acpi/realmode/Makefile
14378@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
14379 $(call cc-option, -fno-stack-protector) \
14380 $(call cc-option, -mpreferred-stack-boundary=2)
14381 KBUILD_CFLAGS += $(call cc-option, -m32)
14382+ifdef CONSTIFY_PLUGIN
14383+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
14384+endif
14385 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
14386 GCOV_PROFILE := n
14387
14388diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
14389index 580b4e2..d4129e4 100644
14390--- a/arch/x86/kernel/acpi/realmode/wakeup.S
14391+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
14392@@ -91,6 +91,9 @@ _start:
14393 /* Do any other stuff... */
14394
14395 #ifndef CONFIG_64BIT
14396+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
14397+ call verify_cpu
14398+
14399 /* This could also be done in C code... */
14400 movl pmode_cr3, %eax
14401 movl %eax, %cr3
14402@@ -104,7 +107,7 @@ _start:
14403 movl %eax, %ecx
14404 orl %edx, %ecx
14405 jz 1f
14406- movl $0xc0000080, %ecx
14407+ mov $MSR_EFER, %ecx
14408 wrmsr
14409 1:
14410
14411@@ -114,6 +117,7 @@ _start:
14412 movl pmode_cr0, %eax
14413 movl %eax, %cr0
14414 jmp pmode_return
14415+# include "../../verify_cpu.S"
14416 #else
14417 pushw $0
14418 pushw trampoline_segment
14419diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14420index ca93638..7042f24 100644
14421--- a/arch/x86/kernel/acpi/sleep.c
14422+++ b/arch/x86/kernel/acpi/sleep.c
14423@@ -11,11 +11,12 @@
14424 #include <linux/cpumask.h>
14425 #include <asm/segment.h>
14426 #include <asm/desc.h>
14427+#include <asm/e820.h>
14428
14429 #include "realmode/wakeup.h"
14430 #include "sleep.h"
14431
14432-unsigned long acpi_wakeup_address;
14433+unsigned long acpi_wakeup_address = 0x2000;
14434 unsigned long acpi_realmode_flags;
14435
14436 /* address in low memory of the wakeup routine. */
14437@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
14438 #else /* CONFIG_64BIT */
14439 header->trampoline_segment = setup_trampoline() >> 4;
14440 #ifdef CONFIG_SMP
14441- stack_start.sp = temp_stack + sizeof(temp_stack);
14442+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14443+
14444+ pax_open_kernel();
14445 early_gdt_descr.address =
14446 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14447+ pax_close_kernel();
14448+
14449 initial_gs = per_cpu_offset(smp_processor_id());
14450 #endif
14451 initial_code = (unsigned long)wakeup_long64;
14452@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
14453 return;
14454 }
14455
14456- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
14457-
14458- if (!acpi_realmode) {
14459- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
14460- return;
14461- }
14462-
14463- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
14464+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
14465+	acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
14466 }
14467
14468
14469diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14470index 8ded418..079961e 100644
14471--- a/arch/x86/kernel/acpi/wakeup_32.S
14472+++ b/arch/x86/kernel/acpi/wakeup_32.S
14473@@ -30,13 +30,11 @@ wakeup_pmode_return:
14474 # and restore the stack ... but you need gdt for this to work
14475 movl saved_context_esp, %esp
14476
14477- movl %cs:saved_magic, %eax
14478- cmpl $0x12345678, %eax
14479+ cmpl $0x12345678, saved_magic
14480 jne bogus_magic
14481
14482 # jump to place where we left off
14483- movl saved_eip, %eax
14484- jmp *%eax
14485+ jmp *(saved_eip)
14486
14487 bogus_magic:
14488 jmp bogus_magic
14489diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14490index de7353c..075da5f 100644
14491--- a/arch/x86/kernel/alternative.c
14492+++ b/arch/x86/kernel/alternative.c
14493@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14494
14495 BUG_ON(p->len > MAX_PATCH_LEN);
14496 /* prep the buffer with the original instructions */
14497- memcpy(insnbuf, p->instr, p->len);
14498+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14499 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14500 (unsigned long)p->instr, p->len);
14501
14502@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
14503 if (smp_alt_once)
14504 free_init_pages("SMP alternatives",
14505 (unsigned long)__smp_locks,
14506- (unsigned long)__smp_locks_end);
14507+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14508
14509 restart_nmi();
14510 }
14511@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
14512 * instructions. And on the local CPU you need to be protected again NMI or MCE
14513 * handlers seeing an inconsistent instruction while you patch.
14514 */
14515-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14516+static void *__kprobes text_poke_early(void *addr, const void *opcode,
14517 size_t len)
14518 {
14519 unsigned long flags;
14520 local_irq_save(flags);
14521- memcpy(addr, opcode, len);
14522+
14523+ pax_open_kernel();
14524+ memcpy(ktla_ktva(addr), opcode, len);
14525 sync_core();
14526+ pax_close_kernel();
14527+
14528 local_irq_restore(flags);
14529 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14530 that causes hangs on some VIA CPUs. */
14531@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14532 */
14533 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14534 {
14535- unsigned long flags;
14536- char *vaddr;
14537+ unsigned char *vaddr = ktla_ktva(addr);
14538 struct page *pages[2];
14539- int i;
14540+ size_t i;
14541
14542 if (!core_kernel_text((unsigned long)addr)) {
14543- pages[0] = vmalloc_to_page(addr);
14544- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14545+ pages[0] = vmalloc_to_page(vaddr);
14546+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14547 } else {
14548- pages[0] = virt_to_page(addr);
14549+ pages[0] = virt_to_page(vaddr);
14550 WARN_ON(!PageReserved(pages[0]));
14551- pages[1] = virt_to_page(addr + PAGE_SIZE);
14552+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14553 }
14554 BUG_ON(!pages[0]);
14555- local_irq_save(flags);
14556- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14557- if (pages[1])
14558- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14559- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14560- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14561- clear_fixmap(FIX_TEXT_POKE0);
14562- if (pages[1])
14563- clear_fixmap(FIX_TEXT_POKE1);
14564- local_flush_tlb();
14565- sync_core();
14566- /* Could also do a CLFLUSH here to speed up CPU recovery; but
14567- that causes hangs on some VIA CPUs. */
14568+ text_poke_early(addr, opcode, len);
14569 for (i = 0; i < len; i++)
14570- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14571- local_irq_restore(flags);
14572+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14573 return addr;
14574 }
14575diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
14576index 3a44b75..1601800 100644
14577--- a/arch/x86/kernel/amd_iommu.c
14578+++ b/arch/x86/kernel/amd_iommu.c
14579@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
14580 }
14581 }
14582
14583-static struct dma_map_ops amd_iommu_dma_ops = {
14584+static const struct dma_map_ops amd_iommu_dma_ops = {
14585 .alloc_coherent = alloc_coherent,
14586 .free_coherent = free_coherent,
14587 .map_page = map_page,
14588diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14589index 1d2d670..8e3f477 100644
14590--- a/arch/x86/kernel/apic/apic.c
14591+++ b/arch/x86/kernel/apic/apic.c
14592@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
14593 /*
14594 * Debug level, exported for io_apic.c
14595 */
14596-unsigned int apic_verbosity;
14597+int apic_verbosity;
14598
14599 int pic_mode;
14600
14601@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14602 apic_write(APIC_ESR, 0);
14603 v1 = apic_read(APIC_ESR);
14604 ack_APIC_irq();
14605- atomic_inc(&irq_err_count);
14606+ atomic_inc_unchecked(&irq_err_count);
14607
14608 /*
14609 * Here is what the APIC error bits mean:
14610@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
14611 u16 *bios_cpu_apicid;
14612 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
14613
14614+ pax_track_stack();
14615+
14616 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
14617 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
14618
14619diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14620index 8928d97..f799cea 100644
14621--- a/arch/x86/kernel/apic/io_apic.c
14622+++ b/arch/x86/kernel/apic/io_apic.c
14623@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
14624 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
14625 GFP_ATOMIC);
14626 if (!ioapic_entries)
14627- return 0;
14628+ return NULL;
14629
14630 for (apic = 0; apic < nr_ioapics; apic++) {
14631 ioapic_entries[apic] =
14632@@ -733,7 +733,7 @@ nomem:
14633 kfree(ioapic_entries[apic]);
14634 kfree(ioapic_entries);
14635
14636- return 0;
14637+ return NULL;
14638 }
14639
14640 /*
14641@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14642 }
14643 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14644
14645-void lock_vector_lock(void)
14646+void lock_vector_lock(void) __acquires(vector_lock)
14647 {
14648 /* Used to the online set of cpus does not change
14649 * during assign_irq_vector.
14650@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
14651 spin_lock(&vector_lock);
14652 }
14653
14654-void unlock_vector_lock(void)
14655+void unlock_vector_lock(void) __releases(vector_lock)
14656 {
14657 spin_unlock(&vector_lock);
14658 }
14659@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
14660 ack_APIC_irq();
14661 }
14662
14663-atomic_t irq_mis_count;
14664+atomic_unchecked_t irq_mis_count;
14665
14666 static void ack_apic_level(unsigned int irq)
14667 {
14668@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
14669
14670 /* Tail end of version 0x11 I/O APIC bug workaround */
14671 if (!(v & (1 << (i & 0x1f)))) {
14672- atomic_inc(&irq_mis_count);
14673+ atomic_inc_unchecked(&irq_mis_count);
14674 spin_lock(&ioapic_lock);
14675 __mask_and_edge_IO_APIC_irq(cfg);
14676 __unmask_and_level_IO_APIC_irq(cfg);
14677diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14678index 151ace6..f317474 100644
14679--- a/arch/x86/kernel/apm_32.c
14680+++ b/arch/x86/kernel/apm_32.c
14681@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
14682 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14683 * even though they are called in protected mode.
14684 */
14685-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14686+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14687 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14688
14689 static const char driver_version[] = "1.16ac"; /* no spaces */
14690@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14691 BUG_ON(cpu != 0);
14692 gdt = get_cpu_gdt_table(cpu);
14693 save_desc_40 = gdt[0x40 / 8];
14694+
14695+ pax_open_kernel();
14696 gdt[0x40 / 8] = bad_bios_desc;
14697+ pax_close_kernel();
14698
14699 apm_irq_save(flags);
14700 APM_DO_SAVE_SEGS;
14701@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14702 &call->esi);
14703 APM_DO_RESTORE_SEGS;
14704 apm_irq_restore(flags);
14705+
14706+ pax_open_kernel();
14707 gdt[0x40 / 8] = save_desc_40;
14708+ pax_close_kernel();
14709+
14710 put_cpu();
14711
14712 return call->eax & 0xff;
14713@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14714 BUG_ON(cpu != 0);
14715 gdt = get_cpu_gdt_table(cpu);
14716 save_desc_40 = gdt[0x40 / 8];
14717+
14718+ pax_open_kernel();
14719 gdt[0x40 / 8] = bad_bios_desc;
14720+ pax_close_kernel();
14721
14722 apm_irq_save(flags);
14723 APM_DO_SAVE_SEGS;
14724@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14725 &call->eax);
14726 APM_DO_RESTORE_SEGS;
14727 apm_irq_restore(flags);
14728+
14729+ pax_open_kernel();
14730 gdt[0x40 / 8] = save_desc_40;
14731+ pax_close_kernel();
14732+
14733 put_cpu();
14734 return error;
14735 }
14736@@ -975,7 +989,7 @@ recalc:
14737
14738 static void apm_power_off(void)
14739 {
14740- unsigned char po_bios_call[] = {
14741+ const unsigned char po_bios_call[] = {
14742 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
14743 0x8e, 0xd0, /* movw ax,ss */
14744 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
14745@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
14746 * code to that CPU.
14747 */
14748 gdt = get_cpu_gdt_table(0);
14749+
14750+ pax_open_kernel();
14751 set_desc_base(&gdt[APM_CS >> 3],
14752 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14753 set_desc_base(&gdt[APM_CS_16 >> 3],
14754 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14755 set_desc_base(&gdt[APM_DS >> 3],
14756 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14757+ pax_close_kernel();
14758
14759 proc_create("apm", 0, NULL, &apm_file_ops);
14760
14761diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
14762index dfdbf64..9b2b6ce 100644
14763--- a/arch/x86/kernel/asm-offsets_32.c
14764+++ b/arch/x86/kernel/asm-offsets_32.c
14765@@ -51,7 +51,6 @@ void foo(void)
14766 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
14767 BLANK();
14768
14769- OFFSET(TI_task, thread_info, task);
14770 OFFSET(TI_exec_domain, thread_info, exec_domain);
14771 OFFSET(TI_flags, thread_info, flags);
14772 OFFSET(TI_status, thread_info, status);
14773@@ -60,6 +59,8 @@ void foo(void)
14774 OFFSET(TI_restart_block, thread_info, restart_block);
14775 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
14776 OFFSET(TI_cpu, thread_info, cpu);
14777+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14778+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14779 BLANK();
14780
14781 OFFSET(GDS_size, desc_ptr, size);
14782@@ -99,6 +100,7 @@ void foo(void)
14783
14784 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14785 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14786+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14787 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
14788 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
14789 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
14790@@ -115,6 +117,11 @@ void foo(void)
14791 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
14792 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14793 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14794+
14795+#ifdef CONFIG_PAX_KERNEXEC
14796+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14797+#endif
14798+
14799 #endif
14800
14801 #ifdef CONFIG_XEN
14802diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14803index 4a6aeed..371de20 100644
14804--- a/arch/x86/kernel/asm-offsets_64.c
14805+++ b/arch/x86/kernel/asm-offsets_64.c
14806@@ -44,6 +44,8 @@ int main(void)
14807 ENTRY(addr_limit);
14808 ENTRY(preempt_count);
14809 ENTRY(status);
14810+ ENTRY(lowest_stack);
14811+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14812 #ifdef CONFIG_IA32_EMULATION
14813 ENTRY(sysenter_return);
14814 #endif
14815@@ -63,6 +65,18 @@ int main(void)
14816 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14817 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
14818 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14819+
14820+#ifdef CONFIG_PAX_KERNEXEC
14821+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14822+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14823+#endif
14824+
14825+#ifdef CONFIG_PAX_MEMORY_UDEREF
14826+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14827+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14828+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14829+#endif
14830+
14831 #endif
14832
14833
14834@@ -115,6 +129,7 @@ int main(void)
14835 ENTRY(cr8);
14836 BLANK();
14837 #undef ENTRY
14838+ DEFINE(TSS_size, sizeof(struct tss_struct));
14839 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
14840 BLANK();
14841 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
14842@@ -130,6 +145,7 @@ int main(void)
14843
14844 BLANK();
14845 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14846+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14847 #ifdef CONFIG_XEN
14848 BLANK();
14849 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14850diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14851index ff502cc..dc5133e 100644
14852--- a/arch/x86/kernel/cpu/Makefile
14853+++ b/arch/x86/kernel/cpu/Makefile
14854@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
14855 CFLAGS_REMOVE_common.o = -pg
14856 endif
14857
14858-# Make sure load_percpu_segment has no stackprotector
14859-nostackp := $(call cc-option, -fno-stack-protector)
14860-CFLAGS_common.o := $(nostackp)
14861-
14862 obj-y := intel_cacheinfo.o addon_cpuid_features.o
14863 obj-y += proc.o capflags.o powerflags.o common.o
14864 obj-y += vmware.o hypervisor.o sched.o
14865diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14866index 6e082dc..a0b5f36 100644
14867--- a/arch/x86/kernel/cpu/amd.c
14868+++ b/arch/x86/kernel/cpu/amd.c
14869@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14870 unsigned int size)
14871 {
14872 /* AMD errata T13 (order #21922) */
14873- if ((c->x86 == 6)) {
14874+ if (c->x86 == 6) {
14875 /* Duron Rev A0 */
14876 if (c->x86_model == 3 && c->x86_mask == 0)
14877 size = 64;
14878diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14879index 4e34d10..ba6bc97 100644
14880--- a/arch/x86/kernel/cpu/common.c
14881+++ b/arch/x86/kernel/cpu/common.c
14882@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14883
14884 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14885
14886-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14887-#ifdef CONFIG_X86_64
14888- /*
14889- * We need valid kernel segments for data and code in long mode too
14890- * IRET will check the segment types kkeil 2000/10/28
14891- * Also sysret mandates a special GDT layout
14892- *
14893- * TLS descriptors are currently at a different place compared to i386.
14894- * Hopefully nobody expects them at a fixed place (Wine?)
14895- */
14896- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14897- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14898- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14899- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14900- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14901- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14902-#else
14903- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14904- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14905- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14906- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14907- /*
14908- * Segments used for calling PnP BIOS have byte granularity.
14909- * They code segments and data segments have fixed 64k limits,
14910- * the transfer segment sizes are set at run time.
14911- */
14912- /* 32-bit code */
14913- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14914- /* 16-bit code */
14915- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14916- /* 16-bit data */
14917- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14918- /* 16-bit data */
14919- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14920- /* 16-bit data */
14921- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14922- /*
14923- * The APM segments have byte granularity and their bases
14924- * are set at run time. All have 64k limits.
14925- */
14926- /* 32-bit code */
14927- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14928- /* 16-bit code */
14929- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14930- /* data */
14931- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14932-
14933- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14934- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14935- GDT_STACK_CANARY_INIT
14936-#endif
14937-} };
14938-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14939-
14940 static int __init x86_xsave_setup(char *s)
14941 {
14942 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14943@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
14944 {
14945 struct desc_ptr gdt_descr;
14946
14947- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14948+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14949 gdt_descr.size = GDT_SIZE - 1;
14950 load_gdt(&gdt_descr);
14951 /* Reload the per-cpu base */
14952@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14953 /* Filter out anything that depends on CPUID levels we don't have */
14954 filter_cpuid_features(c, true);
14955
14956+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14957+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14958+#endif
14959+
14960 /* If the model name is still unset, do table lookup. */
14961 if (!c->x86_model_id[0]) {
14962 const char *p;
14963@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
14964 }
14965 __setup("clearcpuid=", setup_disablecpuid);
14966
14967+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14968+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14969+
14970 #ifdef CONFIG_X86_64
14971 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14972
14973@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14974 EXPORT_PER_CPU_SYMBOL(current_task);
14975
14976 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14977- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14978+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14979 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14980
14981 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14982@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14983 {
14984 memset(regs, 0, sizeof(struct pt_regs));
14985 regs->fs = __KERNEL_PERCPU;
14986- regs->gs = __KERNEL_STACK_CANARY;
14987+ savesegment(gs, regs->gs);
14988
14989 return regs;
14990 }
14991@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
14992 int i;
14993
14994 cpu = stack_smp_processor_id();
14995- t = &per_cpu(init_tss, cpu);
14996+ t = init_tss + cpu;
14997 orig_ist = &per_cpu(orig_ist, cpu);
14998
14999 #ifdef CONFIG_NUMA
15000@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
15001 switch_to_new_gdt(cpu);
15002 loadsegment(fs, 0);
15003
15004- load_idt((const struct desc_ptr *)&idt_descr);
15005+ load_idt(&idt_descr);
15006
15007 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
15008 syscall_init();
15009@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
15010 wrmsrl(MSR_KERNEL_GS_BASE, 0);
15011 barrier();
15012
15013- check_efer();
15014 if (cpu != 0)
15015 enable_x2apic();
15016
15017@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
15018 {
15019 int cpu = smp_processor_id();
15020 struct task_struct *curr = current;
15021- struct tss_struct *t = &per_cpu(init_tss, cpu);
15022+ struct tss_struct *t = init_tss + cpu;
15023 struct thread_struct *thread = &curr->thread;
15024
15025 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
15026diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15027index 6a77cca..4f4fca0 100644
15028--- a/arch/x86/kernel/cpu/intel.c
15029+++ b/arch/x86/kernel/cpu/intel.c
15030@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15031 * Update the IDT descriptor and reload the IDT so that
15032 * it uses the read-only mapped virtual address.
15033 */
15034- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15035+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15036 load_idt(&idt_descr);
15037 }
15038 #endif
15039diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15040index 417990f..96dc36b 100644
15041--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15042+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15043@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15044 return ret;
15045 }
15046
15047-static struct sysfs_ops sysfs_ops = {
15048+static const struct sysfs_ops sysfs_ops = {
15049 .show = show,
15050 .store = store,
15051 };
15052diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15053index 472763d..aa4d686 100644
15054--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
15055+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15056@@ -178,6 +178,8 @@ static void raise_mce(struct mce *m)
15057
15058 /* Error injection interface */
15059 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15060+ size_t usize, loff_t *off) __size_overflow(3);
15061+static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15062 size_t usize, loff_t *off)
15063 {
15064 struct mce m;
15065@@ -211,7 +213,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15066 static int inject_init(void)
15067 {
15068 printk(KERN_INFO "Machine check injector initialized\n");
15069- mce_chrdev_ops.write = mce_write;
15070+ pax_open_kernel();
15071+ *(void **)&mce_chrdev_ops.write = mce_write;
15072+ pax_close_kernel();
15073 register_die_notifier(&mce_raise_nb);
15074 return 0;
15075 }
15076diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15077index 0f16a2b..21740f5 100644
15078--- a/arch/x86/kernel/cpu/mcheck/mce.c
15079+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15080@@ -43,6 +43,7 @@
15081 #include <asm/ipi.h>
15082 #include <asm/mce.h>
15083 #include <asm/msr.h>
15084+#include <asm/local.h>
15085
15086 #include "mce-internal.h"
15087
15088@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
15089 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15090 m->cs, m->ip);
15091
15092- if (m->cs == __KERNEL_CS)
15093+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15094 print_symbol("{%s}", m->ip);
15095 pr_cont("\n");
15096 }
15097@@ -221,10 +222,10 @@ static void print_mce_tail(void)
15098
15099 #define PANIC_TIMEOUT 5 /* 5 seconds */
15100
15101-static atomic_t mce_paniced;
15102+static atomic_unchecked_t mce_paniced;
15103
15104 static int fake_panic;
15105-static atomic_t mce_fake_paniced;
15106+static atomic_unchecked_t mce_fake_paniced;
15107
15108 /* Panic in progress. Enable interrupts and wait for final IPI */
15109 static void wait_for_panic(void)
15110@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15111 /*
15112 * Make sure only one CPU runs in machine check panic
15113 */
15114- if (atomic_inc_return(&mce_paniced) > 1)
15115+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15116 wait_for_panic();
15117 barrier();
15118
15119@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15120 console_verbose();
15121 } else {
15122 /* Don't log too much for fake panic */
15123- if (atomic_inc_return(&mce_fake_paniced) > 1)
15124+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15125 return;
15126 }
15127 print_mce_head();
15128@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
15129 * might have been modified by someone else.
15130 */
15131 rmb();
15132- if (atomic_read(&mce_paniced))
15133+ if (atomic_read_unchecked(&mce_paniced))
15134 wait_for_panic();
15135 if (!monarch_timeout)
15136 goto out;
15137@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15138 }
15139
15140 /* Call the installed machine check handler for this CPU setup. */
15141-void (*machine_check_vector)(struct pt_regs *, long error_code) =
15142+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15143 unexpected_machine_check;
15144
15145 /*
15146@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15147 return;
15148 }
15149
15150+ pax_open_kernel();
15151 machine_check_vector = do_machine_check;
15152+ pax_close_kernel();
15153
15154 mce_init();
15155 mce_cpu_features(c);
15156@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15157 */
15158
15159 static DEFINE_SPINLOCK(mce_state_lock);
15160-static int open_count; /* #times opened */
15161+static local_t open_count; /* #times opened */
15162 static int open_exclu; /* already open exclusive? */
15163
15164 static int mce_open(struct inode *inode, struct file *file)
15165 {
15166 spin_lock(&mce_state_lock);
15167
15168- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
15169+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
15170 spin_unlock(&mce_state_lock);
15171
15172 return -EBUSY;
15173@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
15174
15175 if (file->f_flags & O_EXCL)
15176 open_exclu = 1;
15177- open_count++;
15178+ local_inc(&open_count);
15179
15180 spin_unlock(&mce_state_lock);
15181
15182@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
15183 {
15184 spin_lock(&mce_state_lock);
15185
15186- open_count--;
15187+ local_dec(&open_count);
15188 open_exclu = 0;
15189
15190 spin_unlock(&mce_state_lock);
15191@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
15192 static void mce_reset(void)
15193 {
15194 cpu_missing = 0;
15195- atomic_set(&mce_fake_paniced, 0);
15196+ atomic_set_unchecked(&mce_fake_paniced, 0);
15197 atomic_set(&mce_executing, 0);
15198 atomic_set(&mce_callin, 0);
15199 atomic_set(&global_nwo, 0);
15200diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15201index ef3cd31..9d2f6ab 100644
15202--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
15203+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15204@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15205 return ret;
15206 }
15207
15208-static struct sysfs_ops threshold_ops = {
15209+static const struct sysfs_ops threshold_ops = {
15210 .show = show,
15211 .store = store,
15212 };
15213diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15214index 5c0e653..0882b0a 100644
15215--- a/arch/x86/kernel/cpu/mcheck/p5.c
15216+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15217@@ -12,6 +12,7 @@
15218 #include <asm/system.h>
15219 #include <asm/mce.h>
15220 #include <asm/msr.h>
15221+#include <asm/pgtable.h>
15222
15223 /* By default disabled */
15224 int mce_p5_enabled __read_mostly;
15225@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15226 if (!cpu_has(c, X86_FEATURE_MCE))
15227 return;
15228
15229+ pax_open_kernel();
15230 machine_check_vector = pentium_machine_check;
15231+ pax_close_kernel();
15232 /* Make sure the vector pointer is visible before we enable MCEs: */
15233 wmb();
15234
15235diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15236index 54060f5..c1a7577 100644
15237--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15238+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15239@@ -11,6 +11,7 @@
15240 #include <asm/system.h>
15241 #include <asm/mce.h>
15242 #include <asm/msr.h>
15243+#include <asm/pgtable.h>
15244
15245 /* Machine check handler for WinChip C6: */
15246 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15247@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15248 {
15249 u32 lo, hi;
15250
15251+ pax_open_kernel();
15252 machine_check_vector = winchip_machine_check;
15253+ pax_close_kernel();
15254 /* Make sure the vector pointer is visible before we enable MCEs: */
15255 wmb();
15256
15257diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
15258index 33af141..92ba9cd 100644
15259--- a/arch/x86/kernel/cpu/mtrr/amd.c
15260+++ b/arch/x86/kernel/cpu/mtrr/amd.c
15261@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
15262 return 0;
15263 }
15264
15265-static struct mtrr_ops amd_mtrr_ops = {
15266+static const struct mtrr_ops amd_mtrr_ops = {
15267 .vendor = X86_VENDOR_AMD,
15268 .set = amd_set_mtrr,
15269 .get = amd_get_mtrr,
15270diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
15271index de89f14..316fe3e 100644
15272--- a/arch/x86/kernel/cpu/mtrr/centaur.c
15273+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
15274@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
15275 return 0;
15276 }
15277
15278-static struct mtrr_ops centaur_mtrr_ops = {
15279+static const struct mtrr_ops centaur_mtrr_ops = {
15280 .vendor = X86_VENDOR_CENTAUR,
15281 .set = centaur_set_mcr,
15282 .get = centaur_get_mcr,
15283diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
15284index 228d982..68a3343 100644
15285--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
15286+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
15287@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
15288 post_set();
15289 }
15290
15291-static struct mtrr_ops cyrix_mtrr_ops = {
15292+static const struct mtrr_ops cyrix_mtrr_ops = {
15293 .vendor = X86_VENDOR_CYRIX,
15294 .set_all = cyrix_set_all,
15295 .set = cyrix_set_arr,
15296diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
15297index 55da0c5..4d75584 100644
15298--- a/arch/x86/kernel/cpu/mtrr/generic.c
15299+++ b/arch/x86/kernel/cpu/mtrr/generic.c
15300@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
15301 /*
15302 * Generic structure...
15303 */
15304-struct mtrr_ops generic_mtrr_ops = {
15305+const struct mtrr_ops generic_mtrr_ops = {
15306 .use_intel_if = 1,
15307 .set_all = generic_set_all,
15308 .get = generic_get_mtrr,
15309diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
15310index 3c1b12d..454f6b6 100644
15311--- a/arch/x86/kernel/cpu/mtrr/if.c
15312+++ b/arch/x86/kernel/cpu/mtrr/if.c
15313@@ -89,6 +89,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
15314 * "base=%Lx size=%Lx type=%s" or "disable=%d"
15315 */
15316 static ssize_t
15317+mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
15318+static ssize_t
15319 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
15320 {
15321 int i, err;
15322diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15323index fd60f09..c94ef52 100644
15324--- a/arch/x86/kernel/cpu/mtrr/main.c
15325+++ b/arch/x86/kernel/cpu/mtrr/main.c
15326@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
15327 u64 size_or_mask, size_and_mask;
15328 static bool mtrr_aps_delayed_init;
15329
15330-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15331+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15332
15333-struct mtrr_ops *mtrr_if;
15334+const struct mtrr_ops *mtrr_if;
15335
15336 static void set_mtrr(unsigned int reg, unsigned long base,
15337 unsigned long size, mtrr_type type);
15338
15339-void set_mtrr_ops(struct mtrr_ops *ops)
15340+void set_mtrr_ops(const struct mtrr_ops *ops)
15341 {
15342 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
15343 mtrr_ops[ops->vendor] = ops;
15344diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15345index a501dee..816c719 100644
15346--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15347+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15348@@ -25,14 +25,14 @@ struct mtrr_ops {
15349 int (*validate_add_page)(unsigned long base, unsigned long size,
15350 unsigned int type);
15351 int (*have_wrcomb)(void);
15352-};
15353+} __do_const;
15354
15355 extern int generic_get_free_region(unsigned long base, unsigned long size,
15356 int replace_reg);
15357 extern int generic_validate_add_page(unsigned long base, unsigned long size,
15358 unsigned int type);
15359
15360-extern struct mtrr_ops generic_mtrr_ops;
15361+extern const struct mtrr_ops generic_mtrr_ops;
15362
15363 extern int positive_have_wrcomb(void);
15364
15365@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
15366 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
15367 void get_mtrr_state(void);
15368
15369-extern void set_mtrr_ops(struct mtrr_ops *ops);
15370+extern void set_mtrr_ops(const struct mtrr_ops *ops);
15371
15372 extern u64 size_or_mask, size_and_mask;
15373-extern struct mtrr_ops *mtrr_if;
15374+extern const struct mtrr_ops *mtrr_if;
15375
15376 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
15377 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
15378diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15379index 0ff02ca..fc49a60 100644
15380--- a/arch/x86/kernel/cpu/perf_event.c
15381+++ b/arch/x86/kernel/cpu/perf_event.c
15382@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
15383 * count to the generic event atomically:
15384 */
15385 again:
15386- prev_raw_count = atomic64_read(&hwc->prev_count);
15387+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
15388 rdmsrl(hwc->event_base + idx, new_raw_count);
15389
15390- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
15391+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
15392 new_raw_count) != prev_raw_count)
15393 goto again;
15394
15395@@ -741,7 +741,7 @@ again:
15396 delta = (new_raw_count << shift) - (prev_raw_count << shift);
15397 delta >>= shift;
15398
15399- atomic64_add(delta, &event->count);
15400+ atomic64_add_unchecked(delta, &event->count);
15401 atomic64_sub(delta, &hwc->period_left);
15402
15403 return new_raw_count;
15404@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
15405 * The hw event starts counting from this event offset,
15406 * mark it to be able to extra future deltas:
15407 */
15408- atomic64_set(&hwc->prev_count, (u64)-left);
15409+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
15410
15411 err = checking_wrmsrl(hwc->event_base + idx,
15412 (u64)(-left) & x86_pmu.event_mask);
15413@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
15414 break;
15415
15416 callchain_store(entry, frame.return_address);
15417- fp = frame.next_frame;
15418+ fp = (__force const void __user *)frame.next_frame;
15419 }
15420 }
15421
15422diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
15423index 898df97..9e82503 100644
15424--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
15425+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
15426@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
15427
15428 /* Interface defining a CPU specific perfctr watchdog */
15429 struct wd_ops {
15430- int (*reserve)(void);
15431- void (*unreserve)(void);
15432- int (*setup)(unsigned nmi_hz);
15433- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15434- void (*stop)(void);
15435+ int (* const reserve)(void);
15436+ void (* const unreserve)(void);
15437+ int (* const setup)(unsigned nmi_hz);
15438+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15439+ void (* const stop)(void);
15440 unsigned perfctr;
15441 unsigned evntsel;
15442 u64 checkbit;
15443@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
15444 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
15445 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
15446
15447+/* cannot be const */
15448 static struct wd_ops intel_arch_wd_ops;
15449
15450 static int setup_intel_arch_watchdog(unsigned nmi_hz)
15451@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
15452 return 1;
15453 }
15454
15455+/* cannot be const */
15456 static struct wd_ops intel_arch_wd_ops __read_mostly = {
15457 .reserve = single_msr_reserve,
15458 .unreserve = single_msr_unreserve,
15459diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15460index ff95824..2ffdcb5 100644
15461--- a/arch/x86/kernel/crash.c
15462+++ b/arch/x86/kernel/crash.c
15463@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
15464 regs = args->regs;
15465
15466 #ifdef CONFIG_X86_32
15467- if (!user_mode_vm(regs)) {
15468+ if (!user_mode(regs)) {
15469 crash_fixup_ss_esp(&fixed_regs, regs);
15470 regs = &fixed_regs;
15471 }
15472diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15473index 37250fe..bf2ec74 100644
15474--- a/arch/x86/kernel/doublefault_32.c
15475+++ b/arch/x86/kernel/doublefault_32.c
15476@@ -11,7 +11,7 @@
15477
15478 #define DOUBLEFAULT_STACKSIZE (1024)
15479 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15480-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15481+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15482
15483 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15484
15485@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15486 unsigned long gdt, tss;
15487
15488 store_gdt(&gdt_desc);
15489- gdt = gdt_desc.address;
15490+ gdt = (unsigned long)gdt_desc.address;
15491
15492 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15493
15494@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15495 /* 0x2 bit is always set */
15496 .flags = X86_EFLAGS_SF | 0x2,
15497 .sp = STACK_START,
15498- .es = __USER_DS,
15499+ .es = __KERNEL_DS,
15500 .cs = __KERNEL_CS,
15501 .ss = __KERNEL_DS,
15502- .ds = __USER_DS,
15503+ .ds = __KERNEL_DS,
15504 .fs = __KERNEL_PERCPU,
15505
15506 .__cr3 = __pa_nodebug(swapper_pg_dir),
15507diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15508index 2d8a371..4fa6ae6 100644
15509--- a/arch/x86/kernel/dumpstack.c
15510+++ b/arch/x86/kernel/dumpstack.c
15511@@ -2,6 +2,9 @@
15512 * Copyright (C) 1991, 1992 Linus Torvalds
15513 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15514 */
15515+#ifdef CONFIG_GRKERNSEC_HIDESYM
15516+#define __INCLUDED_BY_HIDESYM 1
15517+#endif
15518 #include <linux/kallsyms.h>
15519 #include <linux/kprobes.h>
15520 #include <linux/uaccess.h>
15521@@ -28,7 +31,7 @@ static int die_counter;
15522
15523 void printk_address(unsigned long address, int reliable)
15524 {
15525- printk(" [<%p>] %s%pS\n", (void *) address,
15526+ printk(" [<%p>] %s%pA\n", (void *) address,
15527 reliable ? "" : "? ", (void *) address);
15528 }
15529
15530@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
15531 static void
15532 print_ftrace_graph_addr(unsigned long addr, void *data,
15533 const struct stacktrace_ops *ops,
15534- struct thread_info *tinfo, int *graph)
15535+ struct task_struct *task, int *graph)
15536 {
15537- struct task_struct *task = tinfo->task;
15538 unsigned long ret_addr;
15539 int index = task->curr_ret_stack;
15540
15541@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15542 static inline void
15543 print_ftrace_graph_addr(unsigned long addr, void *data,
15544 const struct stacktrace_ops *ops,
15545- struct thread_info *tinfo, int *graph)
15546+ struct task_struct *task, int *graph)
15547 { }
15548 #endif
15549
15550@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15551 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15552 */
15553
15554-static inline int valid_stack_ptr(struct thread_info *tinfo,
15555- void *p, unsigned int size, void *end)
15556+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15557 {
15558- void *t = tinfo;
15559 if (end) {
15560 if (p < end && p >= (end-THREAD_SIZE))
15561 return 1;
15562@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15563 }
15564
15565 unsigned long
15566-print_context_stack(struct thread_info *tinfo,
15567+print_context_stack(struct task_struct *task, void *stack_start,
15568 unsigned long *stack, unsigned long bp,
15569 const struct stacktrace_ops *ops, void *data,
15570 unsigned long *end, int *graph)
15571 {
15572 struct stack_frame *frame = (struct stack_frame *)bp;
15573
15574- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15575+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15576 unsigned long addr;
15577
15578 addr = *stack;
15579@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
15580 } else {
15581 ops->address(data, addr, 0);
15582 }
15583- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15584+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15585 }
15586 stack++;
15587 }
15588@@ -180,7 +180,7 @@ void dump_stack(void)
15589 #endif
15590
15591 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15592- current->pid, current->comm, print_tainted(),
15593+ task_pid_nr(current), current->comm, print_tainted(),
15594 init_utsname()->release,
15595 (int)strcspn(init_utsname()->version, " "),
15596 init_utsname()->version);
15597@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
15598 return flags;
15599 }
15600
15601+extern void gr_handle_kernel_exploit(void);
15602+
15603 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15604 {
15605 if (regs && kexec_should_crash(current))
15606@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15607 panic("Fatal exception in interrupt");
15608 if (panic_on_oops)
15609 panic("Fatal exception");
15610- do_exit(signr);
15611+
15612+ gr_handle_kernel_exploit();
15613+
15614+ do_group_exit(signr);
15615 }
15616
15617 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15618@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15619 unsigned long flags = oops_begin();
15620 int sig = SIGSEGV;
15621
15622- if (!user_mode_vm(regs))
15623+ if (!user_mode(regs))
15624 report_bug(regs->ip, regs);
15625
15626 if (__die(str, regs, err))
15627diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
15628index 81086c2..13e8b17 100644
15629--- a/arch/x86/kernel/dumpstack.h
15630+++ b/arch/x86/kernel/dumpstack.h
15631@@ -15,7 +15,7 @@
15632 #endif
15633
15634 extern unsigned long
15635-print_context_stack(struct thread_info *tinfo,
15636+print_context_stack(struct task_struct *task, void *stack_start,
15637 unsigned long *stack, unsigned long bp,
15638 const struct stacktrace_ops *ops, void *data,
15639 unsigned long *end, int *graph);
15640diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15641index f7dd2a7..504f53b 100644
15642--- a/arch/x86/kernel/dumpstack_32.c
15643+++ b/arch/x86/kernel/dumpstack_32.c
15644@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15645 #endif
15646
15647 for (;;) {
15648- struct thread_info *context;
15649+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15650+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15651
15652- context = (struct thread_info *)
15653- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15654- bp = print_context_stack(context, stack, bp, ops,
15655- data, NULL, &graph);
15656-
15657- stack = (unsigned long *)context->previous_esp;
15658- if (!stack)
15659+ if (stack_start == task_stack_page(task))
15660 break;
15661+ stack = *(unsigned long **)stack_start;
15662 if (ops->stack(data, "IRQ") < 0)
15663 break;
15664 touch_nmi_watchdog();
15665@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
15666 * When in-kernel, we also print out the stack and code at the
15667 * time of the fault..
15668 */
15669- if (!user_mode_vm(regs)) {
15670+ if (!user_mode(regs)) {
15671 unsigned int code_prologue = code_bytes * 43 / 64;
15672 unsigned int code_len = code_bytes;
15673 unsigned char c;
15674 u8 *ip;
15675+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
15676
15677 printk(KERN_EMERG "Stack:\n");
15678 show_stack_log_lvl(NULL, regs, &regs->sp,
15679@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
15680
15681 printk(KERN_EMERG "Code: ");
15682
15683- ip = (u8 *)regs->ip - code_prologue;
15684+ ip = (u8 *)regs->ip - code_prologue + cs_base;
15685 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15686 /* try starting at IP */
15687- ip = (u8 *)regs->ip;
15688+ ip = (u8 *)regs->ip + cs_base;
15689 code_len = code_len - code_prologue + 1;
15690 }
15691 for (i = 0; i < code_len; i++, ip++) {
15692@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
15693 printk(" Bad EIP value.");
15694 break;
15695 }
15696- if (ip == (u8 *)regs->ip)
15697+ if (ip == (u8 *)regs->ip + cs_base)
15698 printk("<%02x> ", c);
15699 else
15700 printk("%02x ", c);
15701@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
15702 printk("\n");
15703 }
15704
15705+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15706+void pax_check_alloca(unsigned long size)
15707+{
15708+ unsigned long sp = (unsigned long)&sp, stack_left;
15709+
15710+ /* all kernel stacks are of the same size */
15711+ stack_left = sp & (THREAD_SIZE - 1);
15712+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15713+}
15714+EXPORT_SYMBOL(pax_check_alloca);
15715+#endif
15716+
15717 int is_valid_bugaddr(unsigned long ip)
15718 {
15719 unsigned short ud2;
15720
15721+ ip = ktla_ktva(ip);
15722 if (ip < PAGE_OFFSET)
15723 return 0;
15724 if (probe_kernel_address((unsigned short *)ip, ud2))
15725diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15726index a071e6b..36cd585 100644
15727--- a/arch/x86/kernel/dumpstack_64.c
15728+++ b/arch/x86/kernel/dumpstack_64.c
15729@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15730 unsigned long *irq_stack_end =
15731 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15732 unsigned used = 0;
15733- struct thread_info *tinfo;
15734 int graph = 0;
15735+ void *stack_start;
15736
15737 if (!task)
15738 task = current;
15739@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15740 * current stack address. If the stacks consist of nested
15741 * exceptions
15742 */
15743- tinfo = task_thread_info(task);
15744 for (;;) {
15745 char *id;
15746 unsigned long *estack_end;
15747+
15748 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15749 &used, &id);
15750
15751@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15752 if (ops->stack(data, id) < 0)
15753 break;
15754
15755- bp = print_context_stack(tinfo, stack, bp, ops,
15756+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15757 data, estack_end, &graph);
15758 ops->stack(data, "<EOE>");
15759 /*
15760@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15761 if (stack >= irq_stack && stack < irq_stack_end) {
15762 if (ops->stack(data, "IRQ") < 0)
15763 break;
15764- bp = print_context_stack(tinfo, stack, bp,
15765+ bp = print_context_stack(task, irq_stack, stack, bp,
15766 ops, data, irq_stack_end, &graph);
15767 /*
15768 * We link to the next stack (which would be
15769@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15770 /*
15771 * This handles the process stack:
15772 */
15773- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15774+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15775+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15776 put_cpu();
15777 }
15778 EXPORT_SYMBOL(dump_trace);
15779@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
15780 return ud2 == 0x0b0f;
15781 }
15782
15783+
15784+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15785+void pax_check_alloca(unsigned long size)
15786+{
15787+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15788+ unsigned cpu, used;
15789+ char *id;
15790+
15791+ /* check the process stack first */
15792+ stack_start = (unsigned long)task_stack_page(current);
15793+ stack_end = stack_start + THREAD_SIZE;
15794+ if (likely(stack_start <= sp && sp < stack_end)) {
15795+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
15796+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15797+ return;
15798+ }
15799+
15800+ cpu = get_cpu();
15801+
15802+ /* check the irq stacks */
15803+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15804+ stack_start = stack_end - IRQ_STACK_SIZE;
15805+ if (stack_start <= sp && sp < stack_end) {
15806+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15807+ put_cpu();
15808+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15809+ return;
15810+ }
15811+
15812+ /* check the exception stacks */
15813+ used = 0;
15814+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15815+ stack_start = stack_end - EXCEPTION_STKSZ;
15816+ if (stack_end && stack_start <= sp && sp < stack_end) {
15817+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15818+ put_cpu();
15819+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15820+ return;
15821+ }
15822+
15823+ put_cpu();
15824+
15825+ /* unknown stack */
15826+ BUG();
15827+}
15828+EXPORT_SYMBOL(pax_check_alloca);
15829+#endif
15830diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
15831index a89739a..95e0c48 100644
15832--- a/arch/x86/kernel/e820.c
15833+++ b/arch/x86/kernel/e820.c
15834@@ -733,7 +733,7 @@ struct early_res {
15835 };
15836 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
15837 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
15838- {}
15839+ { 0, 0, {0}, 0 }
15840 };
15841
15842 static int __init find_overlapped_early(u64 start, u64 end)
15843diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15844index b9c830c..1e41a96 100644
15845--- a/arch/x86/kernel/early_printk.c
15846+++ b/arch/x86/kernel/early_printk.c
15847@@ -7,6 +7,7 @@
15848 #include <linux/pci_regs.h>
15849 #include <linux/pci_ids.h>
15850 #include <linux/errno.h>
15851+#include <linux/sched.h>
15852 #include <asm/io.h>
15853 #include <asm/processor.h>
15854 #include <asm/fcntl.h>
15855@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
15856 int n;
15857 va_list ap;
15858
15859+ pax_track_stack();
15860+
15861 va_start(ap, fmt);
15862 n = vscnprintf(buf, sizeof(buf), fmt, ap);
15863 early_console->write(early_console, buf, n);
15864diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
15865index 5cab48e..b025f9b 100644
15866--- a/arch/x86/kernel/efi_32.c
15867+++ b/arch/x86/kernel/efi_32.c
15868@@ -38,70 +38,56 @@
15869 */
15870
15871 static unsigned long efi_rt_eflags;
15872-static pgd_t efi_bak_pg_dir_pointer[2];
15873+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
15874
15875-void efi_call_phys_prelog(void)
15876+void __init efi_call_phys_prelog(void)
15877 {
15878- unsigned long cr4;
15879- unsigned long temp;
15880 struct desc_ptr gdt_descr;
15881
15882+#ifdef CONFIG_PAX_KERNEXEC
15883+ struct desc_struct d;
15884+#endif
15885+
15886 local_irq_save(efi_rt_eflags);
15887
15888- /*
15889- * If I don't have PAE, I should just duplicate two entries in page
15890- * directory. If I have PAE, I just need to duplicate one entry in
15891- * page directory.
15892- */
15893- cr4 = read_cr4_safe();
15894-
15895- if (cr4 & X86_CR4_PAE) {
15896- efi_bak_pg_dir_pointer[0].pgd =
15897- swapper_pg_dir[pgd_index(0)].pgd;
15898- swapper_pg_dir[0].pgd =
15899- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15900- } else {
15901- efi_bak_pg_dir_pointer[0].pgd =
15902- swapper_pg_dir[pgd_index(0)].pgd;
15903- efi_bak_pg_dir_pointer[1].pgd =
15904- swapper_pg_dir[pgd_index(0x400000)].pgd;
15905- swapper_pg_dir[pgd_index(0)].pgd =
15906- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15907- temp = PAGE_OFFSET + 0x400000;
15908- swapper_pg_dir[pgd_index(0x400000)].pgd =
15909- swapper_pg_dir[pgd_index(temp)].pgd;
15910- }
15911+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
15912+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15913+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15914
15915 /*
15916 * After the lock is released, the original page table is restored.
15917 */
15918 __flush_tlb_all();
15919
15920+#ifdef CONFIG_PAX_KERNEXEC
15921+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
15922+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15923+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
15924+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15925+#endif
15926+
15927 gdt_descr.address = __pa(get_cpu_gdt_table(0));
15928 gdt_descr.size = GDT_SIZE - 1;
15929 load_gdt(&gdt_descr);
15930 }
15931
15932-void efi_call_phys_epilog(void)
15933+void __init efi_call_phys_epilog(void)
15934 {
15935- unsigned long cr4;
15936 struct desc_ptr gdt_descr;
15937
15938+#ifdef CONFIG_PAX_KERNEXEC
15939+ struct desc_struct d;
15940+
15941+ memset(&d, 0, sizeof d);
15942+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15943+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15944+#endif
15945+
15946 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
15947 gdt_descr.size = GDT_SIZE - 1;
15948 load_gdt(&gdt_descr);
15949
15950- cr4 = read_cr4_safe();
15951-
15952- if (cr4 & X86_CR4_PAE) {
15953- swapper_pg_dir[pgd_index(0)].pgd =
15954- efi_bak_pg_dir_pointer[0].pgd;
15955- } else {
15956- swapper_pg_dir[pgd_index(0)].pgd =
15957- efi_bak_pg_dir_pointer[0].pgd;
15958- swapper_pg_dir[pgd_index(0x400000)].pgd =
15959- efi_bak_pg_dir_pointer[1].pgd;
15960- }
15961+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
15962
15963 /*
15964 * After the lock is released, the original page table is restored.
15965diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
15966index fbe66e6..c5c0dd2 100644
15967--- a/arch/x86/kernel/efi_stub_32.S
15968+++ b/arch/x86/kernel/efi_stub_32.S
15969@@ -6,7 +6,9 @@
15970 */
15971
15972 #include <linux/linkage.h>
15973+#include <linux/init.h>
15974 #include <asm/page_types.h>
15975+#include <asm/segment.h>
15976
15977 /*
15978 * efi_call_phys(void *, ...) is a function with variable parameters.
15979@@ -20,7 +22,7 @@
15980 * service functions will comply with gcc calling convention, too.
15981 */
15982
15983-.text
15984+__INIT
15985 ENTRY(efi_call_phys)
15986 /*
15987 * 0. The function can only be called in Linux kernel. So CS has been
15988@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
15989 * The mapping of lower virtual memory has been created in prelog and
15990 * epilog.
15991 */
15992- movl $1f, %edx
15993- subl $__PAGE_OFFSET, %edx
15994- jmp *%edx
15995+ movl $(__KERNEXEC_EFI_DS), %edx
15996+ mov %edx, %ds
15997+ mov %edx, %es
15998+ mov %edx, %ss
15999+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
16000 1:
16001
16002 /*
16003@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
16004 * parameter 2, ..., param n. To make things easy, we save the return
16005 * address of efi_call_phys in a global variable.
16006 */
16007- popl %edx
16008- movl %edx, saved_return_addr
16009- /* get the function pointer into ECX*/
16010- popl %ecx
16011- movl %ecx, efi_rt_function_ptr
16012- movl $2f, %edx
16013- subl $__PAGE_OFFSET, %edx
16014- pushl %edx
16015+ popl (saved_return_addr)
16016+ popl (efi_rt_function_ptr)
16017
16018 /*
16019 * 3. Clear PG bit in %CR0.
16020@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
16021 /*
16022 * 5. Call the physical function.
16023 */
16024- jmp *%ecx
16025+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
16026
16027-2:
16028 /*
16029 * 6. After EFI runtime service returns, control will return to
16030 * following instruction. We'd better readjust stack pointer first.
16031@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
16032 movl %cr0, %edx
16033 orl $0x80000000, %edx
16034 movl %edx, %cr0
16035- jmp 1f
16036-1:
16037+
16038 /*
16039 * 8. Now restore the virtual mode from flat mode by
16040 * adding EIP with PAGE_OFFSET.
16041 */
16042- movl $1f, %edx
16043- jmp *%edx
16044+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
16045 1:
16046+ movl $(__KERNEL_DS), %edx
16047+ mov %edx, %ds
16048+ mov %edx, %es
16049+ mov %edx, %ss
16050
16051 /*
16052 * 9. Balance the stack. And because EAX contain the return value,
16053 * we'd better not clobber it.
16054 */
16055- leal efi_rt_function_ptr, %edx
16056- movl (%edx), %ecx
16057- pushl %ecx
16058+ pushl (efi_rt_function_ptr)
16059
16060 /*
16061- * 10. Push the saved return address onto the stack and return.
16062+ * 10. Return to the saved return address.
16063 */
16064- leal saved_return_addr, %edx
16065- movl (%edx), %ecx
16066- pushl %ecx
16067- ret
16068+ jmpl *(saved_return_addr)
16069 ENDPROC(efi_call_phys)
16070 .previous
16071
16072-.data
16073+__INITDATA
16074 saved_return_addr:
16075 .long 0
16076 efi_rt_function_ptr:
16077diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
16078index 4c07cca..2c8427d 100644
16079--- a/arch/x86/kernel/efi_stub_64.S
16080+++ b/arch/x86/kernel/efi_stub_64.S
16081@@ -7,6 +7,7 @@
16082 */
16083
16084 #include <linux/linkage.h>
16085+#include <asm/alternative-asm.h>
16086
16087 #define SAVE_XMM \
16088 mov %rsp, %rax; \
16089@@ -40,6 +41,7 @@ ENTRY(efi_call0)
16090 call *%rdi
16091 addq $32, %rsp
16092 RESTORE_XMM
16093+ pax_force_retaddr 0, 1
16094 ret
16095 ENDPROC(efi_call0)
16096
16097@@ -50,6 +52,7 @@ ENTRY(efi_call1)
16098 call *%rdi
16099 addq $32, %rsp
16100 RESTORE_XMM
16101+ pax_force_retaddr 0, 1
16102 ret
16103 ENDPROC(efi_call1)
16104
16105@@ -60,6 +63,7 @@ ENTRY(efi_call2)
16106 call *%rdi
16107 addq $32, %rsp
16108 RESTORE_XMM
16109+ pax_force_retaddr 0, 1
16110 ret
16111 ENDPROC(efi_call2)
16112
16113@@ -71,6 +75,7 @@ ENTRY(efi_call3)
16114 call *%rdi
16115 addq $32, %rsp
16116 RESTORE_XMM
16117+ pax_force_retaddr 0, 1
16118 ret
16119 ENDPROC(efi_call3)
16120
16121@@ -83,6 +88,7 @@ ENTRY(efi_call4)
16122 call *%rdi
16123 addq $32, %rsp
16124 RESTORE_XMM
16125+ pax_force_retaddr 0, 1
16126 ret
16127 ENDPROC(efi_call4)
16128
16129@@ -96,6 +102,7 @@ ENTRY(efi_call5)
16130 call *%rdi
16131 addq $48, %rsp
16132 RESTORE_XMM
16133+ pax_force_retaddr 0, 1
16134 ret
16135 ENDPROC(efi_call5)
16136
16137@@ -112,5 +119,6 @@ ENTRY(efi_call6)
16138 call *%rdi
16139 addq $48, %rsp
16140 RESTORE_XMM
16141+ pax_force_retaddr 0, 1
16142 ret
16143 ENDPROC(efi_call6)
16144diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16145index c097e7d..c689cf4 100644
16146--- a/arch/x86/kernel/entry_32.S
16147+++ b/arch/x86/kernel/entry_32.S
16148@@ -185,13 +185,146 @@
16149 /*CFI_REL_OFFSET gs, PT_GS*/
16150 .endm
16151 .macro SET_KERNEL_GS reg
16152+
16153+#ifdef CONFIG_CC_STACKPROTECTOR
16154 movl $(__KERNEL_STACK_CANARY), \reg
16155+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16156+ movl $(__USER_DS), \reg
16157+#else
16158+ xorl \reg, \reg
16159+#endif
16160+
16161 movl \reg, %gs
16162 .endm
16163
16164 #endif /* CONFIG_X86_32_LAZY_GS */
16165
16166-.macro SAVE_ALL
16167+.macro pax_enter_kernel
16168+#ifdef CONFIG_PAX_KERNEXEC
16169+ call pax_enter_kernel
16170+#endif
16171+.endm
16172+
16173+.macro pax_exit_kernel
16174+#ifdef CONFIG_PAX_KERNEXEC
16175+ call pax_exit_kernel
16176+#endif
16177+.endm
16178+
16179+#ifdef CONFIG_PAX_KERNEXEC
16180+ENTRY(pax_enter_kernel)
16181+#ifdef CONFIG_PARAVIRT
16182+ pushl %eax
16183+ pushl %ecx
16184+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16185+ mov %eax, %esi
16186+#else
16187+ mov %cr0, %esi
16188+#endif
16189+ bts $16, %esi
16190+ jnc 1f
16191+ mov %cs, %esi
16192+ cmp $__KERNEL_CS, %esi
16193+ jz 3f
16194+ ljmp $__KERNEL_CS, $3f
16195+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16196+2:
16197+#ifdef CONFIG_PARAVIRT
16198+ mov %esi, %eax
16199+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16200+#else
16201+ mov %esi, %cr0
16202+#endif
16203+3:
16204+#ifdef CONFIG_PARAVIRT
16205+ popl %ecx
16206+ popl %eax
16207+#endif
16208+ ret
16209+ENDPROC(pax_enter_kernel)
16210+
16211+ENTRY(pax_exit_kernel)
16212+#ifdef CONFIG_PARAVIRT
16213+ pushl %eax
16214+ pushl %ecx
16215+#endif
16216+ mov %cs, %esi
16217+ cmp $__KERNEXEC_KERNEL_CS, %esi
16218+ jnz 2f
16219+#ifdef CONFIG_PARAVIRT
16220+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16221+ mov %eax, %esi
16222+#else
16223+ mov %cr0, %esi
16224+#endif
16225+ btr $16, %esi
16226+ ljmp $__KERNEL_CS, $1f
16227+1:
16228+#ifdef CONFIG_PARAVIRT
16229+ mov %esi, %eax
16230+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16231+#else
16232+ mov %esi, %cr0
16233+#endif
16234+2:
16235+#ifdef CONFIG_PARAVIRT
16236+ popl %ecx
16237+ popl %eax
16238+#endif
16239+ ret
16240+ENDPROC(pax_exit_kernel)
16241+#endif
16242+
16243+.macro pax_erase_kstack
16244+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16245+ call pax_erase_kstack
16246+#endif
16247+.endm
16248+
16249+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16250+/*
16251+ * ebp: thread_info
16252+ * ecx, edx: can be clobbered
16253+ */
16254+ENTRY(pax_erase_kstack)
16255+ pushl %edi
16256+ pushl %eax
16257+
16258+ mov TI_lowest_stack(%ebp), %edi
16259+ mov $-0xBEEF, %eax
16260+ std
16261+
16262+1: mov %edi, %ecx
16263+ and $THREAD_SIZE_asm - 1, %ecx
16264+ shr $2, %ecx
16265+ repne scasl
16266+ jecxz 2f
16267+
16268+ cmp $2*16, %ecx
16269+ jc 2f
16270+
16271+ mov $2*16, %ecx
16272+ repe scasl
16273+ jecxz 2f
16274+ jne 1b
16275+
16276+2: cld
16277+ mov %esp, %ecx
16278+ sub %edi, %ecx
16279+ shr $2, %ecx
16280+ rep stosl
16281+
16282+ mov TI_task_thread_sp0(%ebp), %edi
16283+ sub $128, %edi
16284+ mov %edi, TI_lowest_stack(%ebp)
16285+
16286+ popl %eax
16287+ popl %edi
16288+ ret
16289+ENDPROC(pax_erase_kstack)
16290+#endif
16291+
16292+.macro __SAVE_ALL _DS
16293 cld
16294 PUSH_GS
16295 pushl %fs
16296@@ -224,7 +357,7 @@
16297 pushl %ebx
16298 CFI_ADJUST_CFA_OFFSET 4
16299 CFI_REL_OFFSET ebx, 0
16300- movl $(__USER_DS), %edx
16301+ movl $\_DS, %edx
16302 movl %edx, %ds
16303 movl %edx, %es
16304 movl $(__KERNEL_PERCPU), %edx
16305@@ -232,6 +365,15 @@
16306 SET_KERNEL_GS %edx
16307 .endm
16308
16309+.macro SAVE_ALL
16310+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16311+ __SAVE_ALL __KERNEL_DS
16312+ pax_enter_kernel
16313+#else
16314+ __SAVE_ALL __USER_DS
16315+#endif
16316+.endm
16317+
16318 .macro RESTORE_INT_REGS
16319 popl %ebx
16320 CFI_ADJUST_CFA_OFFSET -4
16321@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
16322 CFI_ADJUST_CFA_OFFSET -4
16323 jmp syscall_exit
16324 CFI_ENDPROC
16325-END(ret_from_fork)
16326+ENDPROC(ret_from_fork)
16327
16328 /*
16329 * Return to user mode is not as complex as all this looks,
16330@@ -352,7 +494,15 @@ check_userspace:
16331 movb PT_CS(%esp), %al
16332 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
16333 cmpl $USER_RPL, %eax
16334+
16335+#ifdef CONFIG_PAX_KERNEXEC
16336+ jae resume_userspace
16337+
16338+ PAX_EXIT_KERNEL
16339+ jmp resume_kernel
16340+#else
16341 jb resume_kernel # not returning to v8086 or userspace
16342+#endif
16343
16344 ENTRY(resume_userspace)
16345 LOCKDEP_SYS_EXIT
16346@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
16347 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16348 # int/exception return?
16349 jne work_pending
16350- jmp restore_all
16351-END(ret_from_exception)
16352+ jmp restore_all_pax
16353+ENDPROC(ret_from_exception)
16354
16355 #ifdef CONFIG_PREEMPT
16356 ENTRY(resume_kernel)
16357@@ -380,7 +530,7 @@ need_resched:
16358 jz restore_all
16359 call preempt_schedule_irq
16360 jmp need_resched
16361-END(resume_kernel)
16362+ENDPROC(resume_kernel)
16363 #endif
16364 CFI_ENDPROC
16365
16366@@ -414,25 +564,36 @@ sysenter_past_esp:
16367 /*CFI_REL_OFFSET cs, 0*/
16368 /*
16369 * Push current_thread_info()->sysenter_return to the stack.
16370- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16371- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16372 */
16373- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
16374+ pushl $0
16375 CFI_ADJUST_CFA_OFFSET 4
16376 CFI_REL_OFFSET eip, 0
16377
16378 pushl %eax
16379 CFI_ADJUST_CFA_OFFSET 4
16380 SAVE_ALL
16381+ GET_THREAD_INFO(%ebp)
16382+ movl TI_sysenter_return(%ebp),%ebp
16383+ movl %ebp,PT_EIP(%esp)
16384 ENABLE_INTERRUPTS(CLBR_NONE)
16385
16386 /*
16387 * Load the potential sixth argument from user stack.
16388 * Careful about security.
16389 */
16390+ movl PT_OLDESP(%esp),%ebp
16391+
16392+#ifdef CONFIG_PAX_MEMORY_UDEREF
16393+ mov PT_OLDSS(%esp),%ds
16394+1: movl %ds:(%ebp),%ebp
16395+ push %ss
16396+ pop %ds
16397+#else
16398 cmpl $__PAGE_OFFSET-3,%ebp
16399 jae syscall_fault
16400 1: movl (%ebp),%ebp
16401+#endif
16402+
16403 movl %ebp,PT_EBP(%esp)
16404 .section __ex_table,"a"
16405 .align 4
16406@@ -455,12 +616,24 @@ sysenter_do_call:
16407 testl $_TIF_ALLWORK_MASK, %ecx
16408 jne sysexit_audit
16409 sysenter_exit:
16410+
16411+#ifdef CONFIG_PAX_RANDKSTACK
16412+ pushl_cfi %eax
16413+ movl %esp, %eax
16414+ call pax_randomize_kstack
16415+ popl_cfi %eax
16416+#endif
16417+
16418+ pax_erase_kstack
16419+
16420 /* if something modifies registers it must also disable sysexit */
16421 movl PT_EIP(%esp), %edx
16422 movl PT_OLDESP(%esp), %ecx
16423 xorl %ebp,%ebp
16424 TRACE_IRQS_ON
16425 1: mov PT_FS(%esp), %fs
16426+2: mov PT_DS(%esp), %ds
16427+3: mov PT_ES(%esp), %es
16428 PTGS_TO_GS
16429 ENABLE_INTERRUPTS_SYSEXIT
16430
16431@@ -477,6 +650,9 @@ sysenter_audit:
16432 movl %eax,%edx /* 2nd arg: syscall number */
16433 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16434 call audit_syscall_entry
16435+
16436+ pax_erase_kstack
16437+
16438 pushl %ebx
16439 CFI_ADJUST_CFA_OFFSET 4
16440 movl PT_EAX(%esp),%eax /* reload syscall number */
16441@@ -504,11 +680,17 @@ sysexit_audit:
16442
16443 CFI_ENDPROC
16444 .pushsection .fixup,"ax"
16445-2: movl $0,PT_FS(%esp)
16446+4: movl $0,PT_FS(%esp)
16447+ jmp 1b
16448+5: movl $0,PT_DS(%esp)
16449+ jmp 1b
16450+6: movl $0,PT_ES(%esp)
16451 jmp 1b
16452 .section __ex_table,"a"
16453 .align 4
16454- .long 1b,2b
16455+ .long 1b,4b
16456+ .long 2b,5b
16457+ .long 3b,6b
16458 .popsection
16459 PTGS_TO_GS_EX
16460 ENDPROC(ia32_sysenter_target)
16461@@ -538,6 +720,15 @@ syscall_exit:
16462 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16463 jne syscall_exit_work
16464
16465+restore_all_pax:
16466+
16467+#ifdef CONFIG_PAX_RANDKSTACK
16468+ movl %esp, %eax
16469+ call pax_randomize_kstack
16470+#endif
16471+
16472+ pax_erase_kstack
16473+
16474 restore_all:
16475 TRACE_IRQS_IRET
16476 restore_all_notrace:
16477@@ -602,10 +793,29 @@ ldt_ss:
16478 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16479 mov %dx, %ax /* eax: new kernel esp */
16480 sub %eax, %edx /* offset (low word is 0) */
16481- PER_CPU(gdt_page, %ebx)
16482+#ifdef CONFIG_SMP
16483+ movl PER_CPU_VAR(cpu_number), %ebx
16484+ shll $PAGE_SHIFT_asm, %ebx
16485+ addl $cpu_gdt_table, %ebx
16486+#else
16487+ movl $cpu_gdt_table, %ebx
16488+#endif
16489 shr $16, %edx
16490+
16491+#ifdef CONFIG_PAX_KERNEXEC
16492+ mov %cr0, %esi
16493+ btr $16, %esi
16494+ mov %esi, %cr0
16495+#endif
16496+
16497 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
16498 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
16499+
16500+#ifdef CONFIG_PAX_KERNEXEC
16501+ bts $16, %esi
16502+ mov %esi, %cr0
16503+#endif
16504+
16505 pushl $__ESPFIX_SS
16506 CFI_ADJUST_CFA_OFFSET 4
16507 push %eax /* new kernel esp */
16508@@ -636,36 +846,30 @@ work_resched:
16509 movl TI_flags(%ebp), %ecx
16510 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16511 # than syscall tracing?
16512- jz restore_all
16513+ jz restore_all_pax
16514 testb $_TIF_NEED_RESCHED, %cl
16515 jnz work_resched
16516
16517 work_notifysig: # deal with pending signals and
16518 # notify-resume requests
16519+ movl %esp, %eax
16520 #ifdef CONFIG_VM86
16521 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16522- movl %esp, %eax
16523- jne work_notifysig_v86 # returning to kernel-space or
16524+ jz 1f # returning to kernel-space or
16525 # vm86-space
16526- xorl %edx, %edx
16527- call do_notify_resume
16528- jmp resume_userspace_sig
16529
16530- ALIGN
16531-work_notifysig_v86:
16532 pushl %ecx # save ti_flags for do_notify_resume
16533 CFI_ADJUST_CFA_OFFSET 4
16534 call save_v86_state # %eax contains pt_regs pointer
16535 popl %ecx
16536 CFI_ADJUST_CFA_OFFSET -4
16537 movl %eax, %esp
16538-#else
16539- movl %esp, %eax
16540+1:
16541 #endif
16542 xorl %edx, %edx
16543 call do_notify_resume
16544 jmp resume_userspace_sig
16545-END(work_pending)
16546+ENDPROC(work_pending)
16547
16548 # perform syscall exit tracing
16549 ALIGN
16550@@ -673,11 +877,14 @@ syscall_trace_entry:
16551 movl $-ENOSYS,PT_EAX(%esp)
16552 movl %esp, %eax
16553 call syscall_trace_enter
16554+
16555+ pax_erase_kstack
16556+
16557 /* What it returned is what we'll actually use. */
16558 cmpl $(nr_syscalls), %eax
16559 jnae syscall_call
16560 jmp syscall_exit
16561-END(syscall_trace_entry)
16562+ENDPROC(syscall_trace_entry)
16563
16564 # perform syscall exit tracing
16565 ALIGN
16566@@ -690,20 +897,24 @@ syscall_exit_work:
16567 movl %esp, %eax
16568 call syscall_trace_leave
16569 jmp resume_userspace
16570-END(syscall_exit_work)
16571+ENDPROC(syscall_exit_work)
16572 CFI_ENDPROC
16573
16574 RING0_INT_FRAME # can't unwind into user space anyway
16575 syscall_fault:
16576+#ifdef CONFIG_PAX_MEMORY_UDEREF
16577+ push %ss
16578+ pop %ds
16579+#endif
16580 GET_THREAD_INFO(%ebp)
16581 movl $-EFAULT,PT_EAX(%esp)
16582 jmp resume_userspace
16583-END(syscall_fault)
16584+ENDPROC(syscall_fault)
16585
16586 syscall_badsys:
16587 movl $-ENOSYS,PT_EAX(%esp)
16588 jmp resume_userspace
16589-END(syscall_badsys)
16590+ENDPROC(syscall_badsys)
16591 CFI_ENDPROC
16592
16593 /*
16594@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
16595 PTREGSCALL(vm86)
16596 PTREGSCALL(vm86old)
16597
16598+ ALIGN;
16599+ENTRY(kernel_execve)
16600+ push %ebp
16601+ sub $PT_OLDSS+4,%esp
16602+ push %edi
16603+ push %ecx
16604+ push %eax
16605+ lea 3*4(%esp),%edi
16606+ mov $PT_OLDSS/4+1,%ecx
16607+ xorl %eax,%eax
16608+ rep stosl
16609+ pop %eax
16610+ pop %ecx
16611+ pop %edi
16612+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16613+ mov %eax,PT_EBX(%esp)
16614+ mov %edx,PT_ECX(%esp)
16615+ mov %ecx,PT_EDX(%esp)
16616+ mov %esp,%eax
16617+ call sys_execve
16618+ GET_THREAD_INFO(%ebp)
16619+ test %eax,%eax
16620+ jz syscall_exit
16621+ add $PT_OLDSS+4,%esp
16622+ pop %ebp
16623+ ret
16624+
16625 .macro FIXUP_ESPFIX_STACK
16626 /*
16627 * Switch back for ESPFIX stack to the normal zerobased stack
16628@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
16629 * normal stack and adjusts ESP with the matching offset.
16630 */
16631 /* fixup the stack */
16632- PER_CPU(gdt_page, %ebx)
16633+#ifdef CONFIG_SMP
16634+ movl PER_CPU_VAR(cpu_number), %ebx
16635+ shll $PAGE_SHIFT_asm, %ebx
16636+ addl $cpu_gdt_table, %ebx
16637+#else
16638+ movl $cpu_gdt_table, %ebx
16639+#endif
16640 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
16641 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
16642 shl $16, %eax
16643@@ -793,7 +1037,7 @@ vector=vector+1
16644 .endr
16645 2: jmp common_interrupt
16646 .endr
16647-END(irq_entries_start)
16648+ENDPROC(irq_entries_start)
16649
16650 .previous
16651 END(interrupt)
16652@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
16653 CFI_ADJUST_CFA_OFFSET 4
16654 jmp error_code
16655 CFI_ENDPROC
16656-END(coprocessor_error)
16657+ENDPROC(coprocessor_error)
16658
16659 ENTRY(simd_coprocessor_error)
16660 RING0_INT_FRAME
16661@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
16662 CFI_ADJUST_CFA_OFFSET 4
16663 jmp error_code
16664 CFI_ENDPROC
16665-END(simd_coprocessor_error)
16666+ENDPROC(simd_coprocessor_error)
16667
16668 ENTRY(device_not_available)
16669 RING0_INT_FRAME
16670@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
16671 CFI_ADJUST_CFA_OFFSET 4
16672 jmp error_code
16673 CFI_ENDPROC
16674-END(device_not_available)
16675+ENDPROC(device_not_available)
16676
16677 #ifdef CONFIG_PARAVIRT
16678 ENTRY(native_iret)
16679@@ -869,12 +1113,12 @@ ENTRY(native_iret)
16680 .align 4
16681 .long native_iret, iret_exc
16682 .previous
16683-END(native_iret)
16684+ENDPROC(native_iret)
16685
16686 ENTRY(native_irq_enable_sysexit)
16687 sti
16688 sysexit
16689-END(native_irq_enable_sysexit)
16690+ENDPROC(native_irq_enable_sysexit)
16691 #endif
16692
16693 ENTRY(overflow)
16694@@ -885,7 +1129,7 @@ ENTRY(overflow)
16695 CFI_ADJUST_CFA_OFFSET 4
16696 jmp error_code
16697 CFI_ENDPROC
16698-END(overflow)
16699+ENDPROC(overflow)
16700
16701 ENTRY(bounds)
16702 RING0_INT_FRAME
16703@@ -895,7 +1139,7 @@ ENTRY(bounds)
16704 CFI_ADJUST_CFA_OFFSET 4
16705 jmp error_code
16706 CFI_ENDPROC
16707-END(bounds)
16708+ENDPROC(bounds)
16709
16710 ENTRY(invalid_op)
16711 RING0_INT_FRAME
16712@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
16713 CFI_ADJUST_CFA_OFFSET 4
16714 jmp error_code
16715 CFI_ENDPROC
16716-END(invalid_op)
16717+ENDPROC(invalid_op)
16718
16719 ENTRY(coprocessor_segment_overrun)
16720 RING0_INT_FRAME
16721@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
16722 CFI_ADJUST_CFA_OFFSET 4
16723 jmp error_code
16724 CFI_ENDPROC
16725-END(coprocessor_segment_overrun)
16726+ENDPROC(coprocessor_segment_overrun)
16727
16728 ENTRY(invalid_TSS)
16729 RING0_EC_FRAME
16730@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
16731 CFI_ADJUST_CFA_OFFSET 4
16732 jmp error_code
16733 CFI_ENDPROC
16734-END(invalid_TSS)
16735+ENDPROC(invalid_TSS)
16736
16737 ENTRY(segment_not_present)
16738 RING0_EC_FRAME
16739@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
16740 CFI_ADJUST_CFA_OFFSET 4
16741 jmp error_code
16742 CFI_ENDPROC
16743-END(segment_not_present)
16744+ENDPROC(segment_not_present)
16745
16746 ENTRY(stack_segment)
16747 RING0_EC_FRAME
16748@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
16749 CFI_ADJUST_CFA_OFFSET 4
16750 jmp error_code
16751 CFI_ENDPROC
16752-END(stack_segment)
16753+ENDPROC(stack_segment)
16754
16755 ENTRY(alignment_check)
16756 RING0_EC_FRAME
16757@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
16758 CFI_ADJUST_CFA_OFFSET 4
16759 jmp error_code
16760 CFI_ENDPROC
16761-END(alignment_check)
16762+ENDPROC(alignment_check)
16763
16764 ENTRY(divide_error)
16765 RING0_INT_FRAME
16766@@ -957,7 +1201,7 @@ ENTRY(divide_error)
16767 CFI_ADJUST_CFA_OFFSET 4
16768 jmp error_code
16769 CFI_ENDPROC
16770-END(divide_error)
16771+ENDPROC(divide_error)
16772
16773 #ifdef CONFIG_X86_MCE
16774 ENTRY(machine_check)
16775@@ -968,7 +1212,7 @@ ENTRY(machine_check)
16776 CFI_ADJUST_CFA_OFFSET 4
16777 jmp error_code
16778 CFI_ENDPROC
16779-END(machine_check)
16780+ENDPROC(machine_check)
16781 #endif
16782
16783 ENTRY(spurious_interrupt_bug)
16784@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
16785 CFI_ADJUST_CFA_OFFSET 4
16786 jmp error_code
16787 CFI_ENDPROC
16788-END(spurious_interrupt_bug)
16789+ENDPROC(spurious_interrupt_bug)
16790
16791 ENTRY(kernel_thread_helper)
16792 pushl $0 # fake return address for unwinder
16793@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
16794
16795 ENTRY(mcount)
16796 ret
16797-END(mcount)
16798+ENDPROC(mcount)
16799
16800 ENTRY(ftrace_caller)
16801 cmpl $0, function_trace_stop
16802@@ -1124,7 +1368,7 @@ ftrace_graph_call:
16803 .globl ftrace_stub
16804 ftrace_stub:
16805 ret
16806-END(ftrace_caller)
16807+ENDPROC(ftrace_caller)
16808
16809 #else /* ! CONFIG_DYNAMIC_FTRACE */
16810
16811@@ -1160,7 +1404,7 @@ trace:
16812 popl %ecx
16813 popl %eax
16814 jmp ftrace_stub
16815-END(mcount)
16816+ENDPROC(mcount)
16817 #endif /* CONFIG_DYNAMIC_FTRACE */
16818 #endif /* CONFIG_FUNCTION_TRACER */
16819
16820@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
16821 popl %ecx
16822 popl %eax
16823 ret
16824-END(ftrace_graph_caller)
16825+ENDPROC(ftrace_graph_caller)
16826
16827 .globl return_to_handler
16828 return_to_handler:
16829@@ -1198,7 +1442,6 @@ return_to_handler:
16830 ret
16831 #endif
16832
16833-.section .rodata,"a"
16834 #include "syscall_table_32.S"
16835
16836 syscall_table_size=(.-sys_call_table)
16837@@ -1255,15 +1498,18 @@ error_code:
16838 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16839 REG_TO_PTGS %ecx
16840 SET_KERNEL_GS %ecx
16841- movl $(__USER_DS), %ecx
16842+ movl $(__KERNEL_DS), %ecx
16843 movl %ecx, %ds
16844 movl %ecx, %es
16845+
16846+ pax_enter_kernel
16847+
16848 TRACE_IRQS_OFF
16849 movl %esp,%eax # pt_regs pointer
16850 call *%edi
16851 jmp ret_from_exception
16852 CFI_ENDPROC
16853-END(page_fault)
16854+ENDPROC(page_fault)
16855
16856 /*
16857 * Debug traps and NMI can happen at the one SYSENTER instruction
16858@@ -1309,7 +1555,7 @@ debug_stack_correct:
16859 call do_debug
16860 jmp ret_from_exception
16861 CFI_ENDPROC
16862-END(debug)
16863+ENDPROC(debug)
16864
16865 /*
16866 * NMI is doubly nasty. It can happen _while_ we're handling
16867@@ -1351,6 +1597,9 @@ nmi_stack_correct:
16868 xorl %edx,%edx # zero error code
16869 movl %esp,%eax # pt_regs pointer
16870 call do_nmi
16871+
16872+ pax_exit_kernel
16873+
16874 jmp restore_all_notrace
16875 CFI_ENDPROC
16876
16877@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
16878 FIXUP_ESPFIX_STACK # %eax == %esp
16879 xorl %edx,%edx # zero error code
16880 call do_nmi
16881+
16882+ pax_exit_kernel
16883+
16884 RESTORE_REGS
16885 lss 12+4(%esp), %esp # back to espfix stack
16886 CFI_ADJUST_CFA_OFFSET -24
16887 jmp irq_return
16888 CFI_ENDPROC
16889-END(nmi)
16890+ENDPROC(nmi)
16891
16892 ENTRY(int3)
16893 RING0_INT_FRAME
16894@@ -1409,7 +1661,7 @@ ENTRY(int3)
16895 call do_int3
16896 jmp ret_from_exception
16897 CFI_ENDPROC
16898-END(int3)
16899+ENDPROC(int3)
16900
16901 ENTRY(general_protection)
16902 RING0_EC_FRAME
16903@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
16904 CFI_ADJUST_CFA_OFFSET 4
16905 jmp error_code
16906 CFI_ENDPROC
16907-END(general_protection)
16908+ENDPROC(general_protection)
16909
16910 /*
16911 * End of kprobes section
16912diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16913index 34a56a9..74613c5 100644
16914--- a/arch/x86/kernel/entry_64.S
16915+++ b/arch/x86/kernel/entry_64.S
16916@@ -53,6 +53,8 @@
16917 #include <asm/paravirt.h>
16918 #include <asm/ftrace.h>
16919 #include <asm/percpu.h>
16920+#include <asm/pgtable.h>
16921+#include <asm/alternative-asm.h>
16922
16923 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16924 #include <linux/elf-em.h>
16925@@ -64,8 +66,9 @@
16926 #ifdef CONFIG_FUNCTION_TRACER
16927 #ifdef CONFIG_DYNAMIC_FTRACE
16928 ENTRY(mcount)
16929+ pax_force_retaddr
16930 retq
16931-END(mcount)
16932+ENDPROC(mcount)
16933
16934 ENTRY(ftrace_caller)
16935 cmpl $0, function_trace_stop
16936@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
16937 #endif
16938
16939 GLOBAL(ftrace_stub)
16940+ pax_force_retaddr
16941 retq
16942-END(ftrace_caller)
16943+ENDPROC(ftrace_caller)
16944
16945 #else /* ! CONFIG_DYNAMIC_FTRACE */
16946 ENTRY(mcount)
16947@@ -108,6 +112,7 @@ ENTRY(mcount)
16948 #endif
16949
16950 GLOBAL(ftrace_stub)
16951+ pax_force_retaddr
16952 retq
16953
16954 trace:
16955@@ -117,12 +122,13 @@ trace:
16956 movq 8(%rbp), %rsi
16957 subq $MCOUNT_INSN_SIZE, %rdi
16958
16959+ pax_force_fptr ftrace_trace_function
16960 call *ftrace_trace_function
16961
16962 MCOUNT_RESTORE_FRAME
16963
16964 jmp ftrace_stub
16965-END(mcount)
16966+ENDPROC(mcount)
16967 #endif /* CONFIG_DYNAMIC_FTRACE */
16968 #endif /* CONFIG_FUNCTION_TRACER */
16969
16970@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
16971
16972 MCOUNT_RESTORE_FRAME
16973
16974+ pax_force_retaddr
16975 retq
16976-END(ftrace_graph_caller)
16977+ENDPROC(ftrace_graph_caller)
16978
16979 GLOBAL(return_to_handler)
16980 subq $24, %rsp
16981@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
16982 movq 8(%rsp), %rdx
16983 movq (%rsp), %rax
16984 addq $16, %rsp
16985+ pax_force_retaddr
16986 retq
16987 #endif
16988
16989@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
16990 ENDPROC(native_usergs_sysret64)
16991 #endif /* CONFIG_PARAVIRT */
16992
16993+ .macro ljmpq sel, off
16994+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16995+ .byte 0x48; ljmp *1234f(%rip)
16996+ .pushsection .rodata
16997+ .align 16
16998+ 1234: .quad \off; .word \sel
16999+ .popsection
17000+#else
17001+ pushq $\sel
17002+ pushq $\off
17003+ lretq
17004+#endif
17005+ .endm
17006+
17007+ .macro pax_enter_kernel
17008+ pax_set_fptr_mask
17009+#ifdef CONFIG_PAX_KERNEXEC
17010+ call pax_enter_kernel
17011+#endif
17012+ .endm
17013+
17014+ .macro pax_exit_kernel
17015+#ifdef CONFIG_PAX_KERNEXEC
17016+ call pax_exit_kernel
17017+#endif
17018+ .endm
17019+
17020+#ifdef CONFIG_PAX_KERNEXEC
17021+ENTRY(pax_enter_kernel)
17022+ pushq %rdi
17023+
17024+#ifdef CONFIG_PARAVIRT
17025+ PV_SAVE_REGS(CLBR_RDI)
17026+#endif
17027+
17028+ GET_CR0_INTO_RDI
17029+ bts $16,%rdi
17030+ jnc 3f
17031+ mov %cs,%edi
17032+ cmp $__KERNEL_CS,%edi
17033+ jnz 2f
17034+1:
17035+
17036+#ifdef CONFIG_PARAVIRT
17037+ PV_RESTORE_REGS(CLBR_RDI)
17038+#endif
17039+
17040+ popq %rdi
17041+ pax_force_retaddr
17042+ retq
17043+
17044+2: ljmpq __KERNEL_CS,1f
17045+3: ljmpq __KERNEXEC_KERNEL_CS,4f
17046+4: SET_RDI_INTO_CR0
17047+ jmp 1b
17048+ENDPROC(pax_enter_kernel)
17049+
17050+ENTRY(pax_exit_kernel)
17051+ pushq %rdi
17052+
17053+#ifdef CONFIG_PARAVIRT
17054+ PV_SAVE_REGS(CLBR_RDI)
17055+#endif
17056+
17057+ mov %cs,%rdi
17058+ cmp $__KERNEXEC_KERNEL_CS,%edi
17059+ jz 2f
17060+1:
17061+
17062+#ifdef CONFIG_PARAVIRT
17063+ PV_RESTORE_REGS(CLBR_RDI);
17064+#endif
17065+
17066+ popq %rdi
17067+ pax_force_retaddr
17068+ retq
17069+
17070+2: GET_CR0_INTO_RDI
17071+ btr $16,%rdi
17072+ ljmpq __KERNEL_CS,3f
17073+3: SET_RDI_INTO_CR0
17074+ jmp 1b
17075+#ifdef CONFIG_PARAVIRT
17076+ PV_RESTORE_REGS(CLBR_RDI);
17077+#endif
17078+
17079+ popq %rdi
17080+ pax_force_retaddr
17081+ retq
17082+ENDPROC(pax_exit_kernel)
17083+#endif
17084+
17085+ .macro pax_enter_kernel_user
17086+ pax_set_fptr_mask
17087+#ifdef CONFIG_PAX_MEMORY_UDEREF
17088+ call pax_enter_kernel_user
17089+#endif
17090+ .endm
17091+
17092+ .macro pax_exit_kernel_user
17093+#ifdef CONFIG_PAX_MEMORY_UDEREF
17094+ call pax_exit_kernel_user
17095+#endif
17096+#ifdef CONFIG_PAX_RANDKSTACK
17097+ pushq %rax
17098+ call pax_randomize_kstack
17099+ popq %rax
17100+#endif
17101+ .endm
17102+
17103+#ifdef CONFIG_PAX_MEMORY_UDEREF
17104+ENTRY(pax_enter_kernel_user)
17105+ pushq %rdi
17106+ pushq %rbx
17107+
17108+#ifdef CONFIG_PARAVIRT
17109+ PV_SAVE_REGS(CLBR_RDI)
17110+#endif
17111+
17112+ GET_CR3_INTO_RDI
17113+ mov %rdi,%rbx
17114+ add $__START_KERNEL_map,%rbx
17115+ sub phys_base(%rip),%rbx
17116+
17117+#ifdef CONFIG_PARAVIRT
17118+ pushq %rdi
17119+ cmpl $0, pv_info+PARAVIRT_enabled
17120+ jz 1f
17121+ i = 0
17122+ .rept USER_PGD_PTRS
17123+ mov i*8(%rbx),%rsi
17124+ mov $0,%sil
17125+ lea i*8(%rbx),%rdi
17126+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17127+ i = i + 1
17128+ .endr
17129+ jmp 2f
17130+1:
17131+#endif
17132+
17133+ i = 0
17134+ .rept USER_PGD_PTRS
17135+ movb $0,i*8(%rbx)
17136+ i = i + 1
17137+ .endr
17138+
17139+#ifdef CONFIG_PARAVIRT
17140+2: popq %rdi
17141+#endif
17142+ SET_RDI_INTO_CR3
17143+
17144+#ifdef CONFIG_PAX_KERNEXEC
17145+ GET_CR0_INTO_RDI
17146+ bts $16,%rdi
17147+ SET_RDI_INTO_CR0
17148+#endif
17149+
17150+#ifdef CONFIG_PARAVIRT
17151+ PV_RESTORE_REGS(CLBR_RDI)
17152+#endif
17153+
17154+ popq %rbx
17155+ popq %rdi
17156+ pax_force_retaddr
17157+ retq
17158+ENDPROC(pax_enter_kernel_user)
17159+
17160+ENTRY(pax_exit_kernel_user)
17161+ push %rdi
17162+
17163+#ifdef CONFIG_PARAVIRT
17164+ pushq %rbx
17165+ PV_SAVE_REGS(CLBR_RDI)
17166+#endif
17167+
17168+#ifdef CONFIG_PAX_KERNEXEC
17169+ GET_CR0_INTO_RDI
17170+ btr $16,%rdi
17171+ SET_RDI_INTO_CR0
17172+#endif
17173+
17174+ GET_CR3_INTO_RDI
17175+ add $__START_KERNEL_map,%rdi
17176+ sub phys_base(%rip),%rdi
17177+
17178+#ifdef CONFIG_PARAVIRT
17179+ cmpl $0, pv_info+PARAVIRT_enabled
17180+ jz 1f
17181+ mov %rdi,%rbx
17182+ i = 0
17183+ .rept USER_PGD_PTRS
17184+ mov i*8(%rbx),%rsi
17185+ mov $0x67,%sil
17186+ lea i*8(%rbx),%rdi
17187+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17188+ i = i + 1
17189+ .endr
17190+ jmp 2f
17191+1:
17192+#endif
17193+
17194+ i = 0
17195+ .rept USER_PGD_PTRS
17196+ movb $0x67,i*8(%rdi)
17197+ i = i + 1
17198+ .endr
17199+
17200+#ifdef CONFIG_PARAVIRT
17201+2: PV_RESTORE_REGS(CLBR_RDI)
17202+ popq %rbx
17203+#endif
17204+
17205+ popq %rdi
17206+ pax_force_retaddr
17207+ retq
17208+ENDPROC(pax_exit_kernel_user)
17209+#endif
17210+
17211+.macro pax_erase_kstack
17212+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17213+ call pax_erase_kstack
17214+#endif
17215+.endm
17216+
17217+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17218+/*
17219+ * r11: thread_info
17220+ * rcx, rdx: can be clobbered
17221+ */
17222+ENTRY(pax_erase_kstack)
17223+ pushq %rdi
17224+ pushq %rax
17225+ pushq %r11
17226+
17227+ GET_THREAD_INFO(%r11)
17228+ mov TI_lowest_stack(%r11), %rdi
17229+ mov $-0xBEEF, %rax
17230+ std
17231+
17232+1: mov %edi, %ecx
17233+ and $THREAD_SIZE_asm - 1, %ecx
17234+ shr $3, %ecx
17235+ repne scasq
17236+ jecxz 2f
17237+
17238+ cmp $2*8, %ecx
17239+ jc 2f
17240+
17241+ mov $2*8, %ecx
17242+ repe scasq
17243+ jecxz 2f
17244+ jne 1b
17245+
17246+2: cld
17247+ mov %esp, %ecx
17248+ sub %edi, %ecx
17249+
17250+ cmp $THREAD_SIZE_asm, %rcx
17251+ jb 3f
17252+ ud2
17253+3:
17254+
17255+ shr $3, %ecx
17256+ rep stosq
17257+
17258+ mov TI_task_thread_sp0(%r11), %rdi
17259+ sub $256, %rdi
17260+ mov %rdi, TI_lowest_stack(%r11)
17261+
17262+ popq %r11
17263+ popq %rax
17264+ popq %rdi
17265+ pax_force_retaddr
17266+ ret
17267+ENDPROC(pax_erase_kstack)
17268+#endif
17269
17270 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17271 #ifdef CONFIG_TRACE_IRQFLAGS
17272@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
17273 .endm
17274
17275 .macro UNFAKE_STACK_FRAME
17276- addq $8*6, %rsp
17277- CFI_ADJUST_CFA_OFFSET -(6*8)
17278+ addq $8*6 + ARG_SKIP, %rsp
17279+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17280 .endm
17281
17282 /*
17283@@ -317,7 +601,7 @@ ENTRY(save_args)
17284 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
17285 movq_cfi rbp, 8 /* push %rbp */
17286 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
17287- testl $3, CS(%rdi)
17288+ testb $3, CS(%rdi)
17289 je 1f
17290 SWAPGS
17291 /*
17292@@ -337,9 +621,10 @@ ENTRY(save_args)
17293 * We entered an interrupt context - irqs are off:
17294 */
17295 2: TRACE_IRQS_OFF
17296+ pax_force_retaddr_bts
17297 ret
17298 CFI_ENDPROC
17299-END(save_args)
17300+ENDPROC(save_args)
17301
17302 ENTRY(save_rest)
17303 PARTIAL_FRAME 1 REST_SKIP+8
17304@@ -352,9 +637,10 @@ ENTRY(save_rest)
17305 movq_cfi r15, R15+16
17306 movq %r11, 8(%rsp) /* return address */
17307 FIXUP_TOP_OF_STACK %r11, 16
17308+ pax_force_retaddr
17309 ret
17310 CFI_ENDPROC
17311-END(save_rest)
17312+ENDPROC(save_rest)
17313
17314 /* save complete stack frame */
17315 .pushsection .kprobes.text, "ax"
17316@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
17317 js 1f /* negative -> in kernel */
17318 SWAPGS
17319 xorl %ebx,%ebx
17320-1: ret
17321+1: pax_force_retaddr_bts
17322+ ret
17323 CFI_ENDPROC
17324-END(save_paranoid)
17325+ENDPROC(save_paranoid)
17326 .popsection
17327
17328 /*
17329@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
17330
17331 RESTORE_REST
17332
17333- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17334+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17335 je int_ret_from_sys_call
17336
17337 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17338@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
17339 jmp ret_from_sys_call # go to the SYSRET fastpath
17340
17341 CFI_ENDPROC
17342-END(ret_from_fork)
17343+ENDPROC(ret_from_fork)
17344
17345 /*
17346 * System call entry. Upto 6 arguments in registers are supported.
17347@@ -455,7 +742,7 @@ END(ret_from_fork)
17348 ENTRY(system_call)
17349 CFI_STARTPROC simple
17350 CFI_SIGNAL_FRAME
17351- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17352+ CFI_DEF_CFA rsp,0
17353 CFI_REGISTER rip,rcx
17354 /*CFI_REGISTER rflags,r11*/
17355 SWAPGS_UNSAFE_STACK
17356@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
17357
17358 movq %rsp,PER_CPU_VAR(old_rsp)
17359 movq PER_CPU_VAR(kernel_stack),%rsp
17360+ SAVE_ARGS 8*6,1
17361+ pax_enter_kernel_user
17362 /*
17363 * No need to follow this irqs off/on section - it's straight
17364 * and short:
17365 */
17366 ENABLE_INTERRUPTS(CLBR_NONE)
17367- SAVE_ARGS 8,1
17368 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17369 movq %rcx,RIP-ARGOFFSET(%rsp)
17370 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17371@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
17372 system_call_fastpath:
17373 cmpq $__NR_syscall_max,%rax
17374 ja badsys
17375- movq %r10,%rcx
17376+ movq R10-ARGOFFSET(%rsp),%rcx
17377 call *sys_call_table(,%rax,8) # XXX: rip relative
17378 movq %rax,RAX-ARGOFFSET(%rsp)
17379 /*
17380@@ -502,6 +790,8 @@ sysret_check:
17381 andl %edi,%edx
17382 jnz sysret_careful
17383 CFI_REMEMBER_STATE
17384+ pax_exit_kernel_user
17385+ pax_erase_kstack
17386 /*
17387 * sysretq will re-enable interrupts:
17388 */
17389@@ -555,14 +845,18 @@ badsys:
17390 * jump back to the normal fast path.
17391 */
17392 auditsys:
17393- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17394+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17395 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17396 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17397 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17398 movq %rax,%rsi /* 2nd arg: syscall number */
17399 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17400 call audit_syscall_entry
17401+
17402+ pax_erase_kstack
17403+
17404 LOAD_ARGS 0 /* reload call-clobbered registers */
17405+ pax_set_fptr_mask
17406 jmp system_call_fastpath
17407
17408 /*
17409@@ -592,16 +886,20 @@ tracesys:
17410 FIXUP_TOP_OF_STACK %rdi
17411 movq %rsp,%rdi
17412 call syscall_trace_enter
17413+
17414+ pax_erase_kstack
17415+
17416 /*
17417 * Reload arg registers from stack in case ptrace changed them.
17418 * We don't reload %rax because syscall_trace_enter() returned
17419 * the value it wants us to use in the table lookup.
17420 */
17421 LOAD_ARGS ARGOFFSET, 1
17422+ pax_set_fptr_mask
17423 RESTORE_REST
17424 cmpq $__NR_syscall_max,%rax
17425 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17426- movq %r10,%rcx /* fixup for C */
17427+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17428 call *sys_call_table(,%rax,8)
17429 movq %rax,RAX-ARGOFFSET(%rsp)
17430 /* Use IRET because user could have changed frame */
17431@@ -613,7 +911,7 @@ tracesys:
17432 GLOBAL(int_ret_from_sys_call)
17433 DISABLE_INTERRUPTS(CLBR_NONE)
17434 TRACE_IRQS_OFF
17435- testl $3,CS-ARGOFFSET(%rsp)
17436+ testb $3,CS-ARGOFFSET(%rsp)
17437 je retint_restore_args
17438 movl $_TIF_ALLWORK_MASK,%edi
17439 /* edi: mask to check */
17440@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
17441 andl %edi,%edx
17442 jnz int_careful
17443 andl $~TS_COMPAT,TI_status(%rcx)
17444+ pax_erase_kstack
17445 jmp retint_swapgs
17446
17447 /* Either reschedule or signal or syscall exit tracking needed. */
17448@@ -674,7 +973,7 @@ int_restore_rest:
17449 TRACE_IRQS_OFF
17450 jmp int_with_check
17451 CFI_ENDPROC
17452-END(system_call)
17453+ENDPROC(system_call)
17454
17455 /*
17456 * Certain special system calls that need to save a complete full stack frame.
17457@@ -690,7 +989,7 @@ ENTRY(\label)
17458 call \func
17459 jmp ptregscall_common
17460 CFI_ENDPROC
17461-END(\label)
17462+ENDPROC(\label)
17463 .endm
17464
17465 PTREGSCALL stub_clone, sys_clone, %r8
17466@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
17467 movq_cfi_restore R12+8, r12
17468 movq_cfi_restore RBP+8, rbp
17469 movq_cfi_restore RBX+8, rbx
17470+ pax_force_retaddr
17471 ret $REST_SKIP /* pop extended registers */
17472 CFI_ENDPROC
17473-END(ptregscall_common)
17474+ENDPROC(ptregscall_common)
17475
17476 ENTRY(stub_execve)
17477 CFI_STARTPROC
17478@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
17479 RESTORE_REST
17480 jmp int_ret_from_sys_call
17481 CFI_ENDPROC
17482-END(stub_execve)
17483+ENDPROC(stub_execve)
17484
17485 /*
17486 * sigreturn is special because it needs to restore all registers on return.
17487@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
17488 RESTORE_REST
17489 jmp int_ret_from_sys_call
17490 CFI_ENDPROC
17491-END(stub_rt_sigreturn)
17492+ENDPROC(stub_rt_sigreturn)
17493
17494 /*
17495 * Build the entry stubs and pointer table with some assembler magic.
17496@@ -780,7 +1080,7 @@ vector=vector+1
17497 2: jmp common_interrupt
17498 .endr
17499 CFI_ENDPROC
17500-END(irq_entries_start)
17501+ENDPROC(irq_entries_start)
17502
17503 .previous
17504 END(interrupt)
17505@@ -800,6 +1100,16 @@ END(interrupt)
17506 CFI_ADJUST_CFA_OFFSET 10*8
17507 call save_args
17508 PARTIAL_FRAME 0
17509+#ifdef CONFIG_PAX_MEMORY_UDEREF
17510+ testb $3, CS(%rdi)
17511+ jnz 1f
17512+ pax_enter_kernel
17513+ jmp 2f
17514+1: pax_enter_kernel_user
17515+2:
17516+#else
17517+ pax_enter_kernel
17518+#endif
17519 call \func
17520 .endm
17521
17522@@ -822,7 +1132,7 @@ ret_from_intr:
17523 CFI_ADJUST_CFA_OFFSET -8
17524 exit_intr:
17525 GET_THREAD_INFO(%rcx)
17526- testl $3,CS-ARGOFFSET(%rsp)
17527+ testb $3,CS-ARGOFFSET(%rsp)
17528 je retint_kernel
17529
17530 /* Interrupt came from user space */
17531@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
17532 * The iretq could re-enable interrupts:
17533 */
17534 DISABLE_INTERRUPTS(CLBR_ANY)
17535+ pax_exit_kernel_user
17536 TRACE_IRQS_IRETQ
17537 SWAPGS
17538 jmp restore_args
17539
17540 retint_restore_args: /* return to kernel space */
17541 DISABLE_INTERRUPTS(CLBR_ANY)
17542+ pax_exit_kernel
17543+ pax_force_retaddr RIP-ARGOFFSET
17544 /*
17545 * The iretq could re-enable interrupts:
17546 */
17547@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
17548 #endif
17549
17550 CFI_ENDPROC
17551-END(common_interrupt)
17552+ENDPROC(common_interrupt)
17553
17554 /*
17555 * APIC interrupts.
17556@@ -953,7 +1266,7 @@ ENTRY(\sym)
17557 interrupt \do_sym
17558 jmp ret_from_intr
17559 CFI_ENDPROC
17560-END(\sym)
17561+ENDPROC(\sym)
17562 .endm
17563
17564 #ifdef CONFIG_SMP
17565@@ -1032,12 +1345,22 @@ ENTRY(\sym)
17566 CFI_ADJUST_CFA_OFFSET 15*8
17567 call error_entry
17568 DEFAULT_FRAME 0
17569+#ifdef CONFIG_PAX_MEMORY_UDEREF
17570+ testb $3, CS(%rsp)
17571+ jnz 1f
17572+ pax_enter_kernel
17573+ jmp 2f
17574+1: pax_enter_kernel_user
17575+2:
17576+#else
17577+ pax_enter_kernel
17578+#endif
17579 movq %rsp,%rdi /* pt_regs pointer */
17580 xorl %esi,%esi /* no error code */
17581 call \do_sym
17582 jmp error_exit /* %ebx: no swapgs flag */
17583 CFI_ENDPROC
17584-END(\sym)
17585+ENDPROC(\sym)
17586 .endm
17587
17588 .macro paranoidzeroentry sym do_sym
17589@@ -1049,12 +1372,22 @@ ENTRY(\sym)
17590 subq $15*8, %rsp
17591 call save_paranoid
17592 TRACE_IRQS_OFF
17593+#ifdef CONFIG_PAX_MEMORY_UDEREF
17594+ testb $3, CS(%rsp)
17595+ jnz 1f
17596+ pax_enter_kernel
17597+ jmp 2f
17598+1: pax_enter_kernel_user
17599+2:
17600+#else
17601+ pax_enter_kernel
17602+#endif
17603 movq %rsp,%rdi /* pt_regs pointer */
17604 xorl %esi,%esi /* no error code */
17605 call \do_sym
17606 jmp paranoid_exit /* %ebx: no swapgs flag */
17607 CFI_ENDPROC
17608-END(\sym)
17609+ENDPROC(\sym)
17610 .endm
17611
17612 .macro paranoidzeroentry_ist sym do_sym ist
17613@@ -1066,15 +1399,30 @@ ENTRY(\sym)
17614 subq $15*8, %rsp
17615 call save_paranoid
17616 TRACE_IRQS_OFF
17617+#ifdef CONFIG_PAX_MEMORY_UDEREF
17618+ testb $3, CS(%rsp)
17619+ jnz 1f
17620+ pax_enter_kernel
17621+ jmp 2f
17622+1: pax_enter_kernel_user
17623+2:
17624+#else
17625+ pax_enter_kernel
17626+#endif
17627 movq %rsp,%rdi /* pt_regs pointer */
17628 xorl %esi,%esi /* no error code */
17629- PER_CPU(init_tss, %rbp)
17630+#ifdef CONFIG_SMP
17631+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
17632+ lea init_tss(%rbp), %rbp
17633+#else
17634+ lea init_tss(%rip), %rbp
17635+#endif
17636 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17637 call \do_sym
17638 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17639 jmp paranoid_exit /* %ebx: no swapgs flag */
17640 CFI_ENDPROC
17641-END(\sym)
17642+ENDPROC(\sym)
17643 .endm
17644
17645 .macro errorentry sym do_sym
17646@@ -1085,13 +1433,23 @@ ENTRY(\sym)
17647 CFI_ADJUST_CFA_OFFSET 15*8
17648 call error_entry
17649 DEFAULT_FRAME 0
17650+#ifdef CONFIG_PAX_MEMORY_UDEREF
17651+ testb $3, CS(%rsp)
17652+ jnz 1f
17653+ pax_enter_kernel
17654+ jmp 2f
17655+1: pax_enter_kernel_user
17656+2:
17657+#else
17658+ pax_enter_kernel
17659+#endif
17660 movq %rsp,%rdi /* pt_regs pointer */
17661 movq ORIG_RAX(%rsp),%rsi /* get error code */
17662 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17663 call \do_sym
17664 jmp error_exit /* %ebx: no swapgs flag */
17665 CFI_ENDPROC
17666-END(\sym)
17667+ENDPROC(\sym)
17668 .endm
17669
17670 /* error code is on the stack already */
17671@@ -1104,13 +1462,23 @@ ENTRY(\sym)
17672 call save_paranoid
17673 DEFAULT_FRAME 0
17674 TRACE_IRQS_OFF
17675+#ifdef CONFIG_PAX_MEMORY_UDEREF
17676+ testb $3, CS(%rsp)
17677+ jnz 1f
17678+ pax_enter_kernel
17679+ jmp 2f
17680+1: pax_enter_kernel_user
17681+2:
17682+#else
17683+ pax_enter_kernel
17684+#endif
17685 movq %rsp,%rdi /* pt_regs pointer */
17686 movq ORIG_RAX(%rsp),%rsi /* get error code */
17687 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17688 call \do_sym
17689 jmp paranoid_exit /* %ebx: no swapgs flag */
17690 CFI_ENDPROC
17691-END(\sym)
17692+ENDPROC(\sym)
17693 .endm
17694
17695 zeroentry divide_error do_divide_error
17696@@ -1141,9 +1509,10 @@ gs_change:
17697 SWAPGS
17698 popf
17699 CFI_ADJUST_CFA_OFFSET -8
17700+ pax_force_retaddr
17701 ret
17702 CFI_ENDPROC
17703-END(native_load_gs_index)
17704+ENDPROC(native_load_gs_index)
17705
17706 .section __ex_table,"a"
17707 .align 8
17708@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
17709 * of hacks for example to fork off the per-CPU idle tasks.
17710 * [Hopefully no generic code relies on the reschedule -AK]
17711 */
17712- RESTORE_ALL
17713+ RESTORE_REST
17714 UNFAKE_STACK_FRAME
17715+ pax_force_retaddr
17716 ret
17717 CFI_ENDPROC
17718-END(kernel_thread)
17719+ENDPROC(kernel_thread)
17720
17721 ENTRY(child_rip)
17722 pushq $0 # fake return address
17723@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
17724 */
17725 movq %rdi, %rax
17726 movq %rsi, %rdi
17727+ pax_force_fptr %rax
17728 call *%rax
17729 # exit
17730 mov %eax, %edi
17731 call do_exit
17732 ud2 # padding for call trace
17733 CFI_ENDPROC
17734-END(child_rip)
17735+ENDPROC(child_rip)
17736
17737 /*
17738 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
17739@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
17740 RESTORE_REST
17741 testq %rax,%rax
17742 je int_ret_from_sys_call
17743- RESTORE_ARGS
17744 UNFAKE_STACK_FRAME
17745+ pax_force_retaddr
17746 ret
17747 CFI_ENDPROC
17748-END(kernel_execve)
17749+ENDPROC(kernel_execve)
17750
17751 /* Call softirq on interrupt stack. Interrupts are off. */
17752 ENTRY(call_softirq)
17753@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
17754 CFI_DEF_CFA_REGISTER rsp
17755 CFI_ADJUST_CFA_OFFSET -8
17756 decl PER_CPU_VAR(irq_count)
17757+ pax_force_retaddr
17758 ret
17759 CFI_ENDPROC
17760-END(call_softirq)
17761+ENDPROC(call_softirq)
17762
17763 #ifdef CONFIG_XEN
17764 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17765@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17766 decl PER_CPU_VAR(irq_count)
17767 jmp error_exit
17768 CFI_ENDPROC
17769-END(xen_do_hypervisor_callback)
17770+ENDPROC(xen_do_hypervisor_callback)
17771
17772 /*
17773 * Hypervisor uses this for application faults while it executes.
17774@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
17775 SAVE_ALL
17776 jmp error_exit
17777 CFI_ENDPROC
17778-END(xen_failsafe_callback)
17779+ENDPROC(xen_failsafe_callback)
17780
17781 #endif /* CONFIG_XEN */
17782
17783@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
17784 TRACE_IRQS_OFF
17785 testl %ebx,%ebx /* swapgs needed? */
17786 jnz paranoid_restore
17787- testl $3,CS(%rsp)
17788+ testb $3,CS(%rsp)
17789 jnz paranoid_userspace
17790+#ifdef CONFIG_PAX_MEMORY_UDEREF
17791+ pax_exit_kernel
17792+ TRACE_IRQS_IRETQ 0
17793+ SWAPGS_UNSAFE_STACK
17794+ RESTORE_ALL 8
17795+ pax_force_retaddr_bts
17796+ jmp irq_return
17797+#endif
17798 paranoid_swapgs:
17799+#ifdef CONFIG_PAX_MEMORY_UDEREF
17800+ pax_exit_kernel_user
17801+#else
17802+ pax_exit_kernel
17803+#endif
17804 TRACE_IRQS_IRETQ 0
17805 SWAPGS_UNSAFE_STACK
17806 RESTORE_ALL 8
17807 jmp irq_return
17808 paranoid_restore:
17809+ pax_exit_kernel
17810 TRACE_IRQS_IRETQ 0
17811 RESTORE_ALL 8
17812+ pax_force_retaddr_bts
17813 jmp irq_return
17814 paranoid_userspace:
17815 GET_THREAD_INFO(%rcx)
17816@@ -1443,7 +1830,7 @@ paranoid_schedule:
17817 TRACE_IRQS_OFF
17818 jmp paranoid_userspace
17819 CFI_ENDPROC
17820-END(paranoid_exit)
17821+ENDPROC(paranoid_exit)
17822
17823 /*
17824 * Exception entry point. This expects an error code/orig_rax on the stack.
17825@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
17826 movq_cfi r14, R14+8
17827 movq_cfi r15, R15+8
17828 xorl %ebx,%ebx
17829- testl $3,CS+8(%rsp)
17830+ testb $3,CS+8(%rsp)
17831 je error_kernelspace
17832 error_swapgs:
17833 SWAPGS
17834 error_sti:
17835 TRACE_IRQS_OFF
17836+ pax_force_retaddr_bts
17837 ret
17838 CFI_ENDPROC
17839
17840@@ -1497,7 +1885,7 @@ error_kernelspace:
17841 cmpq $gs_change,RIP+8(%rsp)
17842 je error_swapgs
17843 jmp error_sti
17844-END(error_entry)
17845+ENDPROC(error_entry)
17846
17847
17848 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17849@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
17850 jnz retint_careful
17851 jmp retint_swapgs
17852 CFI_ENDPROC
17853-END(error_exit)
17854+ENDPROC(error_exit)
17855
17856
17857 /* runs on exception stack */
17858@@ -1529,6 +1917,16 @@ ENTRY(nmi)
17859 CFI_ADJUST_CFA_OFFSET 15*8
17860 call save_paranoid
17861 DEFAULT_FRAME 0
17862+#ifdef CONFIG_PAX_MEMORY_UDEREF
17863+ testb $3, CS(%rsp)
17864+ jnz 1f
17865+ pax_enter_kernel
17866+ jmp 2f
17867+1: pax_enter_kernel_user
17868+2:
17869+#else
17870+ pax_enter_kernel
17871+#endif
17872 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17873 movq %rsp,%rdi
17874 movq $-1,%rsi
17875@@ -1539,12 +1937,28 @@ ENTRY(nmi)
17876 DISABLE_INTERRUPTS(CLBR_NONE)
17877 testl %ebx,%ebx /* swapgs needed? */
17878 jnz nmi_restore
17879- testl $3,CS(%rsp)
17880+ testb $3,CS(%rsp)
17881 jnz nmi_userspace
17882+#ifdef CONFIG_PAX_MEMORY_UDEREF
17883+ pax_exit_kernel
17884+ SWAPGS_UNSAFE_STACK
17885+ RESTORE_ALL 8
17886+ pax_force_retaddr_bts
17887+ jmp irq_return
17888+#endif
17889 nmi_swapgs:
17890+#ifdef CONFIG_PAX_MEMORY_UDEREF
17891+ pax_exit_kernel_user
17892+#else
17893+ pax_exit_kernel
17894+#endif
17895 SWAPGS_UNSAFE_STACK
17896+ RESTORE_ALL 8
17897+ jmp irq_return
17898 nmi_restore:
17899+ pax_exit_kernel
17900 RESTORE_ALL 8
17901+ pax_force_retaddr_bts
17902 jmp irq_return
17903 nmi_userspace:
17904 GET_THREAD_INFO(%rcx)
17905@@ -1573,14 +1987,14 @@ nmi_schedule:
17906 jmp paranoid_exit
17907 CFI_ENDPROC
17908 #endif
17909-END(nmi)
17910+ENDPROC(nmi)
17911
17912 ENTRY(ignore_sysret)
17913 CFI_STARTPROC
17914 mov $-ENOSYS,%eax
17915 sysret
17916 CFI_ENDPROC
17917-END(ignore_sysret)
17918+ENDPROC(ignore_sysret)
17919
17920 /*
17921 * End of kprobes section
17922diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17923index 9dbb527..7b3615a 100644
17924--- a/arch/x86/kernel/ftrace.c
17925+++ b/arch/x86/kernel/ftrace.c
17926@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
17927 static void *mod_code_newcode; /* holds the text to write to the IP */
17928
17929 static unsigned nmi_wait_count;
17930-static atomic_t nmi_update_count = ATOMIC_INIT(0);
17931+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
17932
17933 int ftrace_arch_read_dyn_info(char *buf, int size)
17934 {
17935@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
17936
17937 r = snprintf(buf, size, "%u %u",
17938 nmi_wait_count,
17939- atomic_read(&nmi_update_count));
17940+ atomic_read_unchecked(&nmi_update_count));
17941 return r;
17942 }
17943
17944@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
17945 {
17946 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
17947 smp_rmb();
17948+ pax_open_kernel();
17949 ftrace_mod_code();
17950- atomic_inc(&nmi_update_count);
17951+ pax_close_kernel();
17952+ atomic_inc_unchecked(&nmi_update_count);
17953 }
17954 /* Must have previous changes seen before executions */
17955 smp_mb();
17956@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
17957
17958
17959
17960-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
17961+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
17962
17963 static unsigned char *ftrace_nop_replace(void)
17964 {
17965@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
17966 {
17967 unsigned char replaced[MCOUNT_INSN_SIZE];
17968
17969+ ip = ktla_ktva(ip);
17970+
17971 /*
17972 * Note: Due to modules and __init, code can
17973 * disappear and change, we need to protect against faulting
17974@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17975 unsigned char old[MCOUNT_INSN_SIZE], *new;
17976 int ret;
17977
17978- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17979+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17980 new = ftrace_call_replace(ip, (unsigned long)func);
17981 ret = ftrace_modify_code(ip, old, new);
17982
17983@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
17984 switch (faulted) {
17985 case 0:
17986 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
17987- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
17988+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
17989 break;
17990 case 1:
17991 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
17992- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
17993+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
17994 break;
17995 case 2:
17996 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
17997- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
17998+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
17999 break;
18000 }
18001
18002@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
18003 {
18004 unsigned char code[MCOUNT_INSN_SIZE];
18005
18006+ ip = ktla_ktva(ip);
18007+
18008 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
18009 return -EFAULT;
18010
18011diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
18012index 4f8e250..df24706 100644
18013--- a/arch/x86/kernel/head32.c
18014+++ b/arch/x86/kernel/head32.c
18015@@ -16,6 +16,7 @@
18016 #include <asm/apic.h>
18017 #include <asm/io_apic.h>
18018 #include <asm/bios_ebda.h>
18019+#include <asm/boot.h>
18020
18021 static void __init i386_default_early_setup(void)
18022 {
18023@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
18024 {
18025 reserve_trampoline_memory();
18026
18027- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18028+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18029
18030 #ifdef CONFIG_BLK_DEV_INITRD
18031 /* Reserve INITRD */
18032diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
18033index 34c3308..6fc4e76 100644
18034--- a/arch/x86/kernel/head_32.S
18035+++ b/arch/x86/kernel/head_32.S
18036@@ -19,10 +19,17 @@
18037 #include <asm/setup.h>
18038 #include <asm/processor-flags.h>
18039 #include <asm/percpu.h>
18040+#include <asm/msr-index.h>
18041
18042 /* Physical address */
18043 #define pa(X) ((X) - __PAGE_OFFSET)
18044
18045+#ifdef CONFIG_PAX_KERNEXEC
18046+#define ta(X) (X)
18047+#else
18048+#define ta(X) ((X) - __PAGE_OFFSET)
18049+#endif
18050+
18051 /*
18052 * References to members of the new_cpu_data structure.
18053 */
18054@@ -52,11 +59,7 @@
18055 * and small than max_low_pfn, otherwise will waste some page table entries
18056 */
18057
18058-#if PTRS_PER_PMD > 1
18059-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
18060-#else
18061-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
18062-#endif
18063+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
18064
18065 /* Enough space to fit pagetables for the low memory linear map */
18066 MAPPING_BEYOND_END = \
18067@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
18068 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18069
18070 /*
18071+ * Real beginning of normal "text" segment
18072+ */
18073+ENTRY(stext)
18074+ENTRY(_stext)
18075+
18076+/*
18077 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
18078 * %esi points to the real-mode code as a 32-bit pointer.
18079 * CS and DS must be 4 GB flat segments, but we don't depend on
18080@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18081 * can.
18082 */
18083 __HEAD
18084+
18085+#ifdef CONFIG_PAX_KERNEXEC
18086+ jmp startup_32
18087+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
18088+.fill PAGE_SIZE-5,1,0xcc
18089+#endif
18090+
18091 ENTRY(startup_32)
18092+ movl pa(stack_start),%ecx
18093+
18094 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
18095 us to not reload segments */
18096 testb $(1<<6), BP_loadflags(%esi)
18097@@ -95,7 +113,60 @@ ENTRY(startup_32)
18098 movl %eax,%es
18099 movl %eax,%fs
18100 movl %eax,%gs
18101+ movl %eax,%ss
18102 2:
18103+ leal -__PAGE_OFFSET(%ecx),%esp
18104+
18105+#ifdef CONFIG_SMP
18106+ movl $pa(cpu_gdt_table),%edi
18107+ movl $__per_cpu_load,%eax
18108+ movw %ax,__KERNEL_PERCPU + 2(%edi)
18109+ rorl $16,%eax
18110+ movb %al,__KERNEL_PERCPU + 4(%edi)
18111+ movb %ah,__KERNEL_PERCPU + 7(%edi)
18112+ movl $__per_cpu_end - 1,%eax
18113+ subl $__per_cpu_start,%eax
18114+ movw %ax,__KERNEL_PERCPU + 0(%edi)
18115+#endif
18116+
18117+#ifdef CONFIG_PAX_MEMORY_UDEREF
18118+ movl $NR_CPUS,%ecx
18119+ movl $pa(cpu_gdt_table),%edi
18120+1:
18121+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18122+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18123+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18124+ addl $PAGE_SIZE_asm,%edi
18125+ loop 1b
18126+#endif
18127+
18128+#ifdef CONFIG_PAX_KERNEXEC
18129+ movl $pa(boot_gdt),%edi
18130+ movl $__LOAD_PHYSICAL_ADDR,%eax
18131+ movw %ax,__BOOT_CS + 2(%edi)
18132+ rorl $16,%eax
18133+ movb %al,__BOOT_CS + 4(%edi)
18134+ movb %ah,__BOOT_CS + 7(%edi)
18135+ rorl $16,%eax
18136+
18137+ ljmp $(__BOOT_CS),$1f
18138+1:
18139+
18140+ movl $NR_CPUS,%ecx
18141+ movl $pa(cpu_gdt_table),%edi
18142+ addl $__PAGE_OFFSET,%eax
18143+1:
18144+ movw %ax,__KERNEL_CS + 2(%edi)
18145+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
18146+ rorl $16,%eax
18147+ movb %al,__KERNEL_CS + 4(%edi)
18148+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
18149+ movb %ah,__KERNEL_CS + 7(%edi)
18150+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
18151+ rorl $16,%eax
18152+ addl $PAGE_SIZE_asm,%edi
18153+ loop 1b
18154+#endif
18155
18156 /*
18157 * Clear BSS first so that there are no surprises...
18158@@ -140,9 +211,7 @@ ENTRY(startup_32)
18159 cmpl $num_subarch_entries, %eax
18160 jae bad_subarch
18161
18162- movl pa(subarch_entries)(,%eax,4), %eax
18163- subl $__PAGE_OFFSET, %eax
18164- jmp *%eax
18165+ jmp *pa(subarch_entries)(,%eax,4)
18166
18167 bad_subarch:
18168 WEAK(lguest_entry)
18169@@ -154,10 +223,10 @@ WEAK(xen_entry)
18170 __INITDATA
18171
18172 subarch_entries:
18173- .long default_entry /* normal x86/PC */
18174- .long lguest_entry /* lguest hypervisor */
18175- .long xen_entry /* Xen hypervisor */
18176- .long default_entry /* Moorestown MID */
18177+ .long ta(default_entry) /* normal x86/PC */
18178+ .long ta(lguest_entry) /* lguest hypervisor */
18179+ .long ta(xen_entry) /* Xen hypervisor */
18180+ .long ta(default_entry) /* Moorestown MID */
18181 num_subarch_entries = (. - subarch_entries) / 4
18182 .previous
18183 #endif /* CONFIG_PARAVIRT */
18184@@ -218,8 +287,11 @@ default_entry:
18185 movl %eax, pa(max_pfn_mapped)
18186
18187 /* Do early initialization of the fixmap area */
18188- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18189- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18190+#ifdef CONFIG_COMPAT_VDSO
18191+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18192+#else
18193+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18194+#endif
18195 #else /* Not PAE */
18196
18197 page_pde_offset = (__PAGE_OFFSET >> 20);
18198@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18199 movl %eax, pa(max_pfn_mapped)
18200
18201 /* Do early initialization of the fixmap area */
18202- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18203- movl %eax,pa(swapper_pg_dir+0xffc)
18204+#ifdef CONFIG_COMPAT_VDSO
18205+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
18206+#else
18207+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
18208+#endif
18209 #endif
18210 jmp 3f
18211 /*
18212@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
18213 movl %eax,%es
18214 movl %eax,%fs
18215 movl %eax,%gs
18216+ movl pa(stack_start),%ecx
18217+ movl %eax,%ss
18218+ leal -__PAGE_OFFSET(%ecx),%esp
18219 #endif /* CONFIG_SMP */
18220 3:
18221
18222@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
18223 orl %edx,%eax
18224 movl %eax,%cr4
18225
18226+#ifdef CONFIG_X86_PAE
18227 btl $5, %eax # check if PAE is enabled
18228 jnc 6f
18229
18230@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
18231 cpuid
18232 cmpl $0x80000000, %eax
18233 jbe 6f
18234+
18235+ /* Clear bogus XD_DISABLE bits */
18236+ call verify_cpu
18237+
18238 mov $0x80000001, %eax
18239 cpuid
18240 /* Execute Disable bit supported? */
18241@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
18242 jnc 6f
18243
18244 /* Setup EFER (Extended Feature Enable Register) */
18245- movl $0xc0000080, %ecx
18246+ movl $MSR_EFER, %ecx
18247 rdmsr
18248
18249 btsl $11, %eax
18250 /* Make changes effective */
18251 wrmsr
18252
18253+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18254+ movl $1,pa(nx_enabled)
18255+#endif
18256+
18257 6:
18258
18259 /*
18260@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
18261 movl %eax,%cr0 /* ..and set paging (PG) bit */
18262 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
18263 1:
18264- /* Set up the stack pointer */
18265- lss stack_start,%esp
18266+ /* Shift the stack pointer to a virtual address */
18267+ addl $__PAGE_OFFSET, %esp
18268
18269 /*
18270 * Initialize eflags. Some BIOS's leave bits like NT set. This would
18271@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
18272
18273 #ifdef CONFIG_SMP
18274 cmpb $0, ready
18275- jz 1f /* Initial CPU cleans BSS */
18276- jmp checkCPUtype
18277-1:
18278+ jnz checkCPUtype
18279 #endif /* CONFIG_SMP */
18280
18281 /*
18282@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
18283 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18284 movl %eax,%ss # after changing gdt.
18285
18286- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18287+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18288 movl %eax,%ds
18289 movl %eax,%es
18290
18291@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
18292 */
18293 cmpb $0,ready
18294 jne 1f
18295- movl $per_cpu__gdt_page,%eax
18296+ movl $cpu_gdt_table,%eax
18297 movl $per_cpu__stack_canary,%ecx
18298+#ifdef CONFIG_SMP
18299+ addl $__per_cpu_load,%ecx
18300+#endif
18301 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18302 shrl $16, %ecx
18303 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18304 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
18305 1:
18306-#endif
18307 movl $(__KERNEL_STACK_CANARY),%eax
18308+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18309+ movl $(__USER_DS),%eax
18310+#else
18311+ xorl %eax,%eax
18312+#endif
18313 movl %eax,%gs
18314
18315 xorl %eax,%eax # Clear LDT
18316@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
18317
18318 cld # gcc2 wants the direction flag cleared at all times
18319 pushl $0 # fake return address for unwinder
18320-#ifdef CONFIG_SMP
18321- movb ready, %cl
18322 movb $1, ready
18323- cmpb $0,%cl # the first CPU calls start_kernel
18324- je 1f
18325- movl (stack_start), %esp
18326-1:
18327-#endif /* CONFIG_SMP */
18328 jmp *(initial_code)
18329
18330 /*
18331@@ -546,22 +631,22 @@ early_page_fault:
18332 jmp early_fault
18333
18334 early_fault:
18335- cld
18336 #ifdef CONFIG_PRINTK
18337+ cmpl $1,%ss:early_recursion_flag
18338+ je hlt_loop
18339+ incl %ss:early_recursion_flag
18340+ cld
18341 pusha
18342 movl $(__KERNEL_DS),%eax
18343 movl %eax,%ds
18344 movl %eax,%es
18345- cmpl $2,early_recursion_flag
18346- je hlt_loop
18347- incl early_recursion_flag
18348 movl %cr2,%eax
18349 pushl %eax
18350 pushl %edx /* trapno */
18351 pushl $fault_msg
18352 call printk
18353+; call dump_stack
18354 #endif
18355- call dump_stack
18356 hlt_loop:
18357 hlt
18358 jmp hlt_loop
18359@@ -569,8 +654,11 @@ hlt_loop:
18360 /* This is the default interrupt "handler" :-) */
18361 ALIGN
18362 ignore_int:
18363- cld
18364 #ifdef CONFIG_PRINTK
18365+ cmpl $2,%ss:early_recursion_flag
18366+ je hlt_loop
18367+ incl %ss:early_recursion_flag
18368+ cld
18369 pushl %eax
18370 pushl %ecx
18371 pushl %edx
18372@@ -579,9 +667,6 @@ ignore_int:
18373 movl $(__KERNEL_DS),%eax
18374 movl %eax,%ds
18375 movl %eax,%es
18376- cmpl $2,early_recursion_flag
18377- je hlt_loop
18378- incl early_recursion_flag
18379 pushl 16(%esp)
18380 pushl 24(%esp)
18381 pushl 32(%esp)
18382@@ -600,6 +685,8 @@ ignore_int:
18383 #endif
18384 iret
18385
18386+#include "verify_cpu.S"
18387+
18388 __REFDATA
18389 .align 4
18390 ENTRY(initial_code)
18391@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
18392 /*
18393 * BSS section
18394 */
18395-__PAGE_ALIGNED_BSS
18396- .align PAGE_SIZE_asm
18397 #ifdef CONFIG_X86_PAE
18398+.section .swapper_pg_pmd,"a",@progbits
18399 swapper_pg_pmd:
18400 .fill 1024*KPMDS,4,0
18401 #else
18402+.section .swapper_pg_dir,"a",@progbits
18403 ENTRY(swapper_pg_dir)
18404 .fill 1024,4,0
18405 #endif
18406+.section .swapper_pg_fixmap,"a",@progbits
18407 swapper_pg_fixmap:
18408 .fill 1024,4,0
18409 #ifdef CONFIG_X86_TRAMPOLINE
18410+.section .trampoline_pg_dir,"a",@progbits
18411 ENTRY(trampoline_pg_dir)
18412+#ifdef CONFIG_X86_PAE
18413+ .fill 4,8,0
18414+#else
18415 .fill 1024,4,0
18416 #endif
18417+#endif
18418+
18419+.section .empty_zero_page,"a",@progbits
18420 ENTRY(empty_zero_page)
18421 .fill 4096,1,0
18422
18423 /*
18424+ * The IDT has to be page-aligned to simplify the Pentium
18425+ * F0 0F bug workaround.. We have a special link segment
18426+ * for this.
18427+ */
18428+.section .idt,"a",@progbits
18429+ENTRY(idt_table)
18430+ .fill 256,8,0
18431+
18432+/*
18433 * This starts the data section.
18434 */
18435 #ifdef CONFIG_X86_PAE
18436-__PAGE_ALIGNED_DATA
18437- /* Page-aligned for the benefit of paravirt? */
18438- .align PAGE_SIZE_asm
18439+.section .swapper_pg_dir,"a",@progbits
18440+
18441 ENTRY(swapper_pg_dir)
18442 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18443 # if KPMDS == 3
18444@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
18445 # error "Kernel PMDs should be 1, 2 or 3"
18446 # endif
18447 .align PAGE_SIZE_asm /* needs to be page-sized too */
18448+
18449+#ifdef CONFIG_PAX_PER_CPU_PGD
18450+ENTRY(cpu_pgd)
18451+ .rept NR_CPUS
18452+ .fill 4,8,0
18453+ .endr
18454+#endif
18455+
18456 #endif
18457
18458 .data
18459+.balign 4
18460 ENTRY(stack_start)
18461- .long init_thread_union+THREAD_SIZE
18462- .long __BOOT_DS
18463+ .long init_thread_union+THREAD_SIZE-8
18464
18465 ready: .byte 0
18466
18467+.section .rodata,"a",@progbits
18468 early_recursion_flag:
18469 .long 0
18470
18471@@ -697,7 +809,7 @@ fault_msg:
18472 .word 0 # 32 bit align gdt_desc.address
18473 boot_gdt_descr:
18474 .word __BOOT_DS+7
18475- .long boot_gdt - __PAGE_OFFSET
18476+ .long pa(boot_gdt)
18477
18478 .word 0 # 32-bit align idt_desc.address
18479 idt_descr:
18480@@ -708,7 +820,7 @@ idt_descr:
18481 .word 0 # 32 bit align gdt_desc.address
18482 ENTRY(early_gdt_descr)
18483 .word GDT_ENTRIES*8-1
18484- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
18485+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18486
18487 /*
18488 * The boot_gdt must mirror the equivalent in setup.S and is
18489@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
18490 .align L1_CACHE_BYTES
18491 ENTRY(boot_gdt)
18492 .fill GDT_ENTRY_BOOT_CS,8,0
18493- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18494- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18495+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18496+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18497+
18498+ .align PAGE_SIZE_asm
18499+ENTRY(cpu_gdt_table)
18500+ .rept NR_CPUS
18501+ .quad 0x0000000000000000 /* NULL descriptor */
18502+ .quad 0x0000000000000000 /* 0x0b reserved */
18503+ .quad 0x0000000000000000 /* 0x13 reserved */
18504+ .quad 0x0000000000000000 /* 0x1b reserved */
18505+
18506+#ifdef CONFIG_PAX_KERNEXEC
18507+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18508+#else
18509+ .quad 0x0000000000000000 /* 0x20 unused */
18510+#endif
18511+
18512+ .quad 0x0000000000000000 /* 0x28 unused */
18513+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18514+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18515+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18516+ .quad 0x0000000000000000 /* 0x4b reserved */
18517+ .quad 0x0000000000000000 /* 0x53 reserved */
18518+ .quad 0x0000000000000000 /* 0x5b reserved */
18519+
18520+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18521+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18522+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18523+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18524+
18525+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18526+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18527+
18528+ /*
18529+ * Segments used for calling PnP BIOS have byte granularity.
18530+ * The code segments and data segments have fixed 64k limits,
18531+ * the transfer segment sizes are set at run time.
18532+ */
18533+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18534+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18535+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18536+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18537+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18538+
18539+ /*
18540+ * The APM segments have byte granularity and their bases
18541+ * are set at run time. All have 64k limits.
18542+ */
18543+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18544+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18545+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18546+
18547+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18548+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18549+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18550+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18551+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18552+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18553+
18554+ /* Be sure this is zeroed to avoid false validations in Xen */
18555+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18556+ .endr
18557diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18558index 780cd92..758b2a6 100644
18559--- a/arch/x86/kernel/head_64.S
18560+++ b/arch/x86/kernel/head_64.S
18561@@ -19,6 +19,8 @@
18562 #include <asm/cache.h>
18563 #include <asm/processor-flags.h>
18564 #include <asm/percpu.h>
18565+#include <asm/cpufeature.h>
18566+#include <asm/alternative-asm.h>
18567
18568 #ifdef CONFIG_PARAVIRT
18569 #include <asm/asm-offsets.h>
18570@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18571 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18572 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18573 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18574+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18575+L3_VMALLOC_START = pud_index(VMALLOC_START)
18576+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18577+L3_VMALLOC_END = pud_index(VMALLOC_END)
18578+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18579+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18580
18581 .text
18582 __HEAD
18583@@ -85,35 +93,23 @@ startup_64:
18584 */
18585 addq %rbp, init_level4_pgt + 0(%rip)
18586 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18587+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18588+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18589+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18590 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18591
18592 addq %rbp, level3_ident_pgt + 0(%rip)
18593+#ifndef CONFIG_XEN
18594+ addq %rbp, level3_ident_pgt + 8(%rip)
18595+#endif
18596
18597- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18598- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18599+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18600+
18601+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18602+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18603
18604 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18605-
18606- /* Add an Identity mapping if I am above 1G */
18607- leaq _text(%rip), %rdi
18608- andq $PMD_PAGE_MASK, %rdi
18609-
18610- movq %rdi, %rax
18611- shrq $PUD_SHIFT, %rax
18612- andq $(PTRS_PER_PUD - 1), %rax
18613- jz ident_complete
18614-
18615- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18616- leaq level3_ident_pgt(%rip), %rbx
18617- movq %rdx, 0(%rbx, %rax, 8)
18618-
18619- movq %rdi, %rax
18620- shrq $PMD_SHIFT, %rax
18621- andq $(PTRS_PER_PMD - 1), %rax
18622- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18623- leaq level2_spare_pgt(%rip), %rbx
18624- movq %rdx, 0(%rbx, %rax, 8)
18625-ident_complete:
18626+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18627
18628 /*
18629 * Fixup the kernel text+data virtual addresses. Note that
18630@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
18631 * after the boot processor executes this code.
18632 */
18633
18634- /* Enable PAE mode and PGE */
18635- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18636+ /* Enable PAE mode and PSE/PGE */
18637+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18638 movq %rax, %cr4
18639
18640 /* Setup early boot stage 4 level pagetables. */
18641@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
18642 movl $MSR_EFER, %ecx
18643 rdmsr
18644 btsl $_EFER_SCE, %eax /* Enable System Call */
18645- btl $20,%edi /* No Execute supported? */
18646+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18647 jnc 1f
18648 btsl $_EFER_NX, %eax
18649+ leaq init_level4_pgt(%rip), %rdi
18650+#ifndef CONFIG_EFI
18651+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18652+#endif
18653+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18654+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18655+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18656 1: wrmsr /* Make changes effective */
18657
18658 /* Setup cr0 */
18659@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
18660 * jump. In addition we need to ensure %cs is set so we make this
18661 * a far return.
18662 */
18663+ pax_set_fptr_mask
18664 movq initial_code(%rip),%rax
18665 pushq $0 # fake return address to stop unwinder
18666 pushq $__KERNEL_CS # set correct cs
18667@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
18668 .quad x86_64_start_kernel
18669 ENTRY(initial_gs)
18670 .quad INIT_PER_CPU_VAR(irq_stack_union)
18671- __FINITDATA
18672
18673 ENTRY(stack_start)
18674 .quad init_thread_union+THREAD_SIZE-8
18675 .word 0
18676+ __FINITDATA
18677
18678 bad_address:
18679 jmp bad_address
18680
18681- .section ".init.text","ax"
18682+ __INIT
18683 #ifdef CONFIG_EARLY_PRINTK
18684 .globl early_idt_handlers
18685 early_idt_handlers:
18686@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
18687 #endif /* EARLY_PRINTK */
18688 1: hlt
18689 jmp 1b
18690+ .previous
18691
18692 #ifdef CONFIG_EARLY_PRINTK
18693+ __INITDATA
18694 early_recursion_flag:
18695 .long 0
18696+ .previous
18697
18698+ .section .rodata,"a",@progbits
18699 early_idt_msg:
18700 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18701 early_idt_ripmsg:
18702 .asciz "RIP %s\n"
18703+ .previous
18704 #endif /* CONFIG_EARLY_PRINTK */
18705- .previous
18706
18707+ .section .rodata,"a",@progbits
18708 #define NEXT_PAGE(name) \
18709 .balign PAGE_SIZE; \
18710 ENTRY(name)
18711@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
18712 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18713 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18714 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18715+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18716+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18717+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18718+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18719+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18720+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18721 .org init_level4_pgt + L4_START_KERNEL*8, 0
18722 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18723 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18724
18725+#ifdef CONFIG_PAX_PER_CPU_PGD
18726+NEXT_PAGE(cpu_pgd)
18727+ .rept NR_CPUS
18728+ .fill 512,8,0
18729+ .endr
18730+#endif
18731+
18732 NEXT_PAGE(level3_ident_pgt)
18733 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18734+#ifdef CONFIG_XEN
18735 .fill 511,8,0
18736+#else
18737+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18738+ .fill 510,8,0
18739+#endif
18740+
18741+NEXT_PAGE(level3_vmalloc_start_pgt)
18742+ .fill 512,8,0
18743+
18744+NEXT_PAGE(level3_vmalloc_end_pgt)
18745+ .fill 512,8,0
18746+
18747+NEXT_PAGE(level3_vmemmap_pgt)
18748+ .fill L3_VMEMMAP_START,8,0
18749+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18750
18751 NEXT_PAGE(level3_kernel_pgt)
18752 .fill L3_START_KERNEL,8,0
18753@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
18754 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18755 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18756
18757+NEXT_PAGE(level2_vmemmap_pgt)
18758+ .fill 512,8,0
18759+
18760 NEXT_PAGE(level2_fixmap_pgt)
18761- .fill 506,8,0
18762- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18763- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18764- .fill 5,8,0
18765+ .fill 507,8,0
18766+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18767+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18768+ .fill 4,8,0
18769
18770-NEXT_PAGE(level1_fixmap_pgt)
18771+NEXT_PAGE(level1_vsyscall_pgt)
18772 .fill 512,8,0
18773
18774-NEXT_PAGE(level2_ident_pgt)
18775- /* Since I easily can, map the first 1G.
18776+ /* Since I easily can, map the first 2G.
18777 * Don't set NX because code runs from these pages.
18778 */
18779- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18780+NEXT_PAGE(level2_ident_pgt)
18781+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18782
18783 NEXT_PAGE(level2_kernel_pgt)
18784 /*
18785@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
18786 * If you want to increase this then increase MODULES_VADDR
18787 * too.)
18788 */
18789- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18790- KERNEL_IMAGE_SIZE/PMD_SIZE)
18791-
18792-NEXT_PAGE(level2_spare_pgt)
18793- .fill 512, 8, 0
18794+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18795
18796 #undef PMDS
18797 #undef NEXT_PAGE
18798
18799- .data
18800+ .align PAGE_SIZE
18801+ENTRY(cpu_gdt_table)
18802+ .rept NR_CPUS
18803+ .quad 0x0000000000000000 /* NULL descriptor */
18804+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18805+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18806+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18807+ .quad 0x00cffb000000ffff /* __USER32_CS */
18808+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18809+ .quad 0x00affb000000ffff /* __USER_CS */
18810+
18811+#ifdef CONFIG_PAX_KERNEXEC
18812+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18813+#else
18814+ .quad 0x0 /* unused */
18815+#endif
18816+
18817+ .quad 0,0 /* TSS */
18818+ .quad 0,0 /* LDT */
18819+ .quad 0,0,0 /* three TLS descriptors */
18820+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18821+ /* asm/segment.h:GDT_ENTRIES must match this */
18822+
18823+ /* zero the remaining page */
18824+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18825+ .endr
18826+
18827 .align 16
18828 .globl early_gdt_descr
18829 early_gdt_descr:
18830 .word GDT_ENTRIES*8-1
18831 early_gdt_descr_base:
18832- .quad INIT_PER_CPU_VAR(gdt_page)
18833+ .quad cpu_gdt_table
18834
18835 ENTRY(phys_base)
18836 /* This must match the first entry in level2_kernel_pgt */
18837 .quad 0x0000000000000000
18838
18839 #include "../../x86/xen/xen-head.S"
18840-
18841- .section .bss, "aw", @nobits
18842+
18843+ .section .rodata,"a",@progbits
18844 .align L1_CACHE_BYTES
18845 ENTRY(idt_table)
18846- .skip IDT_ENTRIES * 16
18847+ .fill 512,8,0
18848
18849 __PAGE_ALIGNED_BSS
18850 .align PAGE_SIZE
18851diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18852index 9c3bd4a..e1d9b35 100644
18853--- a/arch/x86/kernel/i386_ksyms_32.c
18854+++ b/arch/x86/kernel/i386_ksyms_32.c
18855@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18856 EXPORT_SYMBOL(cmpxchg8b_emu);
18857 #endif
18858
18859+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18860+
18861 /* Networking helper routines. */
18862 EXPORT_SYMBOL(csum_partial_copy_generic);
18863+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18864+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18865
18866 EXPORT_SYMBOL(__get_user_1);
18867 EXPORT_SYMBOL(__get_user_2);
18868@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18869
18870 EXPORT_SYMBOL(csum_partial);
18871 EXPORT_SYMBOL(empty_zero_page);
18872+
18873+#ifdef CONFIG_PAX_KERNEXEC
18874+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18875+#endif
18876diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18877index f2f8540..d845509 100644
18878--- a/arch/x86/kernel/i387.c
18879+++ b/arch/x86/kernel/i387.c
18880@@ -176,6 +176,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
18881
18882 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18883 unsigned int pos, unsigned int count,
18884+ void *kbuf, void __user *ubuf) __size_overflow(4);
18885+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18886+ unsigned int pos, unsigned int count,
18887 void *kbuf, void __user *ubuf)
18888 {
18889 int ret;
18890@@ -193,6 +196,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18891
18892 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18893 unsigned int pos, unsigned int count,
18894+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
18895+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18896+ unsigned int pos, unsigned int count,
18897 const void *kbuf, const void __user *ubuf)
18898 {
18899 int ret;
18900@@ -365,6 +371,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
18901
18902 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18903 unsigned int pos, unsigned int count,
18904+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
18905+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18906+ unsigned int pos, unsigned int count,
18907 void *kbuf, void __user *ubuf)
18908 {
18909 struct user_i387_ia32_struct env;
18910@@ -395,6 +404,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18911
18912 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18913 unsigned int pos, unsigned int count,
18914+ const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
18915+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18916+ unsigned int pos, unsigned int count,
18917 const void *kbuf, const void __user *ubuf)
18918 {
18919 struct user_i387_ia32_struct env;
18920@@ -540,6 +552,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
18921 }
18922
18923 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18924+ unsigned int size) __size_overflow(2);
18925+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18926 unsigned int size)
18927 {
18928 struct task_struct *tsk = current;
18929diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18930index df89102..a244320 100644
18931--- a/arch/x86/kernel/i8259.c
18932+++ b/arch/x86/kernel/i8259.c
18933@@ -208,7 +208,7 @@ spurious_8259A_irq:
18934 "spurious 8259A interrupt: IRQ%d.\n", irq);
18935 spurious_irq_mask |= irqmask;
18936 }
18937- atomic_inc(&irq_err_count);
18938+ atomic_inc_unchecked(&irq_err_count);
18939 /*
18940 * Theoretically we do not have to handle this IRQ,
18941 * but in Linux this does not cause problems and is
18942diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
18943index 3a54dcb..1c22348 100644
18944--- a/arch/x86/kernel/init_task.c
18945+++ b/arch/x86/kernel/init_task.c
18946@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
18947 * way process stacks are handled. This is done by having a special
18948 * "init_task" linker map entry..
18949 */
18950-union thread_union init_thread_union __init_task_data =
18951- { INIT_THREAD_INFO(init_task) };
18952+union thread_union init_thread_union __init_task_data;
18953
18954 /*
18955 * Initial task structure.
18956@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
18957 * section. Since TSS's are completely CPU-local, we want them
18958 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18959 */
18960-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18961-
18962+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18963+EXPORT_SYMBOL(init_tss);
18964diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18965index 99c4d30..74c84e9 100644
18966--- a/arch/x86/kernel/ioport.c
18967+++ b/arch/x86/kernel/ioport.c
18968@@ -6,6 +6,7 @@
18969 #include <linux/sched.h>
18970 #include <linux/kernel.h>
18971 #include <linux/capability.h>
18972+#include <linux/security.h>
18973 #include <linux/errno.h>
18974 #include <linux/types.h>
18975 #include <linux/ioport.h>
18976@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18977
18978 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18979 return -EINVAL;
18980+#ifdef CONFIG_GRKERNSEC_IO
18981+ if (turn_on && grsec_disable_privio) {
18982+ gr_handle_ioperm();
18983+ return -EPERM;
18984+ }
18985+#endif
18986 if (turn_on && !capable(CAP_SYS_RAWIO))
18987 return -EPERM;
18988
18989@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18990 * because the ->io_bitmap_max value must match the bitmap
18991 * contents:
18992 */
18993- tss = &per_cpu(init_tss, get_cpu());
18994+ tss = init_tss + get_cpu();
18995
18996 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
18997
18998@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
18999 return -EINVAL;
19000 /* Trying to gain more privileges? */
19001 if (level > old) {
19002+#ifdef CONFIG_GRKERNSEC_IO
19003+ if (grsec_disable_privio) {
19004+ gr_handle_iopl();
19005+ return -EPERM;
19006+ }
19007+#endif
19008 if (!capable(CAP_SYS_RAWIO))
19009 return -EPERM;
19010 }
19011diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
19012index 04bbd52..83a07d9 100644
19013--- a/arch/x86/kernel/irq.c
19014+++ b/arch/x86/kernel/irq.c
19015@@ -15,7 +15,7 @@
19016 #include <asm/mce.h>
19017 #include <asm/hw_irq.h>
19018
19019-atomic_t irq_err_count;
19020+atomic_unchecked_t irq_err_count;
19021
19022 /* Function pointer for generic interrupt vector handling */
19023 void (*generic_interrupt_extension)(void) = NULL;
19024@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
19025 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
19026 seq_printf(p, " Machine check polls\n");
19027 #endif
19028- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
19029+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
19030 #if defined(CONFIG_X86_IO_APIC)
19031- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
19032+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
19033 #endif
19034 return 0;
19035 }
19036@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
19037
19038 u64 arch_irq_stat(void)
19039 {
19040- u64 sum = atomic_read(&irq_err_count);
19041+ u64 sum = atomic_read_unchecked(&irq_err_count);
19042
19043 #ifdef CONFIG_X86_IO_APIC
19044- sum += atomic_read(&irq_mis_count);
19045+ sum += atomic_read_unchecked(&irq_mis_count);
19046 #endif
19047 return sum;
19048 }
19049diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
19050index 7d35d0f..03f1d52 100644
19051--- a/arch/x86/kernel/irq_32.c
19052+++ b/arch/x86/kernel/irq_32.c
19053@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
19054 __asm__ __volatile__("andl %%esp,%0" :
19055 "=r" (sp) : "0" (THREAD_SIZE - 1));
19056
19057- return sp < (sizeof(struct thread_info) + STACK_WARN);
19058+ return sp < STACK_WARN;
19059 }
19060
19061 static void print_stack_overflow(void)
19062@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
19063 * per-CPU IRQ handling contexts (thread information and stack)
19064 */
19065 union irq_ctx {
19066- struct thread_info tinfo;
19067- u32 stack[THREAD_SIZE/sizeof(u32)];
19068-} __attribute__((aligned(PAGE_SIZE)));
19069+ unsigned long previous_esp;
19070+ u32 stack[THREAD_SIZE/sizeof(u32)];
19071+} __attribute__((aligned(THREAD_SIZE)));
19072
19073 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
19074 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
19075@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
19076 static inline int
19077 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19078 {
19079- union irq_ctx *curctx, *irqctx;
19080+ union irq_ctx *irqctx;
19081 u32 *isp, arg1, arg2;
19082
19083- curctx = (union irq_ctx *) current_thread_info();
19084 irqctx = __get_cpu_var(hardirq_ctx);
19085
19086 /*
19087@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19088 * handler) we can't do that and just have to keep using the
19089 * current stack (which is the irq stack already after all)
19090 */
19091- if (unlikely(curctx == irqctx))
19092+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
19093 return 0;
19094
19095 /* build the stack frame on the IRQ stack */
19096- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19097- irqctx->tinfo.task = curctx->tinfo.task;
19098- irqctx->tinfo.previous_esp = current_stack_pointer;
19099+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19100+ irqctx->previous_esp = current_stack_pointer;
19101
19102- /*
19103- * Copy the softirq bits in preempt_count so that the
19104- * softirq checks work in the hardirq context.
19105- */
19106- irqctx->tinfo.preempt_count =
19107- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
19108- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
19109+#ifdef CONFIG_PAX_MEMORY_UDEREF
19110+ __set_fs(MAKE_MM_SEG(0));
19111+#endif
19112
19113 if (unlikely(overflow))
19114 call_on_stack(print_stack_overflow, isp);
19115@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19116 : "0" (irq), "1" (desc), "2" (isp),
19117 "D" (desc->handle_irq)
19118 : "memory", "cc", "ecx");
19119+
19120+#ifdef CONFIG_PAX_MEMORY_UDEREF
19121+ __set_fs(current_thread_info()->addr_limit);
19122+#endif
19123+
19124 return 1;
19125 }
19126
19127@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19128 */
19129 void __cpuinit irq_ctx_init(int cpu)
19130 {
19131- union irq_ctx *irqctx;
19132-
19133 if (per_cpu(hardirq_ctx, cpu))
19134 return;
19135
19136- irqctx = &per_cpu(hardirq_stack, cpu);
19137- irqctx->tinfo.task = NULL;
19138- irqctx->tinfo.exec_domain = NULL;
19139- irqctx->tinfo.cpu = cpu;
19140- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
19141- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19142-
19143- per_cpu(hardirq_ctx, cpu) = irqctx;
19144-
19145- irqctx = &per_cpu(softirq_stack, cpu);
19146- irqctx->tinfo.task = NULL;
19147- irqctx->tinfo.exec_domain = NULL;
19148- irqctx->tinfo.cpu = cpu;
19149- irqctx->tinfo.preempt_count = 0;
19150- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19151-
19152- per_cpu(softirq_ctx, cpu) = irqctx;
19153+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
19154+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
19155
19156 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
19157 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
19158@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
19159 asmlinkage void do_softirq(void)
19160 {
19161 unsigned long flags;
19162- struct thread_info *curctx;
19163 union irq_ctx *irqctx;
19164 u32 *isp;
19165
19166@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
19167 local_irq_save(flags);
19168
19169 if (local_softirq_pending()) {
19170- curctx = current_thread_info();
19171 irqctx = __get_cpu_var(softirq_ctx);
19172- irqctx->tinfo.task = curctx->task;
19173- irqctx->tinfo.previous_esp = current_stack_pointer;
19174+ irqctx->previous_esp = current_stack_pointer;
19175
19176 /* build the stack frame on the softirq stack */
19177- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19178+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19179+
19180+#ifdef CONFIG_PAX_MEMORY_UDEREF
19181+ __set_fs(MAKE_MM_SEG(0));
19182+#endif
19183
19184 call_on_stack(__do_softirq, isp);
19185+
19186+#ifdef CONFIG_PAX_MEMORY_UDEREF
19187+ __set_fs(current_thread_info()->addr_limit);
19188+#endif
19189+
19190 /*
19191 * Shouldnt happen, we returned above if in_interrupt():
19192 */
19193diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
19194index 8d82a77..0baf312 100644
19195--- a/arch/x86/kernel/kgdb.c
19196+++ b/arch/x86/kernel/kgdb.c
19197@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19198
19199 /* clear the trace bit */
19200 linux_regs->flags &= ~X86_EFLAGS_TF;
19201- atomic_set(&kgdb_cpu_doing_single_step, -1);
19202+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19203
19204 /* set the trace bit if we're stepping */
19205 if (remcomInBuffer[0] == 's') {
19206 linux_regs->flags |= X86_EFLAGS_TF;
19207 kgdb_single_step = 1;
19208- atomic_set(&kgdb_cpu_doing_single_step,
19209+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19210 raw_smp_processor_id());
19211 }
19212
19213@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19214 break;
19215
19216 case DIE_DEBUG:
19217- if (atomic_read(&kgdb_cpu_doing_single_step) ==
19218+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
19219 raw_smp_processor_id()) {
19220 if (user_mode(regs))
19221 return single_step_cont(regs, args);
19222@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
19223 return instruction_pointer(regs);
19224 }
19225
19226-struct kgdb_arch arch_kgdb_ops = {
19227+const struct kgdb_arch arch_kgdb_ops = {
19228 /* Breakpoint instruction: */
19229 .gdb_bpt_instr = { 0xcc },
19230 .flags = KGDB_HW_BREAKPOINT,
19231diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19232index 7a67820..70ea187 100644
19233--- a/arch/x86/kernel/kprobes.c
19234+++ b/arch/x86/kernel/kprobes.c
19235@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
19236 char op;
19237 s32 raddr;
19238 } __attribute__((packed)) * jop;
19239- jop = (struct __arch_jmp_op *)from;
19240+
19241+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
19242+
19243+ pax_open_kernel();
19244 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
19245 jop->op = RELATIVEJUMP_INSTRUCTION;
19246+ pax_close_kernel();
19247 }
19248
19249 /*
19250@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
19251 kprobe_opcode_t opcode;
19252 kprobe_opcode_t *orig_opcodes = opcodes;
19253
19254- if (search_exception_tables((unsigned long)opcodes))
19255+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19256 return 0; /* Page fault may occur on this address. */
19257
19258 retry:
19259@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
19260 disp = (u8 *) p->addr + *((s32 *) insn) -
19261 (u8 *) p->ainsn.insn;
19262 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
19263+ pax_open_kernel();
19264 *(s32 *)insn = (s32) disp;
19265+ pax_close_kernel();
19266 }
19267 }
19268 #endif
19269@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
19270
19271 static void __kprobes arch_copy_kprobe(struct kprobe *p)
19272 {
19273- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19274+ pax_open_kernel();
19275+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19276+ pax_close_kernel();
19277
19278 fix_riprel(p);
19279
19280- if (can_boost(p->addr))
19281+ if (can_boost(ktla_ktva(p->addr)))
19282 p->ainsn.boostable = 0;
19283 else
19284 p->ainsn.boostable = -1;
19285
19286- p->opcode = *p->addr;
19287+ p->opcode = *(ktla_ktva(p->addr));
19288 }
19289
19290 int __kprobes arch_prepare_kprobe(struct kprobe *p)
19291@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
19292 if (p->opcode == BREAKPOINT_INSTRUCTION)
19293 regs->ip = (unsigned long)p->addr;
19294 else
19295- regs->ip = (unsigned long)p->ainsn.insn;
19296+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19297 }
19298
19299 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
19300@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
19301 if (p->ainsn.boostable == 1 && !p->post_handler) {
19302 /* Boost up -- we can execute copied instructions directly */
19303 reset_current_kprobe();
19304- regs->ip = (unsigned long)p->ainsn.insn;
19305+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19306 preempt_enable_no_resched();
19307 return;
19308 }
19309@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19310 struct kprobe_ctlblk *kcb;
19311
19312 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
19313- if (*addr != BREAKPOINT_INSTRUCTION) {
19314+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19315 /*
19316 * The breakpoint instruction was removed right
19317 * after we hit it. Another cpu has removed
19318@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19319 /* Skip orig_ax, ip, cs */
19320 " addq $24, %rsp\n"
19321 " popfq\n"
19322+#ifdef KERNEXEC_PLUGIN
19323+ " btsq $63,(%rsp)\n"
19324+#endif
19325 #else
19326 " pushf\n"
19327 /*
19328@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
19329 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19330 {
19331 unsigned long *tos = stack_addr(regs);
19332- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19333+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19334 unsigned long orig_ip = (unsigned long)p->addr;
19335 kprobe_opcode_t *insn = p->ainsn.insn;
19336
19337@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
19338 struct die_args *args = data;
19339 int ret = NOTIFY_DONE;
19340
19341- if (args->regs && user_mode_vm(args->regs))
19342+ if (args->regs && user_mode(args->regs))
19343 return ret;
19344
19345 switch (val) {
19346diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19347index 63b0ec8..6d92227 100644
19348--- a/arch/x86/kernel/kvm.c
19349+++ b/arch/x86/kernel/kvm.c
19350@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
19351 pv_mmu_ops.set_pud = kvm_set_pud;
19352 #if PAGETABLE_LEVELS == 4
19353 pv_mmu_ops.set_pgd = kvm_set_pgd;
19354+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
19355 #endif
19356 #endif
19357 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
19358diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19359index ec6ef60..d784780 100644
19360--- a/arch/x86/kernel/ldt.c
19361+++ b/arch/x86/kernel/ldt.c
19362@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19363 if (reload) {
19364 #ifdef CONFIG_SMP
19365 preempt_disable();
19366- load_LDT(pc);
19367+ load_LDT_nolock(pc);
19368 if (!cpumask_equal(mm_cpumask(current->mm),
19369 cpumask_of(smp_processor_id())))
19370 smp_call_function(flush_ldt, current->mm, 1);
19371 preempt_enable();
19372 #else
19373- load_LDT(pc);
19374+ load_LDT_nolock(pc);
19375 #endif
19376 }
19377 if (oldsize) {
19378@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19379 return err;
19380
19381 for (i = 0; i < old->size; i++)
19382- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19383+ write_ldt_entry(new->ldt, i, old->ldt + i);
19384 return 0;
19385 }
19386
19387@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19388 retval = copy_ldt(&mm->context, &old_mm->context);
19389 mutex_unlock(&old_mm->context.lock);
19390 }
19391+
19392+ if (tsk == current) {
19393+ mm->context.vdso = 0;
19394+
19395+#ifdef CONFIG_X86_32
19396+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19397+ mm->context.user_cs_base = 0UL;
19398+ mm->context.user_cs_limit = ~0UL;
19399+
19400+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19401+ cpus_clear(mm->context.cpu_user_cs_mask);
19402+#endif
19403+
19404+#endif
19405+#endif
19406+
19407+ }
19408+
19409 return retval;
19410 }
19411
19412@@ -140,6 +158,7 @@ void destroy_context(struct mm_struct *mm)
19413 }
19414 }
19415
19416+static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
19417 static int read_ldt(void __user *ptr, unsigned long bytecount)
19418 {
19419 int err;
19420@@ -229,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19421 }
19422 }
19423
19424+#ifdef CONFIG_PAX_SEGMEXEC
19425+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19426+ error = -EINVAL;
19427+ goto out_unlock;
19428+ }
19429+#endif
19430+
19431 fill_ldt(&ldt, &ldt_info);
19432 if (oldmode)
19433 ldt.avl = 0;
19434diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19435index c1c429d..f02eaf9 100644
19436--- a/arch/x86/kernel/machine_kexec_32.c
19437+++ b/arch/x86/kernel/machine_kexec_32.c
19438@@ -26,7 +26,7 @@
19439 #include <asm/system.h>
19440 #include <asm/cacheflush.h>
19441
19442-static void set_idt(void *newidt, __u16 limit)
19443+static void set_idt(struct desc_struct *newidt, __u16 limit)
19444 {
19445 struct desc_ptr curidt;
19446
19447@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19448 }
19449
19450
19451-static void set_gdt(void *newgdt, __u16 limit)
19452+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19453 {
19454 struct desc_ptr curgdt;
19455
19456@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
19457 }
19458
19459 control_page = page_address(image->control_code_page);
19460- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19461+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19462
19463 relocate_kernel_ptr = control_page;
19464 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19465diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
19466index 1e47679..e73449d 100644
19467--- a/arch/x86/kernel/microcode_amd.c
19468+++ b/arch/x86/kernel/microcode_amd.c
19469@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
19470 uci->mc = NULL;
19471 }
19472
19473-static struct microcode_ops microcode_amd_ops = {
19474+static const struct microcode_ops microcode_amd_ops = {
19475 .request_microcode_user = request_microcode_user,
19476 .request_microcode_fw = request_microcode_fw,
19477 .collect_cpu_info = collect_cpu_info_amd,
19478@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
19479 .microcode_fini_cpu = microcode_fini_cpu_amd,
19480 };
19481
19482-struct microcode_ops * __init init_amd_microcode(void)
19483+const struct microcode_ops * __init init_amd_microcode(void)
19484 {
19485 return &microcode_amd_ops;
19486 }
19487diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
19488index 378e9a8..b5a6ea9 100644
19489--- a/arch/x86/kernel/microcode_core.c
19490+++ b/arch/x86/kernel/microcode_core.c
19491@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
19492
19493 #define MICROCODE_VERSION "2.00"
19494
19495-static struct microcode_ops *microcode_ops;
19496+static const struct microcode_ops *microcode_ops;
19497
19498 /*
19499 * Synchronization.
19500diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19501index 0d334dd..5a709b5 100644
19502--- a/arch/x86/kernel/microcode_intel.c
19503+++ b/arch/x86/kernel/microcode_intel.c
19504@@ -441,15 +441,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
19505 return ret;
19506 }
19507
19508+static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
19509 static int get_ucode_user(void *to, const void *from, size_t n)
19510 {
19511- return copy_from_user(to, from, n);
19512+ return copy_from_user(to, (const void __force_user *)from, n);
19513 }
19514
19515 static enum ucode_state
19516 request_microcode_user(int cpu, const void __user *buf, size_t size)
19517 {
19518- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19519+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19520 }
19521
19522 static void microcode_fini_cpu(int cpu)
19523@@ -460,7 +461,7 @@ static void microcode_fini_cpu(int cpu)
19524 uci->mc = NULL;
19525 }
19526
19527-static struct microcode_ops microcode_intel_ops = {
19528+static const struct microcode_ops microcode_intel_ops = {
19529 .request_microcode_user = request_microcode_user,
19530 .request_microcode_fw = request_microcode_fw,
19531 .collect_cpu_info = collect_cpu_info,
19532@@ -468,7 +469,7 @@ static struct microcode_ops microcode_intel_ops = {
19533 .microcode_fini_cpu = microcode_fini_cpu,
19534 };
19535
19536-struct microcode_ops * __init init_intel_microcode(void)
19537+const struct microcode_ops * __init init_intel_microcode(void)
19538 {
19539 return &microcode_intel_ops;
19540 }
19541diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19542index 89f386f..9028f51 100644
19543--- a/arch/x86/kernel/module.c
19544+++ b/arch/x86/kernel/module.c
19545@@ -34,7 +34,7 @@
19546 #define DEBUGP(fmt...)
19547 #endif
19548
19549-void *module_alloc(unsigned long size)
19550+static void *__module_alloc(unsigned long size, pgprot_t prot)
19551 {
19552 struct vm_struct *area;
19553
19554@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
19555 if (!area)
19556 return NULL;
19557
19558- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
19559- PAGE_KERNEL_EXEC);
19560+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
19561+}
19562+
19563+void *module_alloc(unsigned long size)
19564+{
19565+
19566+#ifdef CONFIG_PAX_KERNEXEC
19567+ return __module_alloc(size, PAGE_KERNEL);
19568+#else
19569+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19570+#endif
19571+
19572 }
19573
19574 /* Free memory returned from module_alloc */
19575@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
19576 vfree(module_region);
19577 }
19578
19579+#ifdef CONFIG_PAX_KERNEXEC
19580+#ifdef CONFIG_X86_32
19581+void *module_alloc_exec(unsigned long size)
19582+{
19583+ struct vm_struct *area;
19584+
19585+ if (size == 0)
19586+ return NULL;
19587+
19588+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19589+ return area ? area->addr : NULL;
19590+}
19591+EXPORT_SYMBOL(module_alloc_exec);
19592+
19593+void module_free_exec(struct module *mod, void *module_region)
19594+{
19595+ vunmap(module_region);
19596+}
19597+EXPORT_SYMBOL(module_free_exec);
19598+#else
19599+void module_free_exec(struct module *mod, void *module_region)
19600+{
19601+ module_free(mod, module_region);
19602+}
19603+EXPORT_SYMBOL(module_free_exec);
19604+
19605+void *module_alloc_exec(unsigned long size)
19606+{
19607+ return __module_alloc(size, PAGE_KERNEL_RX);
19608+}
19609+EXPORT_SYMBOL(module_alloc_exec);
19610+#endif
19611+#endif
19612+
19613 /* We don't need anything special. */
19614 int module_frob_arch_sections(Elf_Ehdr *hdr,
19615 Elf_Shdr *sechdrs,
19616@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19617 unsigned int i;
19618 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19619 Elf32_Sym *sym;
19620- uint32_t *location;
19621+ uint32_t *plocation, location;
19622
19623 DEBUGP("Applying relocate section %u to %u\n", relsec,
19624 sechdrs[relsec].sh_info);
19625 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19626 /* This is where to make the change */
19627- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19628- + rel[i].r_offset;
19629+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19630+ location = (uint32_t)plocation;
19631+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19632+ plocation = ktla_ktva((void *)plocation);
19633 /* This is the symbol it is referring to. Note that all
19634 undefined symbols have been resolved. */
19635 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19636@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19637 switch (ELF32_R_TYPE(rel[i].r_info)) {
19638 case R_386_32:
19639 /* We add the value into the location given */
19640- *location += sym->st_value;
19641+ pax_open_kernel();
19642+ *plocation += sym->st_value;
19643+ pax_close_kernel();
19644 break;
19645 case R_386_PC32:
19646 /* Add the value, subtract its postition */
19647- *location += sym->st_value - (uint32_t)location;
19648+ pax_open_kernel();
19649+ *plocation += sym->st_value - location;
19650+ pax_close_kernel();
19651 break;
19652 default:
19653 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
19654@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19655 case R_X86_64_NONE:
19656 break;
19657 case R_X86_64_64:
19658+ pax_open_kernel();
19659 *(u64 *)loc = val;
19660+ pax_close_kernel();
19661 break;
19662 case R_X86_64_32:
19663+ pax_open_kernel();
19664 *(u32 *)loc = val;
19665+ pax_close_kernel();
19666 if (val != *(u32 *)loc)
19667 goto overflow;
19668 break;
19669 case R_X86_64_32S:
19670+ pax_open_kernel();
19671 *(s32 *)loc = val;
19672+ pax_close_kernel();
19673 if ((s64)val != *(s32 *)loc)
19674 goto overflow;
19675 break;
19676 case R_X86_64_PC32:
19677 val -= (u64)loc;
19678+ pax_open_kernel();
19679 *(u32 *)loc = val;
19680+ pax_close_kernel();
19681+
19682 #if 0
19683 if ((s64)val != *(s32 *)loc)
19684 goto overflow;
19685diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19686index 3a7c5a4..9191528 100644
19687--- a/arch/x86/kernel/paravirt-spinlocks.c
19688+++ b/arch/x86/kernel/paravirt-spinlocks.c
19689@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
19690 __raw_spin_lock(lock);
19691 }
19692
19693-struct pv_lock_ops pv_lock_ops = {
19694+struct pv_lock_ops pv_lock_ops __read_only = {
19695 #ifdef CONFIG_SMP
19696 .spin_is_locked = __ticket_spin_is_locked,
19697 .spin_is_contended = __ticket_spin_is_contended,
19698diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19699index 1b1739d..dea6077 100644
19700--- a/arch/x86/kernel/paravirt.c
19701+++ b/arch/x86/kernel/paravirt.c
19702@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
19703 {
19704 return x;
19705 }
19706+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19707+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19708+#endif
19709
19710 void __init default_banner(void)
19711 {
19712@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
19713 * corresponding structure. */
19714 static void *get_call_destination(u8 type)
19715 {
19716- struct paravirt_patch_template tmpl = {
19717+ const struct paravirt_patch_template tmpl = {
19718 .pv_init_ops = pv_init_ops,
19719 .pv_time_ops = pv_time_ops,
19720 .pv_cpu_ops = pv_cpu_ops,
19721@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
19722 .pv_lock_ops = pv_lock_ops,
19723 #endif
19724 };
19725+
19726+ pax_track_stack();
19727 return *((void **)&tmpl + type);
19728 }
19729
19730@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19731 if (opfunc == NULL)
19732 /* If there's no function, patch it with a ud2a (BUG) */
19733 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19734- else if (opfunc == _paravirt_nop)
19735+ else if (opfunc == (void *)_paravirt_nop)
19736 /* If the operation is a nop, then nop the callsite */
19737 ret = paravirt_patch_nop();
19738
19739 /* identity functions just return their single argument */
19740- else if (opfunc == _paravirt_ident_32)
19741+ else if (opfunc == (void *)_paravirt_ident_32)
19742 ret = paravirt_patch_ident_32(insnbuf, len);
19743- else if (opfunc == _paravirt_ident_64)
19744+ else if (opfunc == (void *)_paravirt_ident_64)
19745 ret = paravirt_patch_ident_64(insnbuf, len);
19746+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19747+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19748+ ret = paravirt_patch_ident_64(insnbuf, len);
19749+#endif
19750
19751 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19752 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19753@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19754 if (insn_len > len || start == NULL)
19755 insn_len = len;
19756 else
19757- memcpy(insnbuf, start, insn_len);
19758+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19759
19760 return insn_len;
19761 }
19762@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
19763 preempt_enable();
19764 }
19765
19766-struct pv_info pv_info = {
19767+struct pv_info pv_info __read_only = {
19768 .name = "bare hardware",
19769 .paravirt_enabled = 0,
19770 .kernel_rpl = 0,
19771 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
19772 };
19773
19774-struct pv_init_ops pv_init_ops = {
19775+struct pv_init_ops pv_init_ops __read_only = {
19776 .patch = native_patch,
19777 };
19778
19779-struct pv_time_ops pv_time_ops = {
19780+struct pv_time_ops pv_time_ops __read_only = {
19781 .sched_clock = native_sched_clock,
19782 };
19783
19784-struct pv_irq_ops pv_irq_ops = {
19785+struct pv_irq_ops pv_irq_ops __read_only = {
19786 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19787 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19788 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19789@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
19790 #endif
19791 };
19792
19793-struct pv_cpu_ops pv_cpu_ops = {
19794+struct pv_cpu_ops pv_cpu_ops __read_only = {
19795 .cpuid = native_cpuid,
19796 .get_debugreg = native_get_debugreg,
19797 .set_debugreg = native_set_debugreg,
19798@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19799 .end_context_switch = paravirt_nop,
19800 };
19801
19802-struct pv_apic_ops pv_apic_ops = {
19803+struct pv_apic_ops pv_apic_ops __read_only = {
19804 #ifdef CONFIG_X86_LOCAL_APIC
19805 .startup_ipi_hook = paravirt_nop,
19806 #endif
19807 };
19808
19809-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19810+#ifdef CONFIG_X86_32
19811+#ifdef CONFIG_X86_PAE
19812+/* 64-bit pagetable entries */
19813+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19814+#else
19815 /* 32-bit pagetable entries */
19816 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19817+#endif
19818 #else
19819 /* 64-bit pagetable entries */
19820 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19821 #endif
19822
19823-struct pv_mmu_ops pv_mmu_ops = {
19824+struct pv_mmu_ops pv_mmu_ops __read_only = {
19825
19826 .read_cr2 = native_read_cr2,
19827 .write_cr2 = native_write_cr2,
19828@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19829 .make_pud = PTE_IDENT,
19830
19831 .set_pgd = native_set_pgd,
19832+ .set_pgd_batched = native_set_pgd_batched,
19833 #endif
19834 #endif /* PAGETABLE_LEVELS >= 3 */
19835
19836@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19837 },
19838
19839 .set_fixmap = native_set_fixmap,
19840+
19841+#ifdef CONFIG_PAX_KERNEXEC
19842+ .pax_open_kernel = native_pax_open_kernel,
19843+ .pax_close_kernel = native_pax_close_kernel,
19844+#endif
19845+
19846 };
19847
19848 EXPORT_SYMBOL_GPL(pv_time_ops);
19849diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19850index 1a2d4b1..6a0dd55 100644
19851--- a/arch/x86/kernel/pci-calgary_64.c
19852+++ b/arch/x86/kernel/pci-calgary_64.c
19853@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
19854 free_pages((unsigned long)vaddr, get_order(size));
19855 }
19856
19857-static struct dma_map_ops calgary_dma_ops = {
19858+static const struct dma_map_ops calgary_dma_ops = {
19859 .alloc_coherent = calgary_alloc_coherent,
19860 .free_coherent = calgary_free_coherent,
19861 .map_sg = calgary_map_sg,
19862diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
19863index 6ac3931..42b4414 100644
19864--- a/arch/x86/kernel/pci-dma.c
19865+++ b/arch/x86/kernel/pci-dma.c
19866@@ -14,7 +14,7 @@
19867
19868 static int forbid_dac __read_mostly;
19869
19870-struct dma_map_ops *dma_ops;
19871+const struct dma_map_ops *dma_ops;
19872 EXPORT_SYMBOL(dma_ops);
19873
19874 static int iommu_sac_force __read_mostly;
19875@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
19876
19877 int dma_supported(struct device *dev, u64 mask)
19878 {
19879- struct dma_map_ops *ops = get_dma_ops(dev);
19880+ const struct dma_map_ops *ops = get_dma_ops(dev);
19881
19882 #ifdef CONFIG_PCI
19883 if (mask > 0xffffffff && forbid_dac > 0) {
19884diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
19885index 1c76691..e3632db 100644
19886--- a/arch/x86/kernel/pci-gart_64.c
19887+++ b/arch/x86/kernel/pci-gart_64.c
19888@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
19889 return -1;
19890 }
19891
19892-static struct dma_map_ops gart_dma_ops = {
19893+static const struct dma_map_ops gart_dma_ops = {
19894 .map_sg = gart_map_sg,
19895 .unmap_sg = gart_unmap_sg,
19896 .map_page = gart_map_page,
19897diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
19898index a3933d4..c898869 100644
19899--- a/arch/x86/kernel/pci-nommu.c
19900+++ b/arch/x86/kernel/pci-nommu.c
19901@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
19902 flush_write_buffers();
19903 }
19904
19905-struct dma_map_ops nommu_dma_ops = {
19906+const struct dma_map_ops nommu_dma_ops = {
19907 .alloc_coherent = dma_generic_alloc_coherent,
19908 .free_coherent = nommu_free_coherent,
19909 .map_sg = nommu_map_sg,
19910diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
19911index aaa6b78..4de1881 100644
19912--- a/arch/x86/kernel/pci-swiotlb.c
19913+++ b/arch/x86/kernel/pci-swiotlb.c
19914@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
19915 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
19916 }
19917
19918-static struct dma_map_ops swiotlb_dma_ops = {
19919+static const struct dma_map_ops swiotlb_dma_ops = {
19920 .mapping_error = swiotlb_dma_mapping_error,
19921 .alloc_coherent = x86_swiotlb_alloc_coherent,
19922 .free_coherent = swiotlb_free_coherent,
19923diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19924index fc6c84d..0312ca2 100644
19925--- a/arch/x86/kernel/process.c
19926+++ b/arch/x86/kernel/process.c
19927@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
19928
19929 void free_thread_info(struct thread_info *ti)
19930 {
19931- free_thread_xstate(ti->task);
19932 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
19933 }
19934
19935+static struct kmem_cache *task_struct_cachep;
19936+
19937 void arch_task_cache_init(void)
19938 {
19939- task_xstate_cachep =
19940- kmem_cache_create("task_xstate", xstate_size,
19941+ /* create a slab on which task_structs can be allocated */
19942+ task_struct_cachep =
19943+ kmem_cache_create("task_struct", sizeof(struct task_struct),
19944+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
19945+
19946+ task_xstate_cachep =
19947+ kmem_cache_create("task_xstate", xstate_size,
19948 __alignof__(union thread_xstate),
19949- SLAB_PANIC | SLAB_NOTRACK, NULL);
19950+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19951+}
19952+
19953+struct task_struct *alloc_task_struct(void)
19954+{
19955+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
19956+}
19957+
19958+void free_task_struct(struct task_struct *task)
19959+{
19960+ free_thread_xstate(task);
19961+ kmem_cache_free(task_struct_cachep, task);
19962 }
19963
19964 /*
19965@@ -73,7 +90,7 @@ void exit_thread(void)
19966 unsigned long *bp = t->io_bitmap_ptr;
19967
19968 if (bp) {
19969- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19970+ struct tss_struct *tss = init_tss + get_cpu();
19971
19972 t->io_bitmap_ptr = NULL;
19973 clear_thread_flag(TIF_IO_BITMAP);
19974@@ -93,6 +110,9 @@ void flush_thread(void)
19975
19976 clear_tsk_thread_flag(tsk, TIF_DEBUG);
19977
19978+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19979+ loadsegment(gs, 0);
19980+#endif
19981 tsk->thread.debugreg0 = 0;
19982 tsk->thread.debugreg1 = 0;
19983 tsk->thread.debugreg2 = 0;
19984@@ -307,7 +327,7 @@ void default_idle(void)
19985 EXPORT_SYMBOL(default_idle);
19986 #endif
19987
19988-void stop_this_cpu(void *dummy)
19989+__noreturn void stop_this_cpu(void *dummy)
19990 {
19991 local_irq_disable();
19992 /*
19993@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
19994 }
19995 early_param("idle", idle_setup);
19996
19997-unsigned long arch_align_stack(unsigned long sp)
19998+#ifdef CONFIG_PAX_RANDKSTACK
19999+void pax_randomize_kstack(struct pt_regs *regs)
20000 {
20001- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
20002- sp -= get_random_int() % 8192;
20003- return sp & ~0xf;
20004-}
20005+ struct thread_struct *thread = &current->thread;
20006+ unsigned long time;
20007
20008-unsigned long arch_randomize_brk(struct mm_struct *mm)
20009-{
20010- unsigned long range_end = mm->brk + 0x02000000;
20011- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
20012+ if (!randomize_va_space)
20013+ return;
20014+
20015+ if (v8086_mode(regs))
20016+ return;
20017+
20018+ rdtscl(time);
20019+
20020+ /* P4 seems to return a 0 LSB, ignore it */
20021+#ifdef CONFIG_MPENTIUM4
20022+ time &= 0x3EUL;
20023+ time <<= 2;
20024+#elif defined(CONFIG_X86_64)
20025+ time &= 0xFUL;
20026+ time <<= 4;
20027+#else
20028+ time &= 0x1FUL;
20029+ time <<= 3;
20030+#endif
20031+
20032+ thread->sp0 ^= time;
20033+ load_sp0(init_tss + smp_processor_id(), thread);
20034+
20035+#ifdef CONFIG_X86_64
20036+ percpu_write(kernel_stack, thread->sp0);
20037+#endif
20038 }
20039+#endif
20040
20041diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
20042index c40c432..6e1df72 100644
20043--- a/arch/x86/kernel/process_32.c
20044+++ b/arch/x86/kernel/process_32.c
20045@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
20046 unsigned long thread_saved_pc(struct task_struct *tsk)
20047 {
20048 return ((unsigned long *)tsk->thread.sp)[3];
20049+//XXX return tsk->thread.eip;
20050 }
20051
20052 #ifndef CONFIG_SMP
20053@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
20054 unsigned short ss, gs;
20055 const char *board;
20056
20057- if (user_mode_vm(regs)) {
20058+ if (user_mode(regs)) {
20059 sp = regs->sp;
20060 ss = regs->ss & 0xffff;
20061- gs = get_user_gs(regs);
20062 } else {
20063 sp = (unsigned long) (&regs->sp);
20064 savesegment(ss, ss);
20065- savesegment(gs, gs);
20066 }
20067+ gs = get_user_gs(regs);
20068
20069 printk("\n");
20070
20071@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
20072 regs.bx = (unsigned long) fn;
20073 regs.dx = (unsigned long) arg;
20074
20075- regs.ds = __USER_DS;
20076- regs.es = __USER_DS;
20077+ regs.ds = __KERNEL_DS;
20078+ regs.es = __KERNEL_DS;
20079 regs.fs = __KERNEL_PERCPU;
20080- regs.gs = __KERNEL_STACK_CANARY;
20081+ savesegment(gs, regs.gs);
20082 regs.orig_ax = -1;
20083 regs.ip = (unsigned long) kernel_thread_helper;
20084 regs.cs = __KERNEL_CS | get_kernel_rpl();
20085@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20086 struct task_struct *tsk;
20087 int err;
20088
20089- childregs = task_pt_regs(p);
20090+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
20091 *childregs = *regs;
20092 childregs->ax = 0;
20093 childregs->sp = sp;
20094
20095 p->thread.sp = (unsigned long) childregs;
20096 p->thread.sp0 = (unsigned long) (childregs+1);
20097+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20098
20099 p->thread.ip = (unsigned long) ret_from_fork;
20100
20101@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20102 struct thread_struct *prev = &prev_p->thread,
20103 *next = &next_p->thread;
20104 int cpu = smp_processor_id();
20105- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20106+ struct tss_struct *tss = init_tss + cpu;
20107 bool preload_fpu;
20108
20109 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
20110@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20111 */
20112 lazy_save_gs(prev->gs);
20113
20114+#ifdef CONFIG_PAX_MEMORY_UDEREF
20115+ __set_fs(task_thread_info(next_p)->addr_limit);
20116+#endif
20117+
20118 /*
20119 * Load the per-thread Thread-Local Storage descriptor.
20120 */
20121@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20122 */
20123 arch_end_context_switch(next_p);
20124
20125+ percpu_write(current_task, next_p);
20126+ percpu_write(current_tinfo, &next_p->tinfo);
20127+
20128 if (preload_fpu)
20129 __math_state_restore();
20130
20131@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20132 if (prev->gs | next->gs)
20133 lazy_load_gs(next->gs);
20134
20135- percpu_write(current_task, next_p);
20136-
20137 return prev_p;
20138 }
20139
20140@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
20141 } while (count++ < 16);
20142 return 0;
20143 }
20144-
20145diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
20146index 39493bc..196816d 100644
20147--- a/arch/x86/kernel/process_64.c
20148+++ b/arch/x86/kernel/process_64.c
20149@@ -91,7 +91,7 @@ static void __exit_idle(void)
20150 void exit_idle(void)
20151 {
20152 /* idle loop has pid 0 */
20153- if (current->pid)
20154+ if (task_pid_nr(current))
20155 return;
20156 __exit_idle();
20157 }
20158@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
20159 if (!board)
20160 board = "";
20161 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
20162- current->pid, current->comm, print_tainted(),
20163+ task_pid_nr(current), current->comm, print_tainted(),
20164 init_utsname()->release,
20165 (int)strcspn(init_utsname()->version, " "),
20166 init_utsname()->version, board);
20167@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20168 struct pt_regs *childregs;
20169 struct task_struct *me = current;
20170
20171- childregs = ((struct pt_regs *)
20172- (THREAD_SIZE + task_stack_page(p))) - 1;
20173+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
20174 *childregs = *regs;
20175
20176 childregs->ax = 0;
20177@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20178 p->thread.sp = (unsigned long) childregs;
20179 p->thread.sp0 = (unsigned long) (childregs+1);
20180 p->thread.usersp = me->thread.usersp;
20181+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20182
20183 set_tsk_thread_flag(p, TIF_FORK);
20184
20185@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20186 struct thread_struct *prev = &prev_p->thread;
20187 struct thread_struct *next = &next_p->thread;
20188 int cpu = smp_processor_id();
20189- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20190+ struct tss_struct *tss = init_tss + cpu;
20191 unsigned fsindex, gsindex;
20192 bool preload_fpu;
20193
20194@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20195 prev->usersp = percpu_read(old_rsp);
20196 percpu_write(old_rsp, next->usersp);
20197 percpu_write(current_task, next_p);
20198+ percpu_write(current_tinfo, &next_p->tinfo);
20199
20200- percpu_write(kernel_stack,
20201- (unsigned long)task_stack_page(next_p) +
20202- THREAD_SIZE - KERNEL_STACK_OFFSET);
20203+ percpu_write(kernel_stack, next->sp0);
20204
20205 /*
20206 * Now maybe reload the debug registers and handle I/O bitmaps
20207@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
20208 if (!p || p == current || p->state == TASK_RUNNING)
20209 return 0;
20210 stack = (unsigned long)task_stack_page(p);
20211- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20212+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20213 return 0;
20214 fp = *(u64 *)(p->thread.sp);
20215 do {
20216- if (fp < (unsigned long)stack ||
20217- fp >= (unsigned long)stack+THREAD_SIZE)
20218+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20219 return 0;
20220 ip = *(u64 *)(fp+8);
20221 if (!in_sched_functions(ip))
20222diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20223index c06acdd..09de221 100644
20224--- a/arch/x86/kernel/ptrace.c
20225+++ b/arch/x86/kernel/ptrace.c
20226@@ -559,6 +559,10 @@ static int ioperm_active(struct task_struct *target,
20227 static int ioperm_get(struct task_struct *target,
20228 const struct user_regset *regset,
20229 unsigned int pos, unsigned int count,
20230+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
20231+static int ioperm_get(struct task_struct *target,
20232+ const struct user_regset *regset,
20233+ unsigned int pos, unsigned int count,
20234 void *kbuf, void __user *ubuf)
20235 {
20236 if (!target->thread.io_bitmap_ptr)
20237@@ -925,7 +929,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
20238 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20239 {
20240 int ret;
20241- unsigned long __user *datap = (unsigned long __user *)data;
20242+ unsigned long __user *datap = (__force unsigned long __user *)data;
20243
20244 switch (request) {
20245 /* read the word at location addr in the USER area. */
20246@@ -1012,14 +1016,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20247 if (addr < 0)
20248 return -EIO;
20249 ret = do_get_thread_area(child, addr,
20250- (struct user_desc __user *) data);
20251+ (__force struct user_desc __user *) data);
20252 break;
20253
20254 case PTRACE_SET_THREAD_AREA:
20255 if (addr < 0)
20256 return -EIO;
20257 ret = do_set_thread_area(child, addr,
20258- (struct user_desc __user *) data, 0);
20259+ (__force struct user_desc __user *) data, 0);
20260 break;
20261 #endif
20262
20263@@ -1038,12 +1042,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20264 #ifdef CONFIG_X86_PTRACE_BTS
20265 case PTRACE_BTS_CONFIG:
20266 ret = ptrace_bts_config
20267- (child, data, (struct ptrace_bts_config __user *)addr);
20268+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20269 break;
20270
20271 case PTRACE_BTS_STATUS:
20272 ret = ptrace_bts_status
20273- (child, data, (struct ptrace_bts_config __user *)addr);
20274+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20275 break;
20276
20277 case PTRACE_BTS_SIZE:
20278@@ -1052,7 +1056,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20279
20280 case PTRACE_BTS_GET:
20281 ret = ptrace_bts_read_record
20282- (child, data, (struct bts_struct __user *) addr);
20283+ (child, data, (__force struct bts_struct __user *) addr);
20284 break;
20285
20286 case PTRACE_BTS_CLEAR:
20287@@ -1061,7 +1065,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20288
20289 case PTRACE_BTS_DRAIN:
20290 ret = ptrace_bts_drain
20291- (child, data, (struct bts_struct __user *) addr);
20292+ (child, data, (__force struct bts_struct __user *) addr);
20293 break;
20294 #endif /* CONFIG_X86_PTRACE_BTS */
20295
20296@@ -1450,7 +1454,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20297 info.si_code = si_code;
20298
20299 /* User-mode ip? */
20300- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
20301+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
20302
20303 /* Send us the fake SIGTRAP */
20304 force_sig_info(SIGTRAP, &info, tsk);
20305@@ -1469,7 +1473,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20306 * We must return the syscall number to actually look up in the table.
20307 * This can be -1L to skip running any syscall at all.
20308 */
20309-asmregparm long syscall_trace_enter(struct pt_regs *regs)
20310+long syscall_trace_enter(struct pt_regs *regs)
20311 {
20312 long ret = 0;
20313
20314@@ -1514,7 +1518,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
20315 return ret ?: regs->orig_ax;
20316 }
20317
20318-asmregparm void syscall_trace_leave(struct pt_regs *regs)
20319+void syscall_trace_leave(struct pt_regs *regs)
20320 {
20321 if (unlikely(current->audit_context))
20322 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
20323diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20324index cf98100..e76e03d 100644
20325--- a/arch/x86/kernel/reboot.c
20326+++ b/arch/x86/kernel/reboot.c
20327@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
20328 EXPORT_SYMBOL(pm_power_off);
20329
20330 static const struct desc_ptr no_idt = {};
20331-static int reboot_mode;
20332+static unsigned short reboot_mode;
20333 enum reboot_type reboot_type = BOOT_KBD;
20334 int reboot_force;
20335
20336@@ -292,12 +292,12 @@ core_initcall(reboot_init);
20337 controller to pulse the CPU reset line, which is more thorough, but
20338 doesn't work with at least one type of 486 motherboard. It is easy
20339 to stop this code working; hence the copious comments. */
20340-static const unsigned long long
20341-real_mode_gdt_entries [3] =
20342+static struct desc_struct
20343+real_mode_gdt_entries [3] __read_only =
20344 {
20345- 0x0000000000000000ULL, /* Null descriptor */
20346- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
20347- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
20348+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
20349+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
20350+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
20351 };
20352
20353 static const struct desc_ptr
20354@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
20355 * specified by the code and length parameters.
20356 * We assume that length will aways be less that 100!
20357 */
20358-void machine_real_restart(const unsigned char *code, int length)
20359+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
20360 {
20361 local_irq_disable();
20362
20363@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
20364 /* Remap the kernel at virtual address zero, as well as offset zero
20365 from the kernel segment. This assumes the kernel segment starts at
20366 virtual address PAGE_OFFSET. */
20367- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20368- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
20369+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20370+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20371
20372 /*
20373 * Use `swapper_pg_dir' as our page directory.
20374@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
20375 boot)". This seems like a fairly standard thing that gets set by
20376 REBOOT.COM programs, and the previous reset routine did this
20377 too. */
20378- *((unsigned short *)0x472) = reboot_mode;
20379+ *(unsigned short *)(__va(0x472)) = reboot_mode;
20380
20381 /* For the switch to real mode, copy some code to low memory. It has
20382 to be in the first 64k because it is running in 16-bit mode, and it
20383 has to have the same physical and virtual address, because it turns
20384 off paging. Copy it near the end of the first page, out of the way
20385 of BIOS variables. */
20386- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
20387- real_mode_switch, sizeof (real_mode_switch));
20388- memcpy((void *)(0x1000 - 100), code, length);
20389+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
20390+ memcpy(__va(0x1000 - 100), code, length);
20391
20392 /* Set up the IDT for real mode. */
20393 load_idt(&real_mode_idt);
20394@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
20395 __asm__ __volatile__ ("ljmp $0x0008,%0"
20396 :
20397 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
20398+ do { } while (1);
20399 }
20400 #ifdef CONFIG_APM_MODULE
20401 EXPORT_SYMBOL(machine_real_restart);
20402@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20403 {
20404 }
20405
20406-static void native_machine_emergency_restart(void)
20407+__noreturn static void native_machine_emergency_restart(void)
20408 {
20409 int i;
20410
20411@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
20412 #endif
20413 }
20414
20415-static void __machine_emergency_restart(int emergency)
20416+static __noreturn void __machine_emergency_restart(int emergency)
20417 {
20418 reboot_emergency = emergency;
20419 machine_ops.emergency_restart();
20420 }
20421
20422-static void native_machine_restart(char *__unused)
20423+static __noreturn void native_machine_restart(char *__unused)
20424 {
20425 printk("machine restart\n");
20426
20427@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
20428 __machine_emergency_restart(0);
20429 }
20430
20431-static void native_machine_halt(void)
20432+static __noreturn void native_machine_halt(void)
20433 {
20434 /* stop other cpus and apics */
20435 machine_shutdown();
20436@@ -685,7 +685,7 @@ static void native_machine_halt(void)
20437 stop_this_cpu(NULL);
20438 }
20439
20440-static void native_machine_power_off(void)
20441+__noreturn static void native_machine_power_off(void)
20442 {
20443 if (pm_power_off) {
20444 if (!reboot_force)
20445@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
20446 }
20447 /* a fallback in case there is no PM info available */
20448 tboot_shutdown(TB_SHUTDOWN_HALT);
20449+ do { } while (1);
20450 }
20451
20452 struct machine_ops machine_ops = {
20453diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20454index 7a6f3b3..976a959 100644
20455--- a/arch/x86/kernel/relocate_kernel_64.S
20456+++ b/arch/x86/kernel/relocate_kernel_64.S
20457@@ -11,6 +11,7 @@
20458 #include <asm/kexec.h>
20459 #include <asm/processor-flags.h>
20460 #include <asm/pgtable_types.h>
20461+#include <asm/alternative-asm.h>
20462
20463 /*
20464 * Must be relocatable PIC code callable as a C function
20465@@ -167,6 +168,7 @@ identity_mapped:
20466 xorq %r14, %r14
20467 xorq %r15, %r15
20468
20469+ pax_force_retaddr 0, 1
20470 ret
20471
20472 1:
20473diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20474index 5449a26..0b6c759 100644
20475--- a/arch/x86/kernel/setup.c
20476+++ b/arch/x86/kernel/setup.c
20477@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
20478
20479 if (!boot_params.hdr.root_flags)
20480 root_mountflags &= ~MS_RDONLY;
20481- init_mm.start_code = (unsigned long) _text;
20482- init_mm.end_code = (unsigned long) _etext;
20483+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20484+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20485 init_mm.end_data = (unsigned long) _edata;
20486 init_mm.brk = _brk_end;
20487
20488- code_resource.start = virt_to_phys(_text);
20489- code_resource.end = virt_to_phys(_etext)-1;
20490- data_resource.start = virt_to_phys(_etext);
20491+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20492+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20493+ data_resource.start = virt_to_phys(_sdata);
20494 data_resource.end = virt_to_phys(_edata)-1;
20495 bss_resource.start = virt_to_phys(&__bss_start);
20496 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20497diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20498index d559af9..244f55d 100644
20499--- a/arch/x86/kernel/setup_percpu.c
20500+++ b/arch/x86/kernel/setup_percpu.c
20501@@ -25,19 +25,17 @@
20502 # define DBG(x...)
20503 #endif
20504
20505-DEFINE_PER_CPU(int, cpu_number);
20506+#ifdef CONFIG_SMP
20507+DEFINE_PER_CPU(unsigned int, cpu_number);
20508 EXPORT_PER_CPU_SYMBOL(cpu_number);
20509+#endif
20510
20511-#ifdef CONFIG_X86_64
20512 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20513-#else
20514-#define BOOT_PERCPU_OFFSET 0
20515-#endif
20516
20517 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20518 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20519
20520-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20521+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20522 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20523 };
20524 EXPORT_SYMBOL(__per_cpu_offset);
20525@@ -100,6 +98,8 @@ static bool __init pcpu_need_numa(void)
20526 * Pointer to the allocated area on success, NULL on failure.
20527 */
20528 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20529+ unsigned long align) __size_overflow(2);
20530+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20531 unsigned long align)
20532 {
20533 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
20534@@ -128,6 +128,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20535 /*
20536 * Helpers for first chunk memory allocation
20537 */
20538+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
20539+
20540 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
20541 {
20542 return pcpu_alloc_bootmem(cpu, size, align);
20543@@ -159,10 +161,10 @@ static inline void setup_percpu_segment(int cpu)
20544 {
20545 #ifdef CONFIG_X86_32
20546 struct desc_struct gdt;
20547+ unsigned long base = per_cpu_offset(cpu);
20548
20549- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20550- 0x2 | DESCTYPE_S, 0x8);
20551- gdt.s = 1;
20552+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20553+ 0x83 | DESCTYPE_S, 0xC);
20554 write_gdt_entry(get_cpu_gdt_table(cpu),
20555 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20556 #endif
20557@@ -212,6 +214,11 @@ void __init setup_per_cpu_areas(void)
20558 /* alrighty, percpu areas up and running */
20559 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20560 for_each_possible_cpu(cpu) {
20561+#ifdef CONFIG_CC_STACKPROTECTOR
20562+#ifdef CONFIG_X86_32
20563+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20564+#endif
20565+#endif
20566 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20567 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20568 per_cpu(cpu_number, cpu) = cpu;
20569@@ -239,6 +246,12 @@ void __init setup_per_cpu_areas(void)
20570 early_per_cpu_map(x86_cpu_to_node_map, cpu);
20571 #endif
20572 #endif
20573+#ifdef CONFIG_CC_STACKPROTECTOR
20574+#ifdef CONFIG_X86_32
20575+ if (!cpu)
20576+ per_cpu(stack_canary.canary, cpu) = canary;
20577+#endif
20578+#endif
20579 /*
20580 * Up to this point, the boot CPU has been using .data.init
20581 * area. Reload any changed state for the boot CPU.
20582diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20583index 6a44a76..a9287a1 100644
20584--- a/arch/x86/kernel/signal.c
20585+++ b/arch/x86/kernel/signal.c
20586@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
20587 * Align the stack pointer according to the i386 ABI,
20588 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20589 */
20590- sp = ((sp + 4) & -16ul) - 4;
20591+ sp = ((sp - 12) & -16ul) - 4;
20592 #else /* !CONFIG_X86_32 */
20593 sp = round_down(sp, 16) - 8;
20594 #endif
20595@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
20596 * Return an always-bogus address instead so we will die with SIGSEGV.
20597 */
20598 if (onsigstack && !likely(on_sig_stack(sp)))
20599- return (void __user *)-1L;
20600+ return (__force void __user *)-1L;
20601
20602 /* save i387 state */
20603 if (used_math() && save_i387_xstate(*fpstate) < 0)
20604- return (void __user *)-1L;
20605+ return (__force void __user *)-1L;
20606
20607 return (void __user *)sp;
20608 }
20609@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20610 }
20611
20612 if (current->mm->context.vdso)
20613- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20614+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20615 else
20616- restorer = &frame->retcode;
20617+ restorer = (void __user *)&frame->retcode;
20618 if (ka->sa.sa_flags & SA_RESTORER)
20619 restorer = ka->sa.sa_restorer;
20620
20621@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20622 * reasons and because gdb uses it as a signature to notice
20623 * signal handler stack frames.
20624 */
20625- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20626+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20627
20628 if (err)
20629 return -EFAULT;
20630@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20631 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
20632
20633 /* Set up to return from userspace. */
20634- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20635+ if (current->mm->context.vdso)
20636+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20637+ else
20638+ restorer = (void __user *)&frame->retcode;
20639 if (ka->sa.sa_flags & SA_RESTORER)
20640 restorer = ka->sa.sa_restorer;
20641 put_user_ex(restorer, &frame->pretcode);
20642@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20643 * reasons and because gdb uses it as a signature to notice
20644 * signal handler stack frames.
20645 */
20646- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20647+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20648 } put_user_catch(err);
20649
20650 if (err)
20651@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
20652 int signr;
20653 sigset_t *oldset;
20654
20655+ pax_track_stack();
20656+
20657 /*
20658 * We want the common case to go fast, which is why we may in certain
20659 * cases get here from kernel mode. Just return without doing anything
20660@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
20661 * X86_32: vm86 regs switched out by assembly code before reaching
20662 * here, so testing against kernel CS suffices.
20663 */
20664- if (!user_mode(regs))
20665+ if (!user_mode_novm(regs))
20666 return;
20667
20668 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
20669diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20670index 7e8e905..64d5c32 100644
20671--- a/arch/x86/kernel/smpboot.c
20672+++ b/arch/x86/kernel/smpboot.c
20673@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
20674 */
20675 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
20676
20677-void cpu_hotplug_driver_lock()
20678+void cpu_hotplug_driver_lock(void)
20679 {
20680- mutex_lock(&x86_cpu_hotplug_driver_mutex);
20681+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
20682 }
20683
20684-void cpu_hotplug_driver_unlock()
20685+void cpu_hotplug_driver_unlock(void)
20686 {
20687- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20688+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20689 }
20690
20691 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
20692@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
20693 * target processor state.
20694 */
20695 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
20696- (unsigned long)stack_start.sp);
20697+ stack_start);
20698
20699 /*
20700 * Run STARTUP IPI loop.
20701@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
20702 set_idle_for_cpu(cpu, c_idle.idle);
20703 do_rest:
20704 per_cpu(current_task, cpu) = c_idle.idle;
20705+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
20706 #ifdef CONFIG_X86_32
20707 /* Stack for startup_32 can be just as for start_secondary onwards */
20708 irq_ctx_init(cpu);
20709@@ -750,13 +751,15 @@ do_rest:
20710 #else
20711 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
20712 initial_gs = per_cpu_offset(cpu);
20713- per_cpu(kernel_stack, cpu) =
20714- (unsigned long)task_stack_page(c_idle.idle) -
20715- KERNEL_STACK_OFFSET + THREAD_SIZE;
20716+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
20717 #endif
20718+
20719+ pax_open_kernel();
20720 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20721+ pax_close_kernel();
20722+
20723 initial_code = (unsigned long)start_secondary;
20724- stack_start.sp = (void *) c_idle.idle->thread.sp;
20725+ stack_start = c_idle.idle->thread.sp;
20726
20727 /* start_ip had better be page-aligned! */
20728 start_ip = setup_trampoline();
20729@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
20730
20731 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
20732
20733+#ifdef CONFIG_PAX_PER_CPU_PGD
20734+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20735+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20736+ KERNEL_PGD_PTRS);
20737+#endif
20738+
20739 err = do_boot_cpu(apicid, cpu);
20740
20741 if (err) {
20742diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20743index 3149032..14f1053 100644
20744--- a/arch/x86/kernel/step.c
20745+++ b/arch/x86/kernel/step.c
20746@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20747 struct desc_struct *desc;
20748 unsigned long base;
20749
20750- seg &= ~7UL;
20751+ seg >>= 3;
20752
20753 mutex_lock(&child->mm->context.lock);
20754- if (unlikely((seg >> 3) >= child->mm->context.size))
20755+ if (unlikely(seg >= child->mm->context.size))
20756 addr = -1L; /* bogus selector, access would fault */
20757 else {
20758 desc = child->mm->context.ldt + seg;
20759@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20760 addr += base;
20761 }
20762 mutex_unlock(&child->mm->context.lock);
20763- }
20764+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20765+ addr = ktla_ktva(addr);
20766
20767 return addr;
20768 }
20769@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20770 unsigned char opcode[15];
20771 unsigned long addr = convert_ip_to_linear(child, regs);
20772
20773+ if (addr == -EINVAL)
20774+ return 0;
20775+
20776 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20777 for (i = 0; i < copied; i++) {
20778 switch (opcode[i]) {
20779@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20780
20781 #ifdef CONFIG_X86_64
20782 case 0x40 ... 0x4f:
20783- if (regs->cs != __USER_CS)
20784+ if ((regs->cs & 0xffff) != __USER_CS)
20785 /* 32-bit mode: register increment */
20786 return 0;
20787 /* 64-bit mode: REX prefix */
20788diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20789index dee1ff7..a397f7f 100644
20790--- a/arch/x86/kernel/sys_i386_32.c
20791+++ b/arch/x86/kernel/sys_i386_32.c
20792@@ -24,6 +24,21 @@
20793
20794 #include <asm/syscalls.h>
20795
20796+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20797+{
20798+ unsigned long pax_task_size = TASK_SIZE;
20799+
20800+#ifdef CONFIG_PAX_SEGMEXEC
20801+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20802+ pax_task_size = SEGMEXEC_TASK_SIZE;
20803+#endif
20804+
20805+ if (len > pax_task_size || addr > pax_task_size - len)
20806+ return -EINVAL;
20807+
20808+ return 0;
20809+}
20810+
20811 /*
20812 * Perform the select(nd, in, out, ex, tv) and mmap() system
20813 * calls. Linux/i386 didn't use to be able to handle more than
20814@@ -58,6 +73,212 @@ out:
20815 return err;
20816 }
20817
20818+unsigned long
20819+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20820+ unsigned long len, unsigned long pgoff, unsigned long flags)
20821+{
20822+ struct mm_struct *mm = current->mm;
20823+ struct vm_area_struct *vma;
20824+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20825+
20826+#ifdef CONFIG_PAX_SEGMEXEC
20827+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20828+ pax_task_size = SEGMEXEC_TASK_SIZE;
20829+#endif
20830+
20831+ pax_task_size -= PAGE_SIZE;
20832+
20833+ if (len > pax_task_size)
20834+ return -ENOMEM;
20835+
20836+ if (flags & MAP_FIXED)
20837+ return addr;
20838+
20839+#ifdef CONFIG_PAX_RANDMMAP
20840+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20841+#endif
20842+
20843+ if (addr) {
20844+ addr = PAGE_ALIGN(addr);
20845+ if (pax_task_size - len >= addr) {
20846+ vma = find_vma(mm, addr);
20847+ if (check_heap_stack_gap(vma, addr, len))
20848+ return addr;
20849+ }
20850+ }
20851+ if (len > mm->cached_hole_size) {
20852+ start_addr = addr = mm->free_area_cache;
20853+ } else {
20854+ start_addr = addr = mm->mmap_base;
20855+ mm->cached_hole_size = 0;
20856+ }
20857+
20858+#ifdef CONFIG_PAX_PAGEEXEC
20859+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20860+ start_addr = 0x00110000UL;
20861+
20862+#ifdef CONFIG_PAX_RANDMMAP
20863+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20864+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20865+#endif
20866+
20867+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20868+ start_addr = addr = mm->mmap_base;
20869+ else
20870+ addr = start_addr;
20871+ }
20872+#endif
20873+
20874+full_search:
20875+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20876+ /* At this point: (!vma || addr < vma->vm_end). */
20877+ if (pax_task_size - len < addr) {
20878+ /*
20879+ * Start a new search - just in case we missed
20880+ * some holes.
20881+ */
20882+ if (start_addr != mm->mmap_base) {
20883+ start_addr = addr = mm->mmap_base;
20884+ mm->cached_hole_size = 0;
20885+ goto full_search;
20886+ }
20887+ return -ENOMEM;
20888+ }
20889+ if (check_heap_stack_gap(vma, addr, len))
20890+ break;
20891+ if (addr + mm->cached_hole_size < vma->vm_start)
20892+ mm->cached_hole_size = vma->vm_start - addr;
20893+ addr = vma->vm_end;
20894+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20895+ start_addr = addr = mm->mmap_base;
20896+ mm->cached_hole_size = 0;
20897+ goto full_search;
20898+ }
20899+ }
20900+
20901+ /*
20902+ * Remember the place where we stopped the search:
20903+ */
20904+ mm->free_area_cache = addr + len;
20905+ return addr;
20906+}
20907+
20908+unsigned long
20909+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20910+ const unsigned long len, const unsigned long pgoff,
20911+ const unsigned long flags)
20912+{
20913+ struct vm_area_struct *vma;
20914+ struct mm_struct *mm = current->mm;
20915+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20916+
20917+#ifdef CONFIG_PAX_SEGMEXEC
20918+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20919+ pax_task_size = SEGMEXEC_TASK_SIZE;
20920+#endif
20921+
20922+ pax_task_size -= PAGE_SIZE;
20923+
20924+ /* requested length too big for entire address space */
20925+ if (len > pax_task_size)
20926+ return -ENOMEM;
20927+
20928+ if (flags & MAP_FIXED)
20929+ return addr;
20930+
20931+#ifdef CONFIG_PAX_PAGEEXEC
20932+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20933+ goto bottomup;
20934+#endif
20935+
20936+#ifdef CONFIG_PAX_RANDMMAP
20937+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20938+#endif
20939+
20940+ /* requesting a specific address */
20941+ if (addr) {
20942+ addr = PAGE_ALIGN(addr);
20943+ if (pax_task_size - len >= addr) {
20944+ vma = find_vma(mm, addr);
20945+ if (check_heap_stack_gap(vma, addr, len))
20946+ return addr;
20947+ }
20948+ }
20949+
20950+ /* check if free_area_cache is useful for us */
20951+ if (len <= mm->cached_hole_size) {
20952+ mm->cached_hole_size = 0;
20953+ mm->free_area_cache = mm->mmap_base;
20954+ }
20955+
20956+ /* either no address requested or can't fit in requested address hole */
20957+ addr = mm->free_area_cache;
20958+
20959+ /* make sure it can fit in the remaining address space */
20960+ if (addr > len) {
20961+ vma = find_vma(mm, addr-len);
20962+ if (check_heap_stack_gap(vma, addr - len, len))
20963+ /* remember the address as a hint for next time */
20964+ return (mm->free_area_cache = addr-len);
20965+ }
20966+
20967+ if (mm->mmap_base < len)
20968+ goto bottomup;
20969+
20970+ addr = mm->mmap_base-len;
20971+
20972+ do {
20973+ /*
20974+ * Lookup failure means no vma is above this address,
20975+ * else if new region fits below vma->vm_start,
20976+ * return with success:
20977+ */
20978+ vma = find_vma(mm, addr);
20979+ if (check_heap_stack_gap(vma, addr, len))
20980+ /* remember the address as a hint for next time */
20981+ return (mm->free_area_cache = addr);
20982+
20983+ /* remember the largest hole we saw so far */
20984+ if (addr + mm->cached_hole_size < vma->vm_start)
20985+ mm->cached_hole_size = vma->vm_start - addr;
20986+
20987+ /* try just below the current vma->vm_start */
20988+ addr = skip_heap_stack_gap(vma, len);
20989+ } while (!IS_ERR_VALUE(addr));
20990+
20991+bottomup:
20992+ /*
20993+ * A failed mmap() very likely causes application failure,
20994+ * so fall back to the bottom-up function here. This scenario
20995+ * can happen with large stack limits and large mmap()
20996+ * allocations.
20997+ */
20998+
20999+#ifdef CONFIG_PAX_SEGMEXEC
21000+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21001+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21002+ else
21003+#endif
21004+
21005+ mm->mmap_base = TASK_UNMAPPED_BASE;
21006+
21007+#ifdef CONFIG_PAX_RANDMMAP
21008+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21009+ mm->mmap_base += mm->delta_mmap;
21010+#endif
21011+
21012+ mm->free_area_cache = mm->mmap_base;
21013+ mm->cached_hole_size = ~0UL;
21014+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21015+ /*
21016+ * Restore the topdown base:
21017+ */
21018+ mm->mmap_base = base;
21019+ mm->free_area_cache = base;
21020+ mm->cached_hole_size = ~0UL;
21021+
21022+ return addr;
21023+}
21024
21025 struct sel_arg_struct {
21026 unsigned long n;
21027@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21028 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
21029 case SEMTIMEDOP:
21030 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
21031- (const struct timespec __user *)fifth);
21032+ (__force const struct timespec __user *)fifth);
21033
21034 case SEMGET:
21035 return sys_semget(first, second, third);
21036@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21037 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
21038 if (ret)
21039 return ret;
21040- return put_user(raddr, (ulong __user *) third);
21041+ return put_user(raddr, (__force ulong __user *) third);
21042 }
21043 case 1: /* iBCS2 emulator entry point */
21044 if (!segment_eq(get_fs(), get_ds()))
21045@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
21046
21047 return error;
21048 }
21049-
21050-
21051-/*
21052- * Do a system call from kernel instead of calling sys_execve so we
21053- * end up with proper pt_regs.
21054- */
21055-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
21056-{
21057- long __res;
21058- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
21059- : "=a" (__res)
21060- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
21061- return __res;
21062-}
21063diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
21064index 8aa2057..b604bc1 100644
21065--- a/arch/x86/kernel/sys_x86_64.c
21066+++ b/arch/x86/kernel/sys_x86_64.c
21067@@ -32,8 +32,8 @@ out:
21068 return error;
21069 }
21070
21071-static void find_start_end(unsigned long flags, unsigned long *begin,
21072- unsigned long *end)
21073+static void find_start_end(struct mm_struct *mm, unsigned long flags,
21074+ unsigned long *begin, unsigned long *end)
21075 {
21076 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
21077 unsigned long new_begin;
21078@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
21079 *begin = new_begin;
21080 }
21081 } else {
21082- *begin = TASK_UNMAPPED_BASE;
21083+ *begin = mm->mmap_base;
21084 *end = TASK_SIZE;
21085 }
21086 }
21087@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
21088 if (flags & MAP_FIXED)
21089 return addr;
21090
21091- find_start_end(flags, &begin, &end);
21092+ find_start_end(mm, flags, &begin, &end);
21093
21094 if (len > end)
21095 return -ENOMEM;
21096
21097+#ifdef CONFIG_PAX_RANDMMAP
21098+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21099+#endif
21100+
21101 if (addr) {
21102 addr = PAGE_ALIGN(addr);
21103 vma = find_vma(mm, addr);
21104- if (end - len >= addr &&
21105- (!vma || addr + len <= vma->vm_start))
21106+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
21107 return addr;
21108 }
21109 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
21110@@ -106,7 +109,7 @@ full_search:
21111 }
21112 return -ENOMEM;
21113 }
21114- if (!vma || addr + len <= vma->vm_start) {
21115+ if (check_heap_stack_gap(vma, addr, len)) {
21116 /*
21117 * Remember the place where we stopped the search:
21118 */
21119@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21120 {
21121 struct vm_area_struct *vma;
21122 struct mm_struct *mm = current->mm;
21123- unsigned long addr = addr0;
21124+ unsigned long base = mm->mmap_base, addr = addr0;
21125
21126 /* requested length too big for entire address space */
21127 if (len > TASK_SIZE)
21128@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21129 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
21130 goto bottomup;
21131
21132+#ifdef CONFIG_PAX_RANDMMAP
21133+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21134+#endif
21135+
21136 /* requesting a specific address */
21137 if (addr) {
21138 addr = PAGE_ALIGN(addr);
21139- vma = find_vma(mm, addr);
21140- if (TASK_SIZE - len >= addr &&
21141- (!vma || addr + len <= vma->vm_start))
21142- return addr;
21143+ if (TASK_SIZE - len >= addr) {
21144+ vma = find_vma(mm, addr);
21145+ if (check_heap_stack_gap(vma, addr, len))
21146+ return addr;
21147+ }
21148 }
21149
21150 /* check if free_area_cache is useful for us */
21151@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21152 /* make sure it can fit in the remaining address space */
21153 if (addr > len) {
21154 vma = find_vma(mm, addr-len);
21155- if (!vma || addr <= vma->vm_start)
21156+ if (check_heap_stack_gap(vma, addr - len, len))
21157 /* remember the address as a hint for next time */
21158 return mm->free_area_cache = addr-len;
21159 }
21160@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21161 * return with success:
21162 */
21163 vma = find_vma(mm, addr);
21164- if (!vma || addr+len <= vma->vm_start)
21165+ if (check_heap_stack_gap(vma, addr, len))
21166 /* remember the address as a hint for next time */
21167 return mm->free_area_cache = addr;
21168
21169@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21170 mm->cached_hole_size = vma->vm_start - addr;
21171
21172 /* try just below the current vma->vm_start */
21173- addr = vma->vm_start-len;
21174- } while (len < vma->vm_start);
21175+ addr = skip_heap_stack_gap(vma, len);
21176+ } while (!IS_ERR_VALUE(addr));
21177
21178 bottomup:
21179 /*
21180@@ -198,13 +206,21 @@ bottomup:
21181 * can happen with large stack limits and large mmap()
21182 * allocations.
21183 */
21184+ mm->mmap_base = TASK_UNMAPPED_BASE;
21185+
21186+#ifdef CONFIG_PAX_RANDMMAP
21187+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21188+ mm->mmap_base += mm->delta_mmap;
21189+#endif
21190+
21191+ mm->free_area_cache = mm->mmap_base;
21192 mm->cached_hole_size = ~0UL;
21193- mm->free_area_cache = TASK_UNMAPPED_BASE;
21194 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21195 /*
21196 * Restore the topdown base:
21197 */
21198- mm->free_area_cache = mm->mmap_base;
21199+ mm->mmap_base = base;
21200+ mm->free_area_cache = base;
21201 mm->cached_hole_size = ~0UL;
21202
21203 return addr;
21204diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
21205index 76d70a4..4c94a44 100644
21206--- a/arch/x86/kernel/syscall_table_32.S
21207+++ b/arch/x86/kernel/syscall_table_32.S
21208@@ -1,3 +1,4 @@
21209+.section .rodata,"a",@progbits
21210 ENTRY(sys_call_table)
21211 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
21212 .long sys_exit
21213diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
21214index 46b8277..3349d55 100644
21215--- a/arch/x86/kernel/tboot.c
21216+++ b/arch/x86/kernel/tboot.c
21217@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
21218
21219 void tboot_shutdown(u32 shutdown_type)
21220 {
21221- void (*shutdown)(void);
21222+ void (* __noreturn shutdown)(void);
21223
21224 if (!tboot_enabled())
21225 return;
21226@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
21227
21228 switch_to_tboot_pt();
21229
21230- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
21231+ shutdown = (void *)tboot->shutdown_entry;
21232 shutdown();
21233
21234 /* should not reach here */
21235@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
21236 tboot_shutdown(acpi_shutdown_map[sleep_state]);
21237 }
21238
21239-static atomic_t ap_wfs_count;
21240+static atomic_unchecked_t ap_wfs_count;
21241
21242 static int tboot_wait_for_aps(int num_aps)
21243 {
21244@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
21245 {
21246 switch (action) {
21247 case CPU_DYING:
21248- atomic_inc(&ap_wfs_count);
21249+ atomic_inc_unchecked(&ap_wfs_count);
21250 if (num_online_cpus() == 1)
21251- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
21252+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
21253 return NOTIFY_BAD;
21254 break;
21255 }
21256@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
21257
21258 tboot_create_trampoline();
21259
21260- atomic_set(&ap_wfs_count, 0);
21261+ atomic_set_unchecked(&ap_wfs_count, 0);
21262 register_hotcpu_notifier(&tboot_cpu_notifier);
21263 return 0;
21264 }
21265diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21266index be25734..87fe232 100644
21267--- a/arch/x86/kernel/time.c
21268+++ b/arch/x86/kernel/time.c
21269@@ -26,17 +26,13 @@
21270 int timer_ack;
21271 #endif
21272
21273-#ifdef CONFIG_X86_64
21274-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
21275-#endif
21276-
21277 unsigned long profile_pc(struct pt_regs *regs)
21278 {
21279 unsigned long pc = instruction_pointer(regs);
21280
21281- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21282+ if (!user_mode(regs) && in_lock_functions(pc)) {
21283 #ifdef CONFIG_FRAME_POINTER
21284- return *(unsigned long *)(regs->bp + sizeof(long));
21285+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21286 #else
21287 unsigned long *sp =
21288 (unsigned long *)kernel_stack_pointer(regs);
21289@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21290 * or above a saved flags. Eflags has bits 22-31 zero,
21291 * kernel addresses don't.
21292 */
21293+
21294+#ifdef CONFIG_PAX_KERNEXEC
21295+ return ktla_ktva(sp[0]);
21296+#else
21297 if (sp[0] >> 22)
21298 return sp[0];
21299 if (sp[1] >> 22)
21300 return sp[1];
21301 #endif
21302+
21303+#endif
21304 }
21305 return pc;
21306 }
21307diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21308index 6bb7b85..dd853e1 100644
21309--- a/arch/x86/kernel/tls.c
21310+++ b/arch/x86/kernel/tls.c
21311@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21312 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21313 return -EINVAL;
21314
21315+#ifdef CONFIG_PAX_SEGMEXEC
21316+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21317+ return -EINVAL;
21318+#endif
21319+
21320 set_tls_desc(p, idx, &info, 1);
21321
21322 return 0;
21323diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
21324index 2f083a2..7d3fecc 100644
21325--- a/arch/x86/kernel/tls.h
21326+++ b/arch/x86/kernel/tls.h
21327@@ -16,6 +16,6 @@
21328
21329 extern user_regset_active_fn regset_tls_active;
21330 extern user_regset_get_fn regset_tls_get;
21331-extern user_regset_set_fn regset_tls_set;
21332+extern user_regset_set_fn regset_tls_set __size_overflow(4);
21333
21334 #endif /* _ARCH_X86_KERNEL_TLS_H */
21335diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
21336index 8508237..229b664 100644
21337--- a/arch/x86/kernel/trampoline_32.S
21338+++ b/arch/x86/kernel/trampoline_32.S
21339@@ -32,6 +32,12 @@
21340 #include <asm/segment.h>
21341 #include <asm/page_types.h>
21342
21343+#ifdef CONFIG_PAX_KERNEXEC
21344+#define ta(X) (X)
21345+#else
21346+#define ta(X) ((X) - __PAGE_OFFSET)
21347+#endif
21348+
21349 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
21350 __CPUINITRODATA
21351 .code16
21352@@ -60,7 +66,7 @@ r_base = .
21353 inc %ax # protected mode (PE) bit
21354 lmsw %ax # into protected mode
21355 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
21356- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
21357+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
21358
21359 # These need to be in the same 64K segment as the above;
21360 # hence we don't use the boot_gdt_descr defined in head.S
21361diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
21362index 3af2dff..ba8aa49 100644
21363--- a/arch/x86/kernel/trampoline_64.S
21364+++ b/arch/x86/kernel/trampoline_64.S
21365@@ -91,7 +91,7 @@ startup_32:
21366 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
21367 movl %eax, %ds
21368
21369- movl $X86_CR4_PAE, %eax
21370+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
21371 movl %eax, %cr4 # Enable PAE mode
21372
21373 # Setup trampoline 4 level pagetables
21374@@ -127,7 +127,7 @@ startup_64:
21375 no_longmode:
21376 hlt
21377 jmp no_longmode
21378-#include "verify_cpu_64.S"
21379+#include "verify_cpu.S"
21380
21381 # Careful these need to be in the same 64K segment as the above;
21382 tidt:
21383@@ -138,7 +138,7 @@ tidt:
21384 # so the kernel can live anywhere
21385 .balign 4
21386 tgdt:
21387- .short tgdt_end - tgdt # gdt limit
21388+ .short tgdt_end - tgdt - 1 # gdt limit
21389 .long tgdt - r_base
21390 .short 0
21391 .quad 0x00cf9b000000ffff # __KERNEL32_CS
21392diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21393index 7e37dce..ec3f8e5 100644
21394--- a/arch/x86/kernel/traps.c
21395+++ b/arch/x86/kernel/traps.c
21396@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
21397
21398 /* Do we ignore FPU interrupts ? */
21399 char ignore_fpu_irq;
21400-
21401-/*
21402- * The IDT has to be page-aligned to simplify the Pentium
21403- * F0 0F bug workaround.
21404- */
21405-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21406 #endif
21407
21408 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21409@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21410 static inline void
21411 die_if_kernel(const char *str, struct pt_regs *regs, long err)
21412 {
21413- if (!user_mode_vm(regs))
21414+ if (!user_mode(regs))
21415 die(str, regs, err);
21416 }
21417 #endif
21418
21419 static void __kprobes
21420-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21421+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21422 long error_code, siginfo_t *info)
21423 {
21424 struct task_struct *tsk = current;
21425
21426 #ifdef CONFIG_X86_32
21427- if (regs->flags & X86_VM_MASK) {
21428+ if (v8086_mode(regs)) {
21429 /*
21430 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21431 * On nmi (interrupt 2), do_trap should not be called.
21432@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21433 }
21434 #endif
21435
21436- if (!user_mode(regs))
21437+ if (!user_mode_novm(regs))
21438 goto kernel_trap;
21439
21440 #ifdef CONFIG_X86_32
21441@@ -158,7 +152,7 @@ trap_signal:
21442 printk_ratelimit()) {
21443 printk(KERN_INFO
21444 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21445- tsk->comm, tsk->pid, str,
21446+ tsk->comm, task_pid_nr(tsk), str,
21447 regs->ip, regs->sp, error_code);
21448 print_vma_addr(" in ", regs->ip);
21449 printk("\n");
21450@@ -175,8 +169,20 @@ kernel_trap:
21451 if (!fixup_exception(regs)) {
21452 tsk->thread.error_code = error_code;
21453 tsk->thread.trap_no = trapnr;
21454+
21455+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21456+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21457+ str = "PAX: suspicious stack segment fault";
21458+#endif
21459+
21460 die(str, regs, error_code);
21461 }
21462+
21463+#ifdef CONFIG_PAX_REFCOUNT
21464+ if (trapnr == 4)
21465+ pax_report_refcount_overflow(regs);
21466+#endif
21467+
21468 return;
21469
21470 #ifdef CONFIG_X86_32
21471@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
21472 conditional_sti(regs);
21473
21474 #ifdef CONFIG_X86_32
21475- if (regs->flags & X86_VM_MASK)
21476+ if (v8086_mode(regs))
21477 goto gp_in_vm86;
21478 #endif
21479
21480 tsk = current;
21481- if (!user_mode(regs))
21482+ if (!user_mode_novm(regs))
21483 goto gp_in_kernel;
21484
21485+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21486+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21487+ struct mm_struct *mm = tsk->mm;
21488+ unsigned long limit;
21489+
21490+ down_write(&mm->mmap_sem);
21491+ limit = mm->context.user_cs_limit;
21492+ if (limit < TASK_SIZE) {
21493+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21494+ up_write(&mm->mmap_sem);
21495+ return;
21496+ }
21497+ up_write(&mm->mmap_sem);
21498+ }
21499+#endif
21500+
21501 tsk->thread.error_code = error_code;
21502 tsk->thread.trap_no = 13;
21503
21504@@ -305,6 +327,13 @@ gp_in_kernel:
21505 if (notify_die(DIE_GPF, "general protection fault", regs,
21506 error_code, 13, SIGSEGV) == NOTIFY_STOP)
21507 return;
21508+
21509+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21510+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21511+ die("PAX: suspicious general protection fault", regs, error_code);
21512+ else
21513+#endif
21514+
21515 die("general protection fault", regs, error_code);
21516 }
21517
21518@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
21519 dotraplinkage notrace __kprobes void
21520 do_nmi(struct pt_regs *regs, long error_code)
21521 {
21522+
21523+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21524+ if (!user_mode(regs)) {
21525+ unsigned long cs = regs->cs & 0xFFFF;
21526+ unsigned long ip = ktva_ktla(regs->ip);
21527+
21528+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21529+ regs->ip = ip;
21530+ }
21531+#endif
21532+
21533 nmi_enter();
21534
21535 inc_irq_stat(__nmi_count);
21536@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21537 }
21538
21539 #ifdef CONFIG_X86_32
21540- if (regs->flags & X86_VM_MASK)
21541+ if (v8086_mode(regs))
21542 goto debug_vm86;
21543 #endif
21544
21545@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21546 * kernel space (but re-enable TF when returning to user mode).
21547 */
21548 if (condition & DR_STEP) {
21549- if (!user_mode(regs))
21550+ if (!user_mode_novm(regs))
21551 goto clear_TF_reenable;
21552 }
21553
21554@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
21555 * Handle strange cache flush from user space exception
21556 * in all other cases. This is undocumented behaviour.
21557 */
21558- if (regs->flags & X86_VM_MASK) {
21559+ if (v8086_mode(regs)) {
21560 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
21561 return;
21562 }
21563@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
21564 void __math_state_restore(void)
21565 {
21566 struct thread_info *thread = current_thread_info();
21567- struct task_struct *tsk = thread->task;
21568+ struct task_struct *tsk = current;
21569
21570 /*
21571 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
21572@@ -825,8 +865,7 @@ void __math_state_restore(void)
21573 */
21574 asmlinkage void math_state_restore(void)
21575 {
21576- struct thread_info *thread = current_thread_info();
21577- struct task_struct *tsk = thread->task;
21578+ struct task_struct *tsk = current;
21579
21580 if (!tsk_used_math(tsk)) {
21581 local_irq_enable();
21582diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21583new file mode 100644
21584index 0000000..50c5edd
21585--- /dev/null
21586+++ b/arch/x86/kernel/verify_cpu.S
21587@@ -0,0 +1,140 @@
21588+/*
21589+ *
21590+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
21591+ * code has been borrowed from boot/setup.S and was introduced by
21592+ * Andi Kleen.
21593+ *
21594+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21595+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21596+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21597+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
21598+ *
21599+ * This source code is licensed under the GNU General Public License,
21600+ * Version 2. See the file COPYING for more details.
21601+ *
21602+ * This is a common code for verification whether CPU supports
21603+ * long mode and SSE or not. It is not called directly instead this
21604+ * file is included at various places and compiled in that context.
21605+ * This file is expected to run in 32bit code. Currently:
21606+ *
21607+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21608+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
21609+ * arch/x86/kernel/head_32.S: processor startup
21610+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21611+ *
21612+ * verify_cpu, returns the status of longmode and SSE in register %eax.
21613+ * 0: Success 1: Failure
21614+ *
21615+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
21616+ *
21617+ * The caller needs to check for the error code and take the action
21618+ * appropriately. Either display a message or halt.
21619+ */
21620+
21621+#include <asm/cpufeature.h>
21622+#include <asm/msr-index.h>
21623+
21624+verify_cpu:
21625+ pushfl # Save caller passed flags
21626+ pushl $0 # Kill any dangerous flags
21627+ popfl
21628+
21629+ pushfl # standard way to check for cpuid
21630+ popl %eax
21631+ movl %eax,%ebx
21632+ xorl $0x200000,%eax
21633+ pushl %eax
21634+ popfl
21635+ pushfl
21636+ popl %eax
21637+ cmpl %eax,%ebx
21638+ jz verify_cpu_no_longmode # cpu has no cpuid
21639+
21640+ movl $0x0,%eax # See if cpuid 1 is implemented
21641+ cpuid
21642+ cmpl $0x1,%eax
21643+ jb verify_cpu_no_longmode # no cpuid 1
21644+
21645+ xor %di,%di
21646+ cmpl $0x68747541,%ebx # AuthenticAMD
21647+ jnz verify_cpu_noamd
21648+ cmpl $0x69746e65,%edx
21649+ jnz verify_cpu_noamd
21650+ cmpl $0x444d4163,%ecx
21651+ jnz verify_cpu_noamd
21652+ mov $1,%di # cpu is from AMD
21653+ jmp verify_cpu_check
21654+
21655+verify_cpu_noamd:
21656+ cmpl $0x756e6547,%ebx # GenuineIntel?
21657+ jnz verify_cpu_check
21658+ cmpl $0x49656e69,%edx
21659+ jnz verify_cpu_check
21660+ cmpl $0x6c65746e,%ecx
21661+ jnz verify_cpu_check
21662+
21663+ # only call IA32_MISC_ENABLE when:
21664+ # family > 6 || (family == 6 && model >= 0xd)
21665+ movl $0x1, %eax # check CPU family and model
21666+ cpuid
21667+ movl %eax, %ecx
21668+
21669+ andl $0x0ff00f00, %eax # mask family and extended family
21670+ shrl $8, %eax
21671+ cmpl $6, %eax
21672+ ja verify_cpu_clear_xd # family > 6, ok
21673+ jb verify_cpu_check # family < 6, skip
21674+
21675+ andl $0x000f00f0, %ecx # mask model and extended model
21676+ shrl $4, %ecx
21677+ cmpl $0xd, %ecx
21678+ jb verify_cpu_check # family == 6, model < 0xd, skip
21679+
21680+verify_cpu_clear_xd:
21681+ movl $MSR_IA32_MISC_ENABLE, %ecx
21682+ rdmsr
21683+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
21684+ jnc verify_cpu_check # only write MSR if bit was changed
21685+ wrmsr
21686+
21687+verify_cpu_check:
21688+ movl $0x1,%eax # Does the cpu have what it takes
21689+ cpuid
21690+ andl $REQUIRED_MASK0,%edx
21691+ xorl $REQUIRED_MASK0,%edx
21692+ jnz verify_cpu_no_longmode
21693+
21694+ movl $0x80000000,%eax # See if extended cpuid is implemented
21695+ cpuid
21696+ cmpl $0x80000001,%eax
21697+ jb verify_cpu_no_longmode # no extended cpuid
21698+
21699+ movl $0x80000001,%eax # Does the cpu have what it takes
21700+ cpuid
21701+ andl $REQUIRED_MASK1,%edx
21702+ xorl $REQUIRED_MASK1,%edx
21703+ jnz verify_cpu_no_longmode
21704+
21705+verify_cpu_sse_test:
21706+ movl $1,%eax
21707+ cpuid
21708+ andl $SSE_MASK,%edx
21709+ cmpl $SSE_MASK,%edx
21710+ je verify_cpu_sse_ok
21711+ test %di,%di
21712+ jz verify_cpu_no_longmode # only try to force SSE on AMD
21713+ movl $MSR_K7_HWCR,%ecx
21714+ rdmsr
21715+ btr $15,%eax # enable SSE
21716+ wrmsr
21717+ xor %di,%di # don't loop
21718+ jmp verify_cpu_sse_test # try again
21719+
21720+verify_cpu_no_longmode:
21721+ popfl # Restore caller passed flags
21722+ movl $1,%eax
21723+ ret
21724+verify_cpu_sse_ok:
21725+ popfl # Restore caller passed flags
21726+ xorl %eax, %eax
21727+ ret
21728diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
21729deleted file mode 100644
21730index 45b6f8a..0000000
21731--- a/arch/x86/kernel/verify_cpu_64.S
21732+++ /dev/null
21733@@ -1,105 +0,0 @@
21734-/*
21735- *
21736- * verify_cpu.S - Code for cpu long mode and SSE verification. This
21737- * code has been borrowed from boot/setup.S and was introduced by
21738- * Andi Kleen.
21739- *
21740- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21741- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21742- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21743- *
21744- * This source code is licensed under the GNU General Public License,
21745- * Version 2. See the file COPYING for more details.
21746- *
21747- * This is a common code for verification whether CPU supports
21748- * long mode and SSE or not. It is not called directly instead this
21749- * file is included at various places and compiled in that context.
21750- * Following are the current usage.
21751- *
21752- * This file is included by both 16bit and 32bit code.
21753- *
21754- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
21755- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
21756- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
21757- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
21758- *
21759- * verify_cpu, returns the status of cpu check in register %eax.
21760- * 0: Success 1: Failure
21761- *
21762- * The caller needs to check for the error code and take the action
21763- * appropriately. Either display a message or halt.
21764- */
21765-
21766-#include <asm/cpufeature.h>
21767-
21768-verify_cpu:
21769- pushfl # Save caller passed flags
21770- pushl $0 # Kill any dangerous flags
21771- popfl
21772-
21773- pushfl # standard way to check for cpuid
21774- popl %eax
21775- movl %eax,%ebx
21776- xorl $0x200000,%eax
21777- pushl %eax
21778- popfl
21779- pushfl
21780- popl %eax
21781- cmpl %eax,%ebx
21782- jz verify_cpu_no_longmode # cpu has no cpuid
21783-
21784- movl $0x0,%eax # See if cpuid 1 is implemented
21785- cpuid
21786- cmpl $0x1,%eax
21787- jb verify_cpu_no_longmode # no cpuid 1
21788-
21789- xor %di,%di
21790- cmpl $0x68747541,%ebx # AuthenticAMD
21791- jnz verify_cpu_noamd
21792- cmpl $0x69746e65,%edx
21793- jnz verify_cpu_noamd
21794- cmpl $0x444d4163,%ecx
21795- jnz verify_cpu_noamd
21796- mov $1,%di # cpu is from AMD
21797-
21798-verify_cpu_noamd:
21799- movl $0x1,%eax # Does the cpu have what it takes
21800- cpuid
21801- andl $REQUIRED_MASK0,%edx
21802- xorl $REQUIRED_MASK0,%edx
21803- jnz verify_cpu_no_longmode
21804-
21805- movl $0x80000000,%eax # See if extended cpuid is implemented
21806- cpuid
21807- cmpl $0x80000001,%eax
21808- jb verify_cpu_no_longmode # no extended cpuid
21809-
21810- movl $0x80000001,%eax # Does the cpu have what it takes
21811- cpuid
21812- andl $REQUIRED_MASK1,%edx
21813- xorl $REQUIRED_MASK1,%edx
21814- jnz verify_cpu_no_longmode
21815-
21816-verify_cpu_sse_test:
21817- movl $1,%eax
21818- cpuid
21819- andl $SSE_MASK,%edx
21820- cmpl $SSE_MASK,%edx
21821- je verify_cpu_sse_ok
21822- test %di,%di
21823- jz verify_cpu_no_longmode # only try to force SSE on AMD
21824- movl $0xc0010015,%ecx # HWCR
21825- rdmsr
21826- btr $15,%eax # enable SSE
21827- wrmsr
21828- xor %di,%di # don't loop
21829- jmp verify_cpu_sse_test # try again
21830-
21831-verify_cpu_no_longmode:
21832- popfl # Restore caller passed flags
21833- movl $1,%eax
21834- ret
21835-verify_cpu_sse_ok:
21836- popfl # Restore caller passed flags
21837- xorl %eax, %eax
21838- ret
21839diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21840index 9c4e625..e9bb4ed 100644
21841--- a/arch/x86/kernel/vm86_32.c
21842+++ b/arch/x86/kernel/vm86_32.c
21843@@ -41,6 +41,7 @@
21844 #include <linux/ptrace.h>
21845 #include <linux/audit.h>
21846 #include <linux/stddef.h>
21847+#include <linux/grsecurity.h>
21848
21849 #include <asm/uaccess.h>
21850 #include <asm/io.h>
21851@@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
21852 /* convert vm86_regs to kernel_vm86_regs */
21853 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21854 const struct vm86_regs __user *user,
21855+ unsigned extra) __size_overflow(3);
21856+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21857+ const struct vm86_regs __user *user,
21858 unsigned extra)
21859 {
21860 int ret = 0;
21861@@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21862 do_exit(SIGSEGV);
21863 }
21864
21865- tss = &per_cpu(init_tss, get_cpu());
21866+ tss = init_tss + get_cpu();
21867 current->thread.sp0 = current->thread.saved_sp0;
21868 current->thread.sysenter_cs = __KERNEL_CS;
21869 load_sp0(tss, &current->thread);
21870@@ -208,6 +212,13 @@ int sys_vm86old(struct pt_regs *regs)
21871 struct task_struct *tsk;
21872 int tmp, ret = -EPERM;
21873
21874+#ifdef CONFIG_GRKERNSEC_VM86
21875+ if (!capable(CAP_SYS_RAWIO)) {
21876+ gr_handle_vm86();
21877+ goto out;
21878+ }
21879+#endif
21880+
21881 tsk = current;
21882 if (tsk->thread.saved_sp0)
21883 goto out;
21884@@ -238,6 +249,14 @@ int sys_vm86(struct pt_regs *regs)
21885 int tmp, ret;
21886 struct vm86plus_struct __user *v86;
21887
21888+#ifdef CONFIG_GRKERNSEC_VM86
21889+ if (!capable(CAP_SYS_RAWIO)) {
21890+ gr_handle_vm86();
21891+ ret = -EPERM;
21892+ goto out;
21893+ }
21894+#endif
21895+
21896 tsk = current;
21897 switch (regs->bx) {
21898 case VM86_REQUEST_IRQ:
21899@@ -324,7 +343,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21900 tsk->thread.saved_fs = info->regs32->fs;
21901 tsk->thread.saved_gs = get_user_gs(info->regs32);
21902
21903- tss = &per_cpu(init_tss, get_cpu());
21904+ tss = init_tss + get_cpu();
21905 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21906 if (cpu_has_sep)
21907 tsk->thread.sysenter_cs = 0;
21908@@ -529,7 +548,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21909 goto cannot_handle;
21910 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21911 goto cannot_handle;
21912- intr_ptr = (unsigned long __user *) (i << 2);
21913+ intr_ptr = (__force unsigned long __user *) (i << 2);
21914 if (get_user(segoffs, intr_ptr))
21915 goto cannot_handle;
21916 if ((segoffs >> 16) == BIOSSEG)
21917diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
21918index d430e4c..831f817 100644
21919--- a/arch/x86/kernel/vmi_32.c
21920+++ b/arch/x86/kernel/vmi_32.c
21921@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
21922 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
21923
21924 #define call_vrom_func(rom,func) \
21925- (((VROMFUNC *)(rom->func))())
21926+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
21927
21928 #define call_vrom_long_func(rom,func,arg) \
21929- (((VROMLONGFUNC *)(rom->func)) (arg))
21930+({\
21931+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
21932+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
21933+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
21934+ __reloc;\
21935+})
21936
21937-static struct vrom_header *vmi_rom;
21938+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
21939 static int disable_pge;
21940 static int disable_pse;
21941 static int disable_sep;
21942@@ -76,10 +81,10 @@ static struct {
21943 void (*set_initial_ap_state)(int, int);
21944 void (*halt)(void);
21945 void (*set_lazy_mode)(int mode);
21946-} vmi_ops;
21947+} __no_const vmi_ops __read_only;
21948
21949 /* Cached VMI operations */
21950-struct vmi_timer_ops vmi_timer_ops;
21951+struct vmi_timer_ops vmi_timer_ops __read_only;
21952
21953 /*
21954 * VMI patching routines.
21955@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
21956 static inline void patch_offset(void *insnbuf,
21957 unsigned long ip, unsigned long dest)
21958 {
21959- *(unsigned long *)(insnbuf+1) = dest-ip-5;
21960+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
21961 }
21962
21963 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21964@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21965 {
21966 u64 reloc;
21967 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
21968+
21969 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
21970 switch(rel->type) {
21971 case VMI_RELOCATION_CALL_REL:
21972@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
21973
21974 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
21975 {
21976- const pte_t pte = { .pte = 0 };
21977+ const pte_t pte = __pte(0ULL);
21978 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
21979 }
21980
21981 static void vmi_pmd_clear(pmd_t *pmd)
21982 {
21983- const pte_t pte = { .pte = 0 };
21984+ const pte_t pte = __pte(0ULL);
21985 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
21986 }
21987 #endif
21988@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
21989 ap.ss = __KERNEL_DS;
21990 ap.esp = (unsigned long) start_esp;
21991
21992- ap.ds = __USER_DS;
21993- ap.es = __USER_DS;
21994+ ap.ds = __KERNEL_DS;
21995+ ap.es = __KERNEL_DS;
21996 ap.fs = __KERNEL_PERCPU;
21997- ap.gs = __KERNEL_STACK_CANARY;
21998+ savesegment(gs, ap.gs);
21999
22000 ap.eflags = 0;
22001
22002@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
22003 paravirt_leave_lazy_mmu();
22004 }
22005
22006+#ifdef CONFIG_PAX_KERNEXEC
22007+static unsigned long vmi_pax_open_kernel(void)
22008+{
22009+ return 0;
22010+}
22011+
22012+static unsigned long vmi_pax_close_kernel(void)
22013+{
22014+ return 0;
22015+}
22016+#endif
22017+
22018 static inline int __init check_vmi_rom(struct vrom_header *rom)
22019 {
22020 struct pci_header *pci;
22021@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
22022 return 0;
22023 if (rom->vrom_signature != VMI_SIGNATURE)
22024 return 0;
22025+ if (rom->rom_length * 512 > sizeof(*rom)) {
22026+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
22027+ return 0;
22028+ }
22029 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
22030 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
22031 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
22032@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
22033 struct vrom_header *romstart;
22034 romstart = (struct vrom_header *)isa_bus_to_virt(base);
22035 if (check_vmi_rom(romstart)) {
22036- vmi_rom = romstart;
22037+ vmi_rom = *romstart;
22038 return 1;
22039 }
22040 }
22041@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
22042
22043 para_fill(pv_irq_ops.safe_halt, Halt);
22044
22045+#ifdef CONFIG_PAX_KERNEXEC
22046+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
22047+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
22048+#endif
22049+
22050 /*
22051 * Alternative instruction rewriting doesn't happen soon enough
22052 * to convert VMI_IRET to a call instead of a jump; so we have
22053@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
22054
22055 void __init vmi_init(void)
22056 {
22057- if (!vmi_rom)
22058+ if (!vmi_rom.rom_signature)
22059 probe_vmi_rom();
22060 else
22061- check_vmi_rom(vmi_rom);
22062+ check_vmi_rom(&vmi_rom);
22063
22064 /* In case probing for or validating the ROM failed, basil */
22065- if (!vmi_rom)
22066+ if (!vmi_rom.rom_signature)
22067 return;
22068
22069- reserve_top_address(-vmi_rom->virtual_top);
22070+ reserve_top_address(-vmi_rom.virtual_top);
22071
22072 #ifdef CONFIG_X86_IO_APIC
22073 /* This is virtual hardware; timer routing is wired correctly */
22074@@ -874,7 +901,7 @@ void __init vmi_activate(void)
22075 {
22076 unsigned long flags;
22077
22078- if (!vmi_rom)
22079+ if (!vmi_rom.rom_signature)
22080 return;
22081
22082 local_irq_save(flags);
22083diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
22084index 3c68fe2..12c8280 100644
22085--- a/arch/x86/kernel/vmlinux.lds.S
22086+++ b/arch/x86/kernel/vmlinux.lds.S
22087@@ -26,6 +26,13 @@
22088 #include <asm/page_types.h>
22089 #include <asm/cache.h>
22090 #include <asm/boot.h>
22091+#include <asm/segment.h>
22092+
22093+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22094+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
22095+#else
22096+#define __KERNEL_TEXT_OFFSET 0
22097+#endif
22098
22099 #undef i386 /* in case the preprocessor is a 32bit one */
22100
22101@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
22102 #ifdef CONFIG_X86_32
22103 OUTPUT_ARCH(i386)
22104 ENTRY(phys_startup_32)
22105-jiffies = jiffies_64;
22106 #else
22107 OUTPUT_ARCH(i386:x86-64)
22108 ENTRY(phys_startup_64)
22109-jiffies_64 = jiffies;
22110 #endif
22111
22112 PHDRS {
22113 text PT_LOAD FLAGS(5); /* R_E */
22114- data PT_LOAD FLAGS(7); /* RWE */
22115+#ifdef CONFIG_X86_32
22116+ module PT_LOAD FLAGS(5); /* R_E */
22117+#endif
22118+#ifdef CONFIG_XEN
22119+ rodata PT_LOAD FLAGS(5); /* R_E */
22120+#else
22121+ rodata PT_LOAD FLAGS(4); /* R__ */
22122+#endif
22123+ data PT_LOAD FLAGS(6); /* RW_ */
22124 #ifdef CONFIG_X86_64
22125 user PT_LOAD FLAGS(5); /* R_E */
22126+#endif
22127+ init.begin PT_LOAD FLAGS(6); /* RW_ */
22128 #ifdef CONFIG_SMP
22129 percpu PT_LOAD FLAGS(6); /* RW_ */
22130 #endif
22131+ text.init PT_LOAD FLAGS(5); /* R_E */
22132+ text.exit PT_LOAD FLAGS(5); /* R_E */
22133 init PT_LOAD FLAGS(7); /* RWE */
22134-#endif
22135 note PT_NOTE FLAGS(0); /* ___ */
22136 }
22137
22138 SECTIONS
22139 {
22140 #ifdef CONFIG_X86_32
22141- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
22142- phys_startup_32 = startup_32 - LOAD_OFFSET;
22143+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
22144 #else
22145- . = __START_KERNEL;
22146- phys_startup_64 = startup_64 - LOAD_OFFSET;
22147+ . = __START_KERNEL;
22148 #endif
22149
22150 /* Text and read-only data */
22151- .text : AT(ADDR(.text) - LOAD_OFFSET) {
22152- _text = .;
22153+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22154 /* bootstrapping code */
22155+#ifdef CONFIG_X86_32
22156+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22157+#else
22158+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22159+#endif
22160+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22161+ _text = .;
22162 HEAD_TEXT
22163 #ifdef CONFIG_X86_32
22164 . = ALIGN(PAGE_SIZE);
22165@@ -82,28 +102,71 @@ SECTIONS
22166 IRQENTRY_TEXT
22167 *(.fixup)
22168 *(.gnu.warning)
22169- /* End of text section */
22170- _etext = .;
22171 } :text = 0x9090
22172
22173- NOTES :text :note
22174+ . += __KERNEL_TEXT_OFFSET;
22175
22176- EXCEPTION_TABLE(16) :text = 0x9090
22177+#ifdef CONFIG_X86_32
22178+ . = ALIGN(PAGE_SIZE);
22179+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
22180+ *(.vmi.rom)
22181+ } :module
22182+
22183+ . = ALIGN(PAGE_SIZE);
22184+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
22185+
22186+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
22187+ MODULES_EXEC_VADDR = .;
22188+ BYTE(0)
22189+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
22190+ . = ALIGN(HPAGE_SIZE);
22191+ MODULES_EXEC_END = . - 1;
22192+#endif
22193+
22194+ } :module
22195+#endif
22196+
22197+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
22198+ /* End of text section */
22199+ _etext = . - __KERNEL_TEXT_OFFSET;
22200+ }
22201+
22202+#ifdef CONFIG_X86_32
22203+ . = ALIGN(PAGE_SIZE);
22204+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
22205+ *(.idt)
22206+ . = ALIGN(PAGE_SIZE);
22207+ *(.empty_zero_page)
22208+ *(.swapper_pg_fixmap)
22209+ *(.swapper_pg_pmd)
22210+ *(.swapper_pg_dir)
22211+ *(.trampoline_pg_dir)
22212+ } :rodata
22213+#endif
22214+
22215+ . = ALIGN(PAGE_SIZE);
22216+ NOTES :rodata :note
22217+
22218+ EXCEPTION_TABLE(16) :rodata
22219
22220 RO_DATA(PAGE_SIZE)
22221
22222 /* Data */
22223 .data : AT(ADDR(.data) - LOAD_OFFSET) {
22224+
22225+#ifdef CONFIG_PAX_KERNEXEC
22226+ . = ALIGN(HPAGE_SIZE);
22227+#else
22228+ . = ALIGN(PAGE_SIZE);
22229+#endif
22230+
22231 /* Start of data section */
22232 _sdata = .;
22233
22234 /* init_task */
22235 INIT_TASK_DATA(THREAD_SIZE)
22236
22237-#ifdef CONFIG_X86_32
22238- /* 32 bit has nosave before _edata */
22239 NOSAVE_DATA
22240-#endif
22241
22242 PAGE_ALIGNED_DATA(PAGE_SIZE)
22243
22244@@ -112,6 +175,8 @@ SECTIONS
22245 DATA_DATA
22246 CONSTRUCTORS
22247
22248+ jiffies = jiffies_64;
22249+
22250 /* rarely changed data like cpu maps */
22251 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
22252
22253@@ -166,12 +231,6 @@ SECTIONS
22254 }
22255 vgetcpu_mode = VVIRT(.vgetcpu_mode);
22256
22257- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
22258- .jiffies : AT(VLOAD(.jiffies)) {
22259- *(.jiffies)
22260- }
22261- jiffies = VVIRT(.jiffies);
22262-
22263 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
22264 *(.vsyscall_3)
22265 }
22266@@ -187,12 +246,19 @@ SECTIONS
22267 #endif /* CONFIG_X86_64 */
22268
22269 /* Init code and data - will be freed after init */
22270- . = ALIGN(PAGE_SIZE);
22271 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
22272+ BYTE(0)
22273+
22274+#ifdef CONFIG_PAX_KERNEXEC
22275+ . = ALIGN(HPAGE_SIZE);
22276+#else
22277+ . = ALIGN(PAGE_SIZE);
22278+#endif
22279+
22280 __init_begin = .; /* paired with __init_end */
22281- }
22282+ } :init.begin
22283
22284-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
22285+#ifdef CONFIG_SMP
22286 /*
22287 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
22288 * output PHDR, so the next output section - .init.text - should
22289@@ -201,12 +267,27 @@ SECTIONS
22290 PERCPU_VADDR(0, :percpu)
22291 #endif
22292
22293- INIT_TEXT_SECTION(PAGE_SIZE)
22294-#ifdef CONFIG_X86_64
22295- :init
22296-#endif
22297+ . = ALIGN(PAGE_SIZE);
22298+ init_begin = .;
22299+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
22300+ VMLINUX_SYMBOL(_sinittext) = .;
22301+ INIT_TEXT
22302+ VMLINUX_SYMBOL(_einittext) = .;
22303+ . = ALIGN(PAGE_SIZE);
22304+ } :text.init
22305
22306- INIT_DATA_SECTION(16)
22307+ /*
22308+ * .exit.text is discard at runtime, not link time, to deal with
22309+ * references from .altinstructions and .eh_frame
22310+ */
22311+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22312+ EXIT_TEXT
22313+ . = ALIGN(16);
22314+ } :text.exit
22315+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
22316+
22317+ . = ALIGN(PAGE_SIZE);
22318+ INIT_DATA_SECTION(16) :init
22319
22320 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
22321 __x86_cpu_dev_start = .;
22322@@ -232,19 +313,11 @@ SECTIONS
22323 *(.altinstr_replacement)
22324 }
22325
22326- /*
22327- * .exit.text is discard at runtime, not link time, to deal with
22328- * references from .altinstructions and .eh_frame
22329- */
22330- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
22331- EXIT_TEXT
22332- }
22333-
22334 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
22335 EXIT_DATA
22336 }
22337
22338-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
22339+#ifndef CONFIG_SMP
22340 PERCPU(PAGE_SIZE)
22341 #endif
22342
22343@@ -267,12 +340,6 @@ SECTIONS
22344 . = ALIGN(PAGE_SIZE);
22345 }
22346
22347-#ifdef CONFIG_X86_64
22348- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
22349- NOSAVE_DATA
22350- }
22351-#endif
22352-
22353 /* BSS */
22354 . = ALIGN(PAGE_SIZE);
22355 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
22356@@ -288,6 +355,7 @@ SECTIONS
22357 __brk_base = .;
22358 . += 64 * 1024; /* 64k alignment slop space */
22359 *(.brk_reservation) /* areas brk users have reserved */
22360+ . = ALIGN(HPAGE_SIZE);
22361 __brk_limit = .;
22362 }
22363
22364@@ -316,13 +384,12 @@ SECTIONS
22365 * for the boot processor.
22366 */
22367 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
22368-INIT_PER_CPU(gdt_page);
22369 INIT_PER_CPU(irq_stack_union);
22370
22371 /*
22372 * Build-time check on the image size:
22373 */
22374-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
22375+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
22376 "kernel image bigger than KERNEL_IMAGE_SIZE");
22377
22378 #ifdef CONFIG_SMP
22379diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
22380index 62f39d7..3bc46a1 100644
22381--- a/arch/x86/kernel/vsyscall_64.c
22382+++ b/arch/x86/kernel/vsyscall_64.c
22383@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
22384
22385 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
22386 /* copy vsyscall data */
22387+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
22388 vsyscall_gtod_data.clock.vread = clock->vread;
22389 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
22390 vsyscall_gtod_data.clock.mask = clock->mask;
22391@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
22392 We do this here because otherwise user space would do it on
22393 its own in a likely inferior way (no access to jiffies).
22394 If you don't like it pass NULL. */
22395- if (tcache && tcache->blob[0] == (j = __jiffies)) {
22396+ if (tcache && tcache->blob[0] == (j = jiffies)) {
22397 p = tcache->blob[1];
22398 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
22399 /* Load per CPU data from RDTSCP */
22400diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
22401index 3909e3b..5433a97 100644
22402--- a/arch/x86/kernel/x8664_ksyms_64.c
22403+++ b/arch/x86/kernel/x8664_ksyms_64.c
22404@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
22405
22406 EXPORT_SYMBOL(copy_user_generic);
22407 EXPORT_SYMBOL(__copy_user_nocache);
22408-EXPORT_SYMBOL(copy_from_user);
22409-EXPORT_SYMBOL(copy_to_user);
22410 EXPORT_SYMBOL(__copy_from_user_inatomic);
22411
22412 EXPORT_SYMBOL(copy_page);
22413diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
22414index c5ee17e..d63218f 100644
22415--- a/arch/x86/kernel/xsave.c
22416+++ b/arch/x86/kernel/xsave.c
22417@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
22418 fx_sw_user->xstate_size > fx_sw_user->extended_size)
22419 return -1;
22420
22421- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
22422+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
22423 fx_sw_user->extended_size -
22424 FP_XSTATE_MAGIC2_SIZE));
22425 /*
22426@@ -196,7 +196,7 @@ fx_only:
22427 * the other extended state.
22428 */
22429 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
22430- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
22431+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
22432 }
22433
22434 /*
22435@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
22436 if (task_thread_info(tsk)->status & TS_XSAVE)
22437 err = restore_user_xstate(buf);
22438 else
22439- err = fxrstor_checking((__force struct i387_fxsave_struct *)
22440+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
22441 buf);
22442 if (unlikely(err)) {
22443 /*
22444diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
22445index 1350e43..a94b011 100644
22446--- a/arch/x86/kvm/emulate.c
22447+++ b/arch/x86/kvm/emulate.c
22448@@ -81,8 +81,8 @@
22449 #define Src2CL (1<<29)
22450 #define Src2ImmByte (2<<29)
22451 #define Src2One (3<<29)
22452-#define Src2Imm16 (4<<29)
22453-#define Src2Mask (7<<29)
22454+#define Src2Imm16 (4U<<29)
22455+#define Src2Mask (7U<<29)
22456
22457 enum {
22458 Group1_80, Group1_81, Group1_82, Group1_83,
22459@@ -411,6 +411,7 @@ static u32 group2_table[] = {
22460
22461 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
22462 do { \
22463+ unsigned long _tmp; \
22464 __asm__ __volatile__ ( \
22465 _PRE_EFLAGS("0", "4", "2") \
22466 _op _suffix " %"_x"3,%1; " \
22467@@ -424,8 +425,6 @@ static u32 group2_table[] = {
22468 /* Raw emulation: instruction has two explicit operands. */
22469 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
22470 do { \
22471- unsigned long _tmp; \
22472- \
22473 switch ((_dst).bytes) { \
22474 case 2: \
22475 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
22476@@ -441,7 +440,6 @@ static u32 group2_table[] = {
22477
22478 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
22479 do { \
22480- unsigned long _tmp; \
22481 switch ((_dst).bytes) { \
22482 case 1: \
22483 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
22484diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
22485index 8dfeaaa..4daa395 100644
22486--- a/arch/x86/kvm/lapic.c
22487+++ b/arch/x86/kvm/lapic.c
22488@@ -52,7 +52,7 @@
22489 #define APIC_BUS_CYCLE_NS 1
22490
22491 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
22492-#define apic_debug(fmt, arg...)
22493+#define apic_debug(fmt, arg...) do {} while (0)
22494
22495 #define APIC_LVT_NUM 6
22496 /* 14 is the version for Xeon and Pentium 8.4.8*/
22497diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
22498index 3bc2707..dd157e2 100644
22499--- a/arch/x86/kvm/paging_tmpl.h
22500+++ b/arch/x86/kvm/paging_tmpl.h
22501@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22502 int level = PT_PAGE_TABLE_LEVEL;
22503 unsigned long mmu_seq;
22504
22505+ pax_track_stack();
22506+
22507 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
22508 kvm_mmu_audit(vcpu, "pre page fault");
22509
22510@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22511 kvm_mmu_free_some_pages(vcpu);
22512 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
22513 level, &write_pt, pfn);
22514+ (void)sptep;
22515 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
22516 sptep, *sptep, write_pt);
22517
22518diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
22519index 7c6e63e..1b7dac1 100644
22520--- a/arch/x86/kvm/svm.c
22521+++ b/arch/x86/kvm/svm.c
22522@@ -2240,6 +2240,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
22523 return 1;
22524 }
22525
22526+static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) __size_overflow(3);
22527 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
22528 {
22529 struct vcpu_svm *svm = to_svm(vcpu);
22530@@ -2486,7 +2487,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
22531 int cpu = raw_smp_processor_id();
22532
22533 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
22534+
22535+ pax_open_kernel();
22536 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
22537+ pax_close_kernel();
22538+
22539 load_TR_desc();
22540 }
22541
22542@@ -2947,7 +2952,7 @@ static bool svm_gb_page_enable(void)
22543 return true;
22544 }
22545
22546-static struct kvm_x86_ops svm_x86_ops = {
22547+static const struct kvm_x86_ops svm_x86_ops = {
22548 .cpu_has_kvm_support = has_svm,
22549 .disabled_by_bios = is_disabled,
22550 .hardware_setup = svm_hardware_setup,
22551diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
22552index e6d925f..8cdd779 100644
22553--- a/arch/x86/kvm/vmx.c
22554+++ b/arch/x86/kvm/vmx.c
22555@@ -570,7 +570,11 @@ static void reload_tss(void)
22556
22557 kvm_get_gdt(&gdt);
22558 descs = (void *)gdt.base;
22559+
22560+ pax_open_kernel();
22561 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
22562+ pax_close_kernel();
22563+
22564 load_TR_desc();
22565 }
22566
22567@@ -1035,6 +1039,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
22568 * Returns 0 on success, non-0 otherwise.
22569 * Assumes vcpu_load() was already called.
22570 */
22571+static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) __size_overflow(3);
22572 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
22573 {
22574 struct vcpu_vmx *vmx = to_vmx(vcpu);
22575@@ -1410,8 +1415,11 @@ static __init int hardware_setup(void)
22576 if (!cpu_has_vmx_flexpriority())
22577 flexpriority_enabled = 0;
22578
22579- if (!cpu_has_vmx_tpr_shadow())
22580- kvm_x86_ops->update_cr8_intercept = NULL;
22581+ if (!cpu_has_vmx_tpr_shadow()) {
22582+ pax_open_kernel();
22583+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
22584+ pax_close_kernel();
22585+ }
22586
22587 if (enable_ept && !cpu_has_vmx_ept_2m_page())
22588 kvm_disable_largepages();
22589@@ -2362,7 +2370,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
22590 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
22591
22592 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
22593- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
22594+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
22595 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
22596 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
22597 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
22598@@ -3718,6 +3726,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22599 "jmp .Lkvm_vmx_return \n\t"
22600 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
22601 ".Lkvm_vmx_return: "
22602+
22603+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22604+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
22605+ ".Lkvm_vmx_return2: "
22606+#endif
22607+
22608 /* Save guest registers, load host registers, keep flags */
22609 "xchg %0, (%%"R"sp) \n\t"
22610 "mov %%"R"ax, %c[rax](%0) \n\t"
22611@@ -3764,8 +3778,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22612 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
22613 #endif
22614 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
22615+
22616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22617+ ,[cs]"i"(__KERNEL_CS)
22618+#endif
22619+
22620 : "cc", "memory"
22621- , R"bx", R"di", R"si"
22622+ , R"ax", R"bx", R"di", R"si"
22623 #ifdef CONFIG_X86_64
22624 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
22625 #endif
22626@@ -3782,7 +3801,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22627 if (vmx->rmode.irq.pending)
22628 fixup_rmode_irq(vmx);
22629
22630- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
22631+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
22632+
22633+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22634+ loadsegment(fs, __KERNEL_PERCPU);
22635+#endif
22636+
22637+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22638+ __set_fs(current_thread_info()->addr_limit);
22639+#endif
22640+
22641 vmx->launched = 1;
22642
22643 vmx_complete_interrupts(vmx);
22644@@ -3957,7 +3985,7 @@ static bool vmx_gb_page_enable(void)
22645 return false;
22646 }
22647
22648-static struct kvm_x86_ops vmx_x86_ops = {
22649+static const struct kvm_x86_ops vmx_x86_ops = {
22650 .cpu_has_kvm_support = cpu_has_kvm_support,
22651 .disabled_by_bios = vmx_disabled_by_bios,
22652 .hardware_setup = hardware_setup,
22653diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
22654index df1cefb..31447ca 100644
22655--- a/arch/x86/kvm/x86.c
22656+++ b/arch/x86/kvm/x86.c
22657@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
22658 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
22659 struct kvm_cpuid_entry2 __user *entries);
22660
22661-struct kvm_x86_ops *kvm_x86_ops;
22662+const struct kvm_x86_ops *kvm_x86_ops;
22663 EXPORT_SYMBOL_GPL(kvm_x86_ops);
22664
22665 int ignore_msrs = 0;
22666@@ -547,6 +547,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
22667 return kvm_set_msr(vcpu, index, *data);
22668 }
22669
22670+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
22671 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
22672 {
22673 int version;
22674@@ -1430,15 +1431,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
22675 struct kvm_cpuid2 *cpuid,
22676 struct kvm_cpuid_entry2 __user *entries)
22677 {
22678- int r;
22679+ int r, i;
22680
22681 r = -E2BIG;
22682 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
22683 goto out;
22684 r = -EFAULT;
22685- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
22686- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22687+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22688 goto out;
22689+ for (i = 0; i < cpuid->nent; ++i) {
22690+ struct kvm_cpuid_entry2 cpuid_entry;
22691+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
22692+ goto out;
22693+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
22694+ }
22695 vcpu->arch.cpuid_nent = cpuid->nent;
22696 kvm_apic_set_version(vcpu);
22697 return 0;
22698@@ -1451,16 +1457,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
22699 struct kvm_cpuid2 *cpuid,
22700 struct kvm_cpuid_entry2 __user *entries)
22701 {
22702- int r;
22703+ int r, i;
22704
22705 vcpu_load(vcpu);
22706 r = -E2BIG;
22707 if (cpuid->nent < vcpu->arch.cpuid_nent)
22708 goto out;
22709 r = -EFAULT;
22710- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
22711- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22712+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22713 goto out;
22714+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
22715+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
22716+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
22717+ goto out;
22718+ }
22719 return 0;
22720
22721 out:
22722@@ -1678,7 +1688,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
22723 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
22724 struct kvm_interrupt *irq)
22725 {
22726- if (irq->irq < 0 || irq->irq >= 256)
22727+ if (irq->irq >= 256)
22728 return -EINVAL;
22729 if (irqchip_in_kernel(vcpu->kvm))
22730 return -ENXIO;
22731@@ -2768,6 +2778,11 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
22732 const void *old,
22733 const void *new,
22734 unsigned int bytes,
22735+ struct kvm_vcpu *vcpu) __size_overflow(5);
22736+static int emulator_cmpxchg_emulated(unsigned long addr,
22737+ const void *old,
22738+ const void *new,
22739+ unsigned int bytes,
22740 struct kvm_vcpu *vcpu)
22741 {
22742 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
22743@@ -3260,10 +3275,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
22744 .notifier_call = kvmclock_cpufreq_notifier
22745 };
22746
22747-int kvm_arch_init(void *opaque)
22748+int kvm_arch_init(const void *opaque)
22749 {
22750 int r, cpu;
22751- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
22752+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
22753
22754 if (kvm_x86_ops) {
22755 printk(KERN_ERR "kvm: already loaded the other module\n");
22756diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22757index 7e59dc1..b88c98f 100644
22758--- a/arch/x86/lguest/boot.c
22759+++ b/arch/x86/lguest/boot.c
22760@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22761 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22762 * Launcher to reboot us.
22763 */
22764-static void lguest_restart(char *reason)
22765+static __noreturn void lguest_restart(char *reason)
22766 {
22767 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
22768+ BUG();
22769 }
22770
22771 /*G:050
22772diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
22773index 824fa0b..c619e96 100644
22774--- a/arch/x86/lib/atomic64_32.c
22775+++ b/arch/x86/lib/atomic64_32.c
22776@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
22777 }
22778 EXPORT_SYMBOL(atomic64_cmpxchg);
22779
22780+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
22781+{
22782+ return cmpxchg8b(&ptr->counter, old_val, new_val);
22783+}
22784+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
22785+
22786 /**
22787 * atomic64_xchg - xchg atomic64 variable
22788 * @ptr: pointer to type atomic64_t
22789@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
22790 EXPORT_SYMBOL(atomic64_xchg);
22791
22792 /**
22793+ * atomic64_xchg_unchecked - xchg atomic64 variable
22794+ * @ptr: pointer to type atomic64_unchecked_t
22795+ * @new_val: value to assign
22796+ *
22797+ * Atomically xchgs the value of @ptr to @new_val and returns
22798+ * the old value.
22799+ */
22800+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22801+{
22802+ /*
22803+ * Try first with a (possibly incorrect) assumption about
22804+ * what we have there. We'll do two loops most likely,
22805+ * but we'll get an ownership MESI transaction straight away
22806+ * instead of a read transaction followed by a
22807+ * flush-for-ownership transaction:
22808+ */
22809+ u64 old_val, real_val = 0;
22810+
22811+ do {
22812+ old_val = real_val;
22813+
22814+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22815+
22816+ } while (real_val != old_val);
22817+
22818+ return old_val;
22819+}
22820+EXPORT_SYMBOL(atomic64_xchg_unchecked);
22821+
22822+/**
22823 * atomic64_set - set atomic64 variable
22824 * @ptr: pointer to type atomic64_t
22825 * @new_val: value to assign
22826@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
22827 EXPORT_SYMBOL(atomic64_set);
22828
22829 /**
22830-EXPORT_SYMBOL(atomic64_read);
22831+ * atomic64_unchecked_set - set atomic64 variable
22832+ * @ptr: pointer to type atomic64_unchecked_t
22833+ * @new_val: value to assign
22834+ *
22835+ * Atomically sets the value of @ptr to @new_val.
22836+ */
22837+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22838+{
22839+ atomic64_xchg_unchecked(ptr, new_val);
22840+}
22841+EXPORT_SYMBOL(atomic64_set_unchecked);
22842+
22843+/**
22844 * atomic64_add_return - add and return
22845 * @delta: integer value to add
22846 * @ptr: pointer to type atomic64_t
22847@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
22848 }
22849 EXPORT_SYMBOL(atomic64_add_return);
22850
22851+/**
22852+ * atomic64_add_return_unchecked - add and return
22853+ * @delta: integer value to add
22854+ * @ptr: pointer to type atomic64_unchecked_t
22855+ *
22856+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
22857+ */
22858+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22859+{
22860+ /*
22861+ * Try first with a (possibly incorrect) assumption about
22862+ * what we have there. We'll do two loops most likely,
22863+ * but we'll get an ownership MESI transaction straight away
22864+ * instead of a read transaction followed by a
22865+ * flush-for-ownership transaction:
22866+ */
22867+ u64 old_val, new_val, real_val = 0;
22868+
22869+ do {
22870+ old_val = real_val;
22871+ new_val = old_val + delta;
22872+
22873+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22874+
22875+ } while (real_val != old_val);
22876+
22877+ return new_val;
22878+}
22879+EXPORT_SYMBOL(atomic64_add_return_unchecked);
22880+
22881 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
22882 {
22883 return atomic64_add_return(-delta, ptr);
22884 }
22885 EXPORT_SYMBOL(atomic64_sub_return);
22886
22887+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22888+{
22889+ return atomic64_add_return_unchecked(-delta, ptr);
22890+}
22891+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
22892+
22893 u64 atomic64_inc_return(atomic64_t *ptr)
22894 {
22895 return atomic64_add_return(1, ptr);
22896 }
22897 EXPORT_SYMBOL(atomic64_inc_return);
22898
22899+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
22900+{
22901+ return atomic64_add_return_unchecked(1, ptr);
22902+}
22903+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
22904+
22905 u64 atomic64_dec_return(atomic64_t *ptr)
22906 {
22907 return atomic64_sub_return(1, ptr);
22908 }
22909 EXPORT_SYMBOL(atomic64_dec_return);
22910
22911+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
22912+{
22913+ return atomic64_sub_return_unchecked(1, ptr);
22914+}
22915+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
22916+
22917 /**
22918 * atomic64_add - add integer to atomic64 variable
22919 * @delta: integer value to add
22920@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
22921 EXPORT_SYMBOL(atomic64_add);
22922
22923 /**
22924+ * atomic64_add_unchecked - add integer to atomic64 variable
22925+ * @delta: integer value to add
22926+ * @ptr: pointer to type atomic64_unchecked_t
22927+ *
22928+ * Atomically adds @delta to @ptr.
22929+ */
22930+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22931+{
22932+ atomic64_add_return_unchecked(delta, ptr);
22933+}
22934+EXPORT_SYMBOL(atomic64_add_unchecked);
22935+
22936+/**
22937 * atomic64_sub - subtract the atomic64 variable
22938 * @delta: integer value to subtract
22939 * @ptr: pointer to type atomic64_t
22940@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
22941 EXPORT_SYMBOL(atomic64_sub);
22942
22943 /**
22944+ * atomic64_sub_unchecked - subtract the atomic64 variable
22945+ * @delta: integer value to subtract
22946+ * @ptr: pointer to type atomic64_unchecked_t
22947+ *
22948+ * Atomically subtracts @delta from @ptr.
22949+ */
22950+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22951+{
22952+ atomic64_add_unchecked(-delta, ptr);
22953+}
22954+EXPORT_SYMBOL(atomic64_sub_unchecked);
22955+
22956+/**
22957 * atomic64_sub_and_test - subtract value from variable and test result
22958 * @delta: integer value to subtract
22959 * @ptr: pointer to type atomic64_t
22960@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
22961 EXPORT_SYMBOL(atomic64_inc);
22962
22963 /**
22964+ * atomic64_inc_unchecked - increment atomic64 variable
22965+ * @ptr: pointer to type atomic64_unchecked_t
22966+ *
22967+ * Atomically increments @ptr by 1.
22968+ */
22969+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
22970+{
22971+ atomic64_add_unchecked(1, ptr);
22972+}
22973+EXPORT_SYMBOL(atomic64_inc_unchecked);
22974+
22975+/**
22976 * atomic64_dec - decrement atomic64 variable
22977 * @ptr: pointer to type atomic64_t
22978 *
22979@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
22980 EXPORT_SYMBOL(atomic64_dec);
22981
22982 /**
22983+ * atomic64_dec_unchecked - decrement atomic64 variable
22984+ * @ptr: pointer to type atomic64_unchecked_t
22985+ *
22986+ * Atomically decrements @ptr by 1.
22987+ */
22988+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
22989+{
22990+ atomic64_sub_unchecked(1, ptr);
22991+}
22992+EXPORT_SYMBOL(atomic64_dec_unchecked);
22993+
22994+/**
22995 * atomic64_dec_and_test - decrement and test
22996 * @ptr: pointer to type atomic64_t
22997 *
22998diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22999index adbccd0..98f96c8 100644
23000--- a/arch/x86/lib/checksum_32.S
23001+++ b/arch/x86/lib/checksum_32.S
23002@@ -28,7 +28,8 @@
23003 #include <linux/linkage.h>
23004 #include <asm/dwarf2.h>
23005 #include <asm/errno.h>
23006-
23007+#include <asm/segment.h>
23008+
23009 /*
23010 * computes a partial checksum, e.g. for TCP/UDP fragments
23011 */
23012@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
23013
23014 #define ARGBASE 16
23015 #define FP 12
23016-
23017-ENTRY(csum_partial_copy_generic)
23018+
23019+ENTRY(csum_partial_copy_generic_to_user)
23020 CFI_STARTPROC
23021+
23022+#ifdef CONFIG_PAX_MEMORY_UDEREF
23023+ pushl %gs
23024+ CFI_ADJUST_CFA_OFFSET 4
23025+ popl %es
23026+ CFI_ADJUST_CFA_OFFSET -4
23027+ jmp csum_partial_copy_generic
23028+#endif
23029+
23030+ENTRY(csum_partial_copy_generic_from_user)
23031+
23032+#ifdef CONFIG_PAX_MEMORY_UDEREF
23033+ pushl %gs
23034+ CFI_ADJUST_CFA_OFFSET 4
23035+ popl %ds
23036+ CFI_ADJUST_CFA_OFFSET -4
23037+#endif
23038+
23039+ENTRY(csum_partial_copy_generic)
23040 subl $4,%esp
23041 CFI_ADJUST_CFA_OFFSET 4
23042 pushl %edi
23043@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
23044 jmp 4f
23045 SRC(1: movw (%esi), %bx )
23046 addl $2, %esi
23047-DST( movw %bx, (%edi) )
23048+DST( movw %bx, %es:(%edi) )
23049 addl $2, %edi
23050 addw %bx, %ax
23051 adcl $0, %eax
23052@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
23053 SRC(1: movl (%esi), %ebx )
23054 SRC( movl 4(%esi), %edx )
23055 adcl %ebx, %eax
23056-DST( movl %ebx, (%edi) )
23057+DST( movl %ebx, %es:(%edi) )
23058 adcl %edx, %eax
23059-DST( movl %edx, 4(%edi) )
23060+DST( movl %edx, %es:4(%edi) )
23061
23062 SRC( movl 8(%esi), %ebx )
23063 SRC( movl 12(%esi), %edx )
23064 adcl %ebx, %eax
23065-DST( movl %ebx, 8(%edi) )
23066+DST( movl %ebx, %es:8(%edi) )
23067 adcl %edx, %eax
23068-DST( movl %edx, 12(%edi) )
23069+DST( movl %edx, %es:12(%edi) )
23070
23071 SRC( movl 16(%esi), %ebx )
23072 SRC( movl 20(%esi), %edx )
23073 adcl %ebx, %eax
23074-DST( movl %ebx, 16(%edi) )
23075+DST( movl %ebx, %es:16(%edi) )
23076 adcl %edx, %eax
23077-DST( movl %edx, 20(%edi) )
23078+DST( movl %edx, %es:20(%edi) )
23079
23080 SRC( movl 24(%esi), %ebx )
23081 SRC( movl 28(%esi), %edx )
23082 adcl %ebx, %eax
23083-DST( movl %ebx, 24(%edi) )
23084+DST( movl %ebx, %es:24(%edi) )
23085 adcl %edx, %eax
23086-DST( movl %edx, 28(%edi) )
23087+DST( movl %edx, %es:28(%edi) )
23088
23089 lea 32(%esi), %esi
23090 lea 32(%edi), %edi
23091@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
23092 shrl $2, %edx # This clears CF
23093 SRC(3: movl (%esi), %ebx )
23094 adcl %ebx, %eax
23095-DST( movl %ebx, (%edi) )
23096+DST( movl %ebx, %es:(%edi) )
23097 lea 4(%esi), %esi
23098 lea 4(%edi), %edi
23099 dec %edx
23100@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
23101 jb 5f
23102 SRC( movw (%esi), %cx )
23103 leal 2(%esi), %esi
23104-DST( movw %cx, (%edi) )
23105+DST( movw %cx, %es:(%edi) )
23106 leal 2(%edi), %edi
23107 je 6f
23108 shll $16,%ecx
23109 SRC(5: movb (%esi), %cl )
23110-DST( movb %cl, (%edi) )
23111+DST( movb %cl, %es:(%edi) )
23112 6: addl %ecx, %eax
23113 adcl $0, %eax
23114 7:
23115@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
23116
23117 6001:
23118 movl ARGBASE+20(%esp), %ebx # src_err_ptr
23119- movl $-EFAULT, (%ebx)
23120+ movl $-EFAULT, %ss:(%ebx)
23121
23122 # zero the complete destination - computing the rest
23123 # is too much work
23124@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
23125
23126 6002:
23127 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23128- movl $-EFAULT,(%ebx)
23129+ movl $-EFAULT,%ss:(%ebx)
23130 jmp 5000b
23131
23132 .previous
23133
23134+ pushl %ss
23135+ CFI_ADJUST_CFA_OFFSET 4
23136+ popl %ds
23137+ CFI_ADJUST_CFA_OFFSET -4
23138+ pushl %ss
23139+ CFI_ADJUST_CFA_OFFSET 4
23140+ popl %es
23141+ CFI_ADJUST_CFA_OFFSET -4
23142 popl %ebx
23143 CFI_ADJUST_CFA_OFFSET -4
23144 CFI_RESTORE ebx
23145@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
23146 CFI_ADJUST_CFA_OFFSET -4
23147 ret
23148 CFI_ENDPROC
23149-ENDPROC(csum_partial_copy_generic)
23150+ENDPROC(csum_partial_copy_generic_to_user)
23151
23152 #else
23153
23154 /* Version for PentiumII/PPro */
23155
23156 #define ROUND1(x) \
23157+ nop; nop; nop; \
23158 SRC(movl x(%esi), %ebx ) ; \
23159 addl %ebx, %eax ; \
23160- DST(movl %ebx, x(%edi) ) ;
23161+ DST(movl %ebx, %es:x(%edi)) ;
23162
23163 #define ROUND(x) \
23164+ nop; nop; nop; \
23165 SRC(movl x(%esi), %ebx ) ; \
23166 adcl %ebx, %eax ; \
23167- DST(movl %ebx, x(%edi) ) ;
23168+ DST(movl %ebx, %es:x(%edi)) ;
23169
23170 #define ARGBASE 12
23171-
23172-ENTRY(csum_partial_copy_generic)
23173+
23174+ENTRY(csum_partial_copy_generic_to_user)
23175 CFI_STARTPROC
23176+
23177+#ifdef CONFIG_PAX_MEMORY_UDEREF
23178+ pushl %gs
23179+ CFI_ADJUST_CFA_OFFSET 4
23180+ popl %es
23181+ CFI_ADJUST_CFA_OFFSET -4
23182+ jmp csum_partial_copy_generic
23183+#endif
23184+
23185+ENTRY(csum_partial_copy_generic_from_user)
23186+
23187+#ifdef CONFIG_PAX_MEMORY_UDEREF
23188+ pushl %gs
23189+ CFI_ADJUST_CFA_OFFSET 4
23190+ popl %ds
23191+ CFI_ADJUST_CFA_OFFSET -4
23192+#endif
23193+
23194+ENTRY(csum_partial_copy_generic)
23195 pushl %ebx
23196 CFI_ADJUST_CFA_OFFSET 4
23197 CFI_REL_OFFSET ebx, 0
23198@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
23199 subl %ebx, %edi
23200 lea -1(%esi),%edx
23201 andl $-32,%edx
23202- lea 3f(%ebx,%ebx), %ebx
23203+ lea 3f(%ebx,%ebx,2), %ebx
23204 testl %esi, %esi
23205 jmp *%ebx
23206 1: addl $64,%esi
23207@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
23208 jb 5f
23209 SRC( movw (%esi), %dx )
23210 leal 2(%esi), %esi
23211-DST( movw %dx, (%edi) )
23212+DST( movw %dx, %es:(%edi) )
23213 leal 2(%edi), %edi
23214 je 6f
23215 shll $16,%edx
23216 5:
23217 SRC( movb (%esi), %dl )
23218-DST( movb %dl, (%edi) )
23219+DST( movb %dl, %es:(%edi) )
23220 6: addl %edx, %eax
23221 adcl $0, %eax
23222 7:
23223 .section .fixup, "ax"
23224 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
23225- movl $-EFAULT, (%ebx)
23226+ movl $-EFAULT, %ss:(%ebx)
23227 # zero the complete destination (computing the rest is too much work)
23228 movl ARGBASE+8(%esp),%edi # dst
23229 movl ARGBASE+12(%esp),%ecx # len
23230@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
23231 rep; stosb
23232 jmp 7b
23233 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23234- movl $-EFAULT, (%ebx)
23235+ movl $-EFAULT, %ss:(%ebx)
23236 jmp 7b
23237 .previous
23238
23239+#ifdef CONFIG_PAX_MEMORY_UDEREF
23240+ pushl %ss
23241+ CFI_ADJUST_CFA_OFFSET 4
23242+ popl %ds
23243+ CFI_ADJUST_CFA_OFFSET -4
23244+ pushl %ss
23245+ CFI_ADJUST_CFA_OFFSET 4
23246+ popl %es
23247+ CFI_ADJUST_CFA_OFFSET -4
23248+#endif
23249+
23250 popl %esi
23251 CFI_ADJUST_CFA_OFFSET -4
23252 CFI_RESTORE esi
23253@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
23254 CFI_RESTORE ebx
23255 ret
23256 CFI_ENDPROC
23257-ENDPROC(csum_partial_copy_generic)
23258+ENDPROC(csum_partial_copy_generic_to_user)
23259
23260 #undef ROUND
23261 #undef ROUND1
23262diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
23263index ebeafcc..1e3a402 100644
23264--- a/arch/x86/lib/clear_page_64.S
23265+++ b/arch/x86/lib/clear_page_64.S
23266@@ -1,5 +1,6 @@
23267 #include <linux/linkage.h>
23268 #include <asm/dwarf2.h>
23269+#include <asm/alternative-asm.h>
23270
23271 /*
23272 * Zero a page.
23273@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
23274 movl $4096/8,%ecx
23275 xorl %eax,%eax
23276 rep stosq
23277+ pax_force_retaddr
23278 ret
23279 CFI_ENDPROC
23280 ENDPROC(clear_page_c)
23281@@ -33,6 +35,7 @@ ENTRY(clear_page)
23282 leaq 64(%rdi),%rdi
23283 jnz .Lloop
23284 nop
23285+ pax_force_retaddr
23286 ret
23287 CFI_ENDPROC
23288 .Lclear_page_end:
23289@@ -43,7 +46,7 @@ ENDPROC(clear_page)
23290
23291 #include <asm/cpufeature.h>
23292
23293- .section .altinstr_replacement,"ax"
23294+ .section .altinstr_replacement,"a"
23295 1: .byte 0xeb /* jmp <disp8> */
23296 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
23297 2:
23298diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
23299index 727a5d4..333818a 100644
23300--- a/arch/x86/lib/copy_page_64.S
23301+++ b/arch/x86/lib/copy_page_64.S
23302@@ -2,12 +2,14 @@
23303
23304 #include <linux/linkage.h>
23305 #include <asm/dwarf2.h>
23306+#include <asm/alternative-asm.h>
23307
23308 ALIGN
23309 copy_page_c:
23310 CFI_STARTPROC
23311 movl $4096/8,%ecx
23312 rep movsq
23313+ pax_force_retaddr
23314 ret
23315 CFI_ENDPROC
23316 ENDPROC(copy_page_c)
23317@@ -38,7 +40,7 @@ ENTRY(copy_page)
23318 movq 16 (%rsi), %rdx
23319 movq 24 (%rsi), %r8
23320 movq 32 (%rsi), %r9
23321- movq 40 (%rsi), %r10
23322+ movq 40 (%rsi), %r13
23323 movq 48 (%rsi), %r11
23324 movq 56 (%rsi), %r12
23325
23326@@ -49,7 +51,7 @@ ENTRY(copy_page)
23327 movq %rdx, 16 (%rdi)
23328 movq %r8, 24 (%rdi)
23329 movq %r9, 32 (%rdi)
23330- movq %r10, 40 (%rdi)
23331+ movq %r13, 40 (%rdi)
23332 movq %r11, 48 (%rdi)
23333 movq %r12, 56 (%rdi)
23334
23335@@ -68,7 +70,7 @@ ENTRY(copy_page)
23336 movq 16 (%rsi), %rdx
23337 movq 24 (%rsi), %r8
23338 movq 32 (%rsi), %r9
23339- movq 40 (%rsi), %r10
23340+ movq 40 (%rsi), %r13
23341 movq 48 (%rsi), %r11
23342 movq 56 (%rsi), %r12
23343
23344@@ -77,7 +79,7 @@ ENTRY(copy_page)
23345 movq %rdx, 16 (%rdi)
23346 movq %r8, 24 (%rdi)
23347 movq %r9, 32 (%rdi)
23348- movq %r10, 40 (%rdi)
23349+ movq %r13, 40 (%rdi)
23350 movq %r11, 48 (%rdi)
23351 movq %r12, 56 (%rdi)
23352
23353@@ -94,6 +96,7 @@ ENTRY(copy_page)
23354 CFI_RESTORE r13
23355 addq $3*8,%rsp
23356 CFI_ADJUST_CFA_OFFSET -3*8
23357+ pax_force_retaddr
23358 ret
23359 .Lcopy_page_end:
23360 CFI_ENDPROC
23361@@ -104,7 +107,7 @@ ENDPROC(copy_page)
23362
23363 #include <asm/cpufeature.h>
23364
23365- .section .altinstr_replacement,"ax"
23366+ .section .altinstr_replacement,"a"
23367 1: .byte 0xeb /* jmp <disp8> */
23368 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
23369 2:
23370diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
23371index af8debd..40c75f3 100644
23372--- a/arch/x86/lib/copy_user_64.S
23373+++ b/arch/x86/lib/copy_user_64.S
23374@@ -15,13 +15,15 @@
23375 #include <asm/asm-offsets.h>
23376 #include <asm/thread_info.h>
23377 #include <asm/cpufeature.h>
23378+#include <asm/pgtable.h>
23379+#include <asm/alternative-asm.h>
23380
23381 .macro ALTERNATIVE_JUMP feature,orig,alt
23382 0:
23383 .byte 0xe9 /* 32bit jump */
23384 .long \orig-1f /* by default jump to orig */
23385 1:
23386- .section .altinstr_replacement,"ax"
23387+ .section .altinstr_replacement,"a"
23388 2: .byte 0xe9 /* near jump with 32bit immediate */
23389 .long \alt-1b /* offset */ /* or alternatively to alt */
23390 .previous
23391@@ -64,55 +66,26 @@
23392 #endif
23393 .endm
23394
23395-/* Standard copy_to_user with segment limit checking */
23396-ENTRY(copy_to_user)
23397- CFI_STARTPROC
23398- GET_THREAD_INFO(%rax)
23399- movq %rdi,%rcx
23400- addq %rdx,%rcx
23401- jc bad_to_user
23402- cmpq TI_addr_limit(%rax),%rcx
23403- ja bad_to_user
23404- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23405- CFI_ENDPROC
23406-ENDPROC(copy_to_user)
23407-
23408-/* Standard copy_from_user with segment limit checking */
23409-ENTRY(copy_from_user)
23410- CFI_STARTPROC
23411- GET_THREAD_INFO(%rax)
23412- movq %rsi,%rcx
23413- addq %rdx,%rcx
23414- jc bad_from_user
23415- cmpq TI_addr_limit(%rax),%rcx
23416- ja bad_from_user
23417- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23418- CFI_ENDPROC
23419-ENDPROC(copy_from_user)
23420-
23421 ENTRY(copy_user_generic)
23422 CFI_STARTPROC
23423 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23424 CFI_ENDPROC
23425 ENDPROC(copy_user_generic)
23426
23427-ENTRY(__copy_from_user_inatomic)
23428- CFI_STARTPROC
23429- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23430- CFI_ENDPROC
23431-ENDPROC(__copy_from_user_inatomic)
23432-
23433 .section .fixup,"ax"
23434 /* must zero dest */
23435 ENTRY(bad_from_user)
23436 bad_from_user:
23437 CFI_STARTPROC
23438+ testl %edx,%edx
23439+ js bad_to_user
23440 movl %edx,%ecx
23441 xorl %eax,%eax
23442 rep
23443 stosb
23444 bad_to_user:
23445 movl %edx,%eax
23446+ pax_force_retaddr
23447 ret
23448 CFI_ENDPROC
23449 ENDPROC(bad_from_user)
23450@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23451 jz 17f
23452 1: movq (%rsi),%r8
23453 2: movq 1*8(%rsi),%r9
23454-3: movq 2*8(%rsi),%r10
23455+3: movq 2*8(%rsi),%rax
23456 4: movq 3*8(%rsi),%r11
23457 5: movq %r8,(%rdi)
23458 6: movq %r9,1*8(%rdi)
23459-7: movq %r10,2*8(%rdi)
23460+7: movq %rax,2*8(%rdi)
23461 8: movq %r11,3*8(%rdi)
23462 9: movq 4*8(%rsi),%r8
23463 10: movq 5*8(%rsi),%r9
23464-11: movq 6*8(%rsi),%r10
23465+11: movq 6*8(%rsi),%rax
23466 12: movq 7*8(%rsi),%r11
23467 13: movq %r8,4*8(%rdi)
23468 14: movq %r9,5*8(%rdi)
23469-15: movq %r10,6*8(%rdi)
23470+15: movq %rax,6*8(%rdi)
23471 16: movq %r11,7*8(%rdi)
23472 leaq 64(%rsi),%rsi
23473 leaq 64(%rdi),%rdi
23474@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
23475 decl %ecx
23476 jnz 21b
23477 23: xor %eax,%eax
23478+ pax_force_retaddr
23479 ret
23480
23481 .section .fixup,"ax"
23482@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
23483 3: rep
23484 movsb
23485 4: xorl %eax,%eax
23486+ pax_force_retaddr
23487 ret
23488
23489 .section .fixup,"ax"
23490diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23491index cb0c112..e3a6895 100644
23492--- a/arch/x86/lib/copy_user_nocache_64.S
23493+++ b/arch/x86/lib/copy_user_nocache_64.S
23494@@ -8,12 +8,14 @@
23495
23496 #include <linux/linkage.h>
23497 #include <asm/dwarf2.h>
23498+#include <asm/alternative-asm.h>
23499
23500 #define FIX_ALIGNMENT 1
23501
23502 #include <asm/current.h>
23503 #include <asm/asm-offsets.h>
23504 #include <asm/thread_info.h>
23505+#include <asm/pgtable.h>
23506
23507 .macro ALIGN_DESTINATION
23508 #ifdef FIX_ALIGNMENT
23509@@ -50,6 +52,15 @@
23510 */
23511 ENTRY(__copy_user_nocache)
23512 CFI_STARTPROC
23513+
23514+#ifdef CONFIG_PAX_MEMORY_UDEREF
23515+ mov $PAX_USER_SHADOW_BASE,%rcx
23516+ cmp %rcx,%rsi
23517+ jae 1f
23518+ add %rcx,%rsi
23519+1:
23520+#endif
23521+
23522 cmpl $8,%edx
23523 jb 20f /* less then 8 bytes, go to byte copy loop */
23524 ALIGN_DESTINATION
23525@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23526 jz 17f
23527 1: movq (%rsi),%r8
23528 2: movq 1*8(%rsi),%r9
23529-3: movq 2*8(%rsi),%r10
23530+3: movq 2*8(%rsi),%rax
23531 4: movq 3*8(%rsi),%r11
23532 5: movnti %r8,(%rdi)
23533 6: movnti %r9,1*8(%rdi)
23534-7: movnti %r10,2*8(%rdi)
23535+7: movnti %rax,2*8(%rdi)
23536 8: movnti %r11,3*8(%rdi)
23537 9: movq 4*8(%rsi),%r8
23538 10: movq 5*8(%rsi),%r9
23539-11: movq 6*8(%rsi),%r10
23540+11: movq 6*8(%rsi),%rax
23541 12: movq 7*8(%rsi),%r11
23542 13: movnti %r8,4*8(%rdi)
23543 14: movnti %r9,5*8(%rdi)
23544-15: movnti %r10,6*8(%rdi)
23545+15: movnti %rax,6*8(%rdi)
23546 16: movnti %r11,7*8(%rdi)
23547 leaq 64(%rsi),%rsi
23548 leaq 64(%rdi),%rdi
23549@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
23550 jnz 21b
23551 23: xorl %eax,%eax
23552 sfence
23553+ pax_force_retaddr
23554 ret
23555
23556 .section .fixup,"ax"
23557diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23558index f0dba36..48cb4d6 100644
23559--- a/arch/x86/lib/csum-copy_64.S
23560+++ b/arch/x86/lib/csum-copy_64.S
23561@@ -8,6 +8,7 @@
23562 #include <linux/linkage.h>
23563 #include <asm/dwarf2.h>
23564 #include <asm/errno.h>
23565+#include <asm/alternative-asm.h>
23566
23567 /*
23568 * Checksum copy with exception handling.
23569@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
23570 CFI_RESTORE rbp
23571 addq $7*8,%rsp
23572 CFI_ADJUST_CFA_OFFSET -7*8
23573+ pax_force_retaddr 0, 1
23574 ret
23575 CFI_RESTORE_STATE
23576
23577diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23578index 459b58a..9570bc7 100644
23579--- a/arch/x86/lib/csum-wrappers_64.c
23580+++ b/arch/x86/lib/csum-wrappers_64.c
23581@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23582 len -= 2;
23583 }
23584 }
23585- isum = csum_partial_copy_generic((__force const void *)src,
23586+
23587+#ifdef CONFIG_PAX_MEMORY_UDEREF
23588+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23589+ src += PAX_USER_SHADOW_BASE;
23590+#endif
23591+
23592+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
23593 dst, len, isum, errp, NULL);
23594 if (unlikely(*errp))
23595 goto out_err;
23596@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23597 }
23598
23599 *errp = 0;
23600- return csum_partial_copy_generic(src, (void __force *)dst,
23601+
23602+#ifdef CONFIG_PAX_MEMORY_UDEREF
23603+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
23604+ dst += PAX_USER_SHADOW_BASE;
23605+#endif
23606+
23607+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
23608 len, isum, NULL, errp);
23609 }
23610 EXPORT_SYMBOL(csum_partial_copy_to_user);
23611diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
23612index ff485d3..b6372ce 100644
23613--- a/arch/x86/lib/delay.c
23614+++ b/arch/x86/lib/delay.c
23615@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
23616 }
23617
23618 /* TSC based delay: */
23619-static void delay_tsc(unsigned long loops)
23620+static void delay_tsc(unsigned long __loops)
23621 {
23622- unsigned long bclock, now;
23623+ u32 bclock, now, loops = __loops;
23624 int cpu;
23625
23626 preempt_disable();
23627diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23628index 51f1504..ddac4c1 100644
23629--- a/arch/x86/lib/getuser.S
23630+++ b/arch/x86/lib/getuser.S
23631@@ -33,15 +33,38 @@
23632 #include <asm/asm-offsets.h>
23633 #include <asm/thread_info.h>
23634 #include <asm/asm.h>
23635+#include <asm/segment.h>
23636+#include <asm/pgtable.h>
23637+#include <asm/alternative-asm.h>
23638+
23639+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23640+#define __copyuser_seg gs;
23641+#else
23642+#define __copyuser_seg
23643+#endif
23644
23645 .text
23646 ENTRY(__get_user_1)
23647 CFI_STARTPROC
23648+
23649+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23650 GET_THREAD_INFO(%_ASM_DX)
23651 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23652 jae bad_get_user
23653-1: movzb (%_ASM_AX),%edx
23654+
23655+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23656+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23657+ cmp %_ASM_DX,%_ASM_AX
23658+ jae 1234f
23659+ add %_ASM_DX,%_ASM_AX
23660+1234:
23661+#endif
23662+
23663+#endif
23664+
23665+1: __copyuser_seg movzb (%_ASM_AX),%edx
23666 xor %eax,%eax
23667+ pax_force_retaddr
23668 ret
23669 CFI_ENDPROC
23670 ENDPROC(__get_user_1)
23671@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
23672 ENTRY(__get_user_2)
23673 CFI_STARTPROC
23674 add $1,%_ASM_AX
23675+
23676+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23677 jc bad_get_user
23678 GET_THREAD_INFO(%_ASM_DX)
23679 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23680 jae bad_get_user
23681-2: movzwl -1(%_ASM_AX),%edx
23682+
23683+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23684+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23685+ cmp %_ASM_DX,%_ASM_AX
23686+ jae 1234f
23687+ add %_ASM_DX,%_ASM_AX
23688+1234:
23689+#endif
23690+
23691+#endif
23692+
23693+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23694 xor %eax,%eax
23695+ pax_force_retaddr
23696 ret
23697 CFI_ENDPROC
23698 ENDPROC(__get_user_2)
23699@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
23700 ENTRY(__get_user_4)
23701 CFI_STARTPROC
23702 add $3,%_ASM_AX
23703+
23704+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23705 jc bad_get_user
23706 GET_THREAD_INFO(%_ASM_DX)
23707 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23708 jae bad_get_user
23709-3: mov -3(%_ASM_AX),%edx
23710+
23711+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23712+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23713+ cmp %_ASM_DX,%_ASM_AX
23714+ jae 1234f
23715+ add %_ASM_DX,%_ASM_AX
23716+1234:
23717+#endif
23718+
23719+#endif
23720+
23721+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23722 xor %eax,%eax
23723+ pax_force_retaddr
23724 ret
23725 CFI_ENDPROC
23726 ENDPROC(__get_user_4)
23727@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
23728 GET_THREAD_INFO(%_ASM_DX)
23729 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23730 jae bad_get_user
23731+
23732+#ifdef CONFIG_PAX_MEMORY_UDEREF
23733+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23734+ cmp %_ASM_DX,%_ASM_AX
23735+ jae 1234f
23736+ add %_ASM_DX,%_ASM_AX
23737+1234:
23738+#endif
23739+
23740 4: movq -7(%_ASM_AX),%_ASM_DX
23741 xor %eax,%eax
23742+ pax_force_retaddr
23743 ret
23744 CFI_ENDPROC
23745 ENDPROC(__get_user_8)
23746@@ -91,6 +152,7 @@ bad_get_user:
23747 CFI_STARTPROC
23748 xor %edx,%edx
23749 mov $(-EFAULT),%_ASM_AX
23750+ pax_force_retaddr
23751 ret
23752 CFI_ENDPROC
23753 END(bad_get_user)
23754diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23755index 05a95e7..326f2fa 100644
23756--- a/arch/x86/lib/iomap_copy_64.S
23757+++ b/arch/x86/lib/iomap_copy_64.S
23758@@ -17,6 +17,7 @@
23759
23760 #include <linux/linkage.h>
23761 #include <asm/dwarf2.h>
23762+#include <asm/alternative-asm.h>
23763
23764 /*
23765 * override generic version in lib/iomap_copy.c
23766@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23767 CFI_STARTPROC
23768 movl %edx,%ecx
23769 rep movsd
23770+ pax_force_retaddr
23771 ret
23772 CFI_ENDPROC
23773 ENDPROC(__iowrite32_copy)
23774diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23775index ad5441e..610e351 100644
23776--- a/arch/x86/lib/memcpy_64.S
23777+++ b/arch/x86/lib/memcpy_64.S
23778@@ -4,6 +4,7 @@
23779
23780 #include <asm/cpufeature.h>
23781 #include <asm/dwarf2.h>
23782+#include <asm/alternative-asm.h>
23783
23784 /*
23785 * memcpy - Copy a memory block.
23786@@ -34,6 +35,7 @@ memcpy_c:
23787 rep movsq
23788 movl %edx, %ecx
23789 rep movsb
23790+ pax_force_retaddr
23791 ret
23792 CFI_ENDPROC
23793 ENDPROC(memcpy_c)
23794@@ -118,6 +120,7 @@ ENTRY(memcpy)
23795 jnz .Lloop_1
23796
23797 .Lend:
23798+ pax_force_retaddr 0, 1
23799 ret
23800 CFI_ENDPROC
23801 ENDPROC(memcpy)
23802@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
23803 * It is also a lot simpler. Use this when possible:
23804 */
23805
23806- .section .altinstr_replacement, "ax"
23807+ .section .altinstr_replacement, "a"
23808 1: .byte 0xeb /* jmp <disp8> */
23809 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
23810 2:
23811diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23812index 2c59481..7e9ba4e 100644
23813--- a/arch/x86/lib/memset_64.S
23814+++ b/arch/x86/lib/memset_64.S
23815@@ -2,6 +2,7 @@
23816
23817 #include <linux/linkage.h>
23818 #include <asm/dwarf2.h>
23819+#include <asm/alternative-asm.h>
23820
23821 /*
23822 * ISO C memset - set a memory block to a byte value.
23823@@ -28,6 +29,7 @@ memset_c:
23824 movl %r8d,%ecx
23825 rep stosb
23826 movq %r9,%rax
23827+ pax_force_retaddr
23828 ret
23829 CFI_ENDPROC
23830 ENDPROC(memset_c)
23831@@ -35,13 +37,13 @@ ENDPROC(memset_c)
23832 ENTRY(memset)
23833 ENTRY(__memset)
23834 CFI_STARTPROC
23835- movq %rdi,%r10
23836 movq %rdx,%r11
23837
23838 /* expand byte value */
23839 movzbl %sil,%ecx
23840 movabs $0x0101010101010101,%rax
23841 mul %rcx /* with rax, clobbers rdx */
23842+ movq %rdi,%rdx
23843
23844 /* align dst */
23845 movl %edi,%r9d
23846@@ -95,7 +97,8 @@ ENTRY(__memset)
23847 jnz .Lloop_1
23848
23849 .Lende:
23850- movq %r10,%rax
23851+ movq %rdx,%rax
23852+ pax_force_retaddr
23853 ret
23854
23855 CFI_RESTORE_STATE
23856@@ -118,7 +121,7 @@ ENDPROC(__memset)
23857
23858 #include <asm/cpufeature.h>
23859
23860- .section .altinstr_replacement,"ax"
23861+ .section .altinstr_replacement,"a"
23862 1: .byte 0xeb /* jmp <disp8> */
23863 .byte (memset_c - memset) - (2f - 1b) /* offset */
23864 2:
23865diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23866index c9f2d9b..e7fd2c0 100644
23867--- a/arch/x86/lib/mmx_32.c
23868+++ b/arch/x86/lib/mmx_32.c
23869@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23870 {
23871 void *p;
23872 int i;
23873+ unsigned long cr0;
23874
23875 if (unlikely(in_interrupt()))
23876 return __memcpy(to, from, len);
23877@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23878 kernel_fpu_begin();
23879
23880 __asm__ __volatile__ (
23881- "1: prefetch (%0)\n" /* This set is 28 bytes */
23882- " prefetch 64(%0)\n"
23883- " prefetch 128(%0)\n"
23884- " prefetch 192(%0)\n"
23885- " prefetch 256(%0)\n"
23886+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23887+ " prefetch 64(%1)\n"
23888+ " prefetch 128(%1)\n"
23889+ " prefetch 192(%1)\n"
23890+ " prefetch 256(%1)\n"
23891 "2: \n"
23892 ".section .fixup, \"ax\"\n"
23893- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23894+ "3: \n"
23895+
23896+#ifdef CONFIG_PAX_KERNEXEC
23897+ " movl %%cr0, %0\n"
23898+ " movl %0, %%eax\n"
23899+ " andl $0xFFFEFFFF, %%eax\n"
23900+ " movl %%eax, %%cr0\n"
23901+#endif
23902+
23903+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23904+
23905+#ifdef CONFIG_PAX_KERNEXEC
23906+ " movl %0, %%cr0\n"
23907+#endif
23908+
23909 " jmp 2b\n"
23910 ".previous\n"
23911 _ASM_EXTABLE(1b, 3b)
23912- : : "r" (from));
23913+ : "=&r" (cr0) : "r" (from) : "ax");
23914
23915 for ( ; i > 5; i--) {
23916 __asm__ __volatile__ (
23917- "1: prefetch 320(%0)\n"
23918- "2: movq (%0), %%mm0\n"
23919- " movq 8(%0), %%mm1\n"
23920- " movq 16(%0), %%mm2\n"
23921- " movq 24(%0), %%mm3\n"
23922- " movq %%mm0, (%1)\n"
23923- " movq %%mm1, 8(%1)\n"
23924- " movq %%mm2, 16(%1)\n"
23925- " movq %%mm3, 24(%1)\n"
23926- " movq 32(%0), %%mm0\n"
23927- " movq 40(%0), %%mm1\n"
23928- " movq 48(%0), %%mm2\n"
23929- " movq 56(%0), %%mm3\n"
23930- " movq %%mm0, 32(%1)\n"
23931- " movq %%mm1, 40(%1)\n"
23932- " movq %%mm2, 48(%1)\n"
23933- " movq %%mm3, 56(%1)\n"
23934+ "1: prefetch 320(%1)\n"
23935+ "2: movq (%1), %%mm0\n"
23936+ " movq 8(%1), %%mm1\n"
23937+ " movq 16(%1), %%mm2\n"
23938+ " movq 24(%1), %%mm3\n"
23939+ " movq %%mm0, (%2)\n"
23940+ " movq %%mm1, 8(%2)\n"
23941+ " movq %%mm2, 16(%2)\n"
23942+ " movq %%mm3, 24(%2)\n"
23943+ " movq 32(%1), %%mm0\n"
23944+ " movq 40(%1), %%mm1\n"
23945+ " movq 48(%1), %%mm2\n"
23946+ " movq 56(%1), %%mm3\n"
23947+ " movq %%mm0, 32(%2)\n"
23948+ " movq %%mm1, 40(%2)\n"
23949+ " movq %%mm2, 48(%2)\n"
23950+ " movq %%mm3, 56(%2)\n"
23951 ".section .fixup, \"ax\"\n"
23952- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23953+ "3:\n"
23954+
23955+#ifdef CONFIG_PAX_KERNEXEC
23956+ " movl %%cr0, %0\n"
23957+ " movl %0, %%eax\n"
23958+ " andl $0xFFFEFFFF, %%eax\n"
23959+ " movl %%eax, %%cr0\n"
23960+#endif
23961+
23962+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23963+
23964+#ifdef CONFIG_PAX_KERNEXEC
23965+ " movl %0, %%cr0\n"
23966+#endif
23967+
23968 " jmp 2b\n"
23969 ".previous\n"
23970 _ASM_EXTABLE(1b, 3b)
23971- : : "r" (from), "r" (to) : "memory");
23972+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23973
23974 from += 64;
23975 to += 64;
23976@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23977 static void fast_copy_page(void *to, void *from)
23978 {
23979 int i;
23980+ unsigned long cr0;
23981
23982 kernel_fpu_begin();
23983
23984@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23985 * but that is for later. -AV
23986 */
23987 __asm__ __volatile__(
23988- "1: prefetch (%0)\n"
23989- " prefetch 64(%0)\n"
23990- " prefetch 128(%0)\n"
23991- " prefetch 192(%0)\n"
23992- " prefetch 256(%0)\n"
23993+ "1: prefetch (%1)\n"
23994+ " prefetch 64(%1)\n"
23995+ " prefetch 128(%1)\n"
23996+ " prefetch 192(%1)\n"
23997+ " prefetch 256(%1)\n"
23998 "2: \n"
23999 ".section .fixup, \"ax\"\n"
24000- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24001+ "3: \n"
24002+
24003+#ifdef CONFIG_PAX_KERNEXEC
24004+ " movl %%cr0, %0\n"
24005+ " movl %0, %%eax\n"
24006+ " andl $0xFFFEFFFF, %%eax\n"
24007+ " movl %%eax, %%cr0\n"
24008+#endif
24009+
24010+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24011+
24012+#ifdef CONFIG_PAX_KERNEXEC
24013+ " movl %0, %%cr0\n"
24014+#endif
24015+
24016 " jmp 2b\n"
24017 ".previous\n"
24018- _ASM_EXTABLE(1b, 3b) : : "r" (from));
24019+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24020
24021 for (i = 0; i < (4096-320)/64; i++) {
24022 __asm__ __volatile__ (
24023- "1: prefetch 320(%0)\n"
24024- "2: movq (%0), %%mm0\n"
24025- " movntq %%mm0, (%1)\n"
24026- " movq 8(%0), %%mm1\n"
24027- " movntq %%mm1, 8(%1)\n"
24028- " movq 16(%0), %%mm2\n"
24029- " movntq %%mm2, 16(%1)\n"
24030- " movq 24(%0), %%mm3\n"
24031- " movntq %%mm3, 24(%1)\n"
24032- " movq 32(%0), %%mm4\n"
24033- " movntq %%mm4, 32(%1)\n"
24034- " movq 40(%0), %%mm5\n"
24035- " movntq %%mm5, 40(%1)\n"
24036- " movq 48(%0), %%mm6\n"
24037- " movntq %%mm6, 48(%1)\n"
24038- " movq 56(%0), %%mm7\n"
24039- " movntq %%mm7, 56(%1)\n"
24040+ "1: prefetch 320(%1)\n"
24041+ "2: movq (%1), %%mm0\n"
24042+ " movntq %%mm0, (%2)\n"
24043+ " movq 8(%1), %%mm1\n"
24044+ " movntq %%mm1, 8(%2)\n"
24045+ " movq 16(%1), %%mm2\n"
24046+ " movntq %%mm2, 16(%2)\n"
24047+ " movq 24(%1), %%mm3\n"
24048+ " movntq %%mm3, 24(%2)\n"
24049+ " movq 32(%1), %%mm4\n"
24050+ " movntq %%mm4, 32(%2)\n"
24051+ " movq 40(%1), %%mm5\n"
24052+ " movntq %%mm5, 40(%2)\n"
24053+ " movq 48(%1), %%mm6\n"
24054+ " movntq %%mm6, 48(%2)\n"
24055+ " movq 56(%1), %%mm7\n"
24056+ " movntq %%mm7, 56(%2)\n"
24057 ".section .fixup, \"ax\"\n"
24058- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24059+ "3:\n"
24060+
24061+#ifdef CONFIG_PAX_KERNEXEC
24062+ " movl %%cr0, %0\n"
24063+ " movl %0, %%eax\n"
24064+ " andl $0xFFFEFFFF, %%eax\n"
24065+ " movl %%eax, %%cr0\n"
24066+#endif
24067+
24068+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24069+
24070+#ifdef CONFIG_PAX_KERNEXEC
24071+ " movl %0, %%cr0\n"
24072+#endif
24073+
24074 " jmp 2b\n"
24075 ".previous\n"
24076- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
24077+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24078
24079 from += 64;
24080 to += 64;
24081@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
24082 static void fast_copy_page(void *to, void *from)
24083 {
24084 int i;
24085+ unsigned long cr0;
24086
24087 kernel_fpu_begin();
24088
24089 __asm__ __volatile__ (
24090- "1: prefetch (%0)\n"
24091- " prefetch 64(%0)\n"
24092- " prefetch 128(%0)\n"
24093- " prefetch 192(%0)\n"
24094- " prefetch 256(%0)\n"
24095+ "1: prefetch (%1)\n"
24096+ " prefetch 64(%1)\n"
24097+ " prefetch 128(%1)\n"
24098+ " prefetch 192(%1)\n"
24099+ " prefetch 256(%1)\n"
24100 "2: \n"
24101 ".section .fixup, \"ax\"\n"
24102- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24103+ "3: \n"
24104+
24105+#ifdef CONFIG_PAX_KERNEXEC
24106+ " movl %%cr0, %0\n"
24107+ " movl %0, %%eax\n"
24108+ " andl $0xFFFEFFFF, %%eax\n"
24109+ " movl %%eax, %%cr0\n"
24110+#endif
24111+
24112+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24113+
24114+#ifdef CONFIG_PAX_KERNEXEC
24115+ " movl %0, %%cr0\n"
24116+#endif
24117+
24118 " jmp 2b\n"
24119 ".previous\n"
24120- _ASM_EXTABLE(1b, 3b) : : "r" (from));
24121+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24122
24123 for (i = 0; i < 4096/64; i++) {
24124 __asm__ __volatile__ (
24125- "1: prefetch 320(%0)\n"
24126- "2: movq (%0), %%mm0\n"
24127- " movq 8(%0), %%mm1\n"
24128- " movq 16(%0), %%mm2\n"
24129- " movq 24(%0), %%mm3\n"
24130- " movq %%mm0, (%1)\n"
24131- " movq %%mm1, 8(%1)\n"
24132- " movq %%mm2, 16(%1)\n"
24133- " movq %%mm3, 24(%1)\n"
24134- " movq 32(%0), %%mm0\n"
24135- " movq 40(%0), %%mm1\n"
24136- " movq 48(%0), %%mm2\n"
24137- " movq 56(%0), %%mm3\n"
24138- " movq %%mm0, 32(%1)\n"
24139- " movq %%mm1, 40(%1)\n"
24140- " movq %%mm2, 48(%1)\n"
24141- " movq %%mm3, 56(%1)\n"
24142+ "1: prefetch 320(%1)\n"
24143+ "2: movq (%1), %%mm0\n"
24144+ " movq 8(%1), %%mm1\n"
24145+ " movq 16(%1), %%mm2\n"
24146+ " movq 24(%1), %%mm3\n"
24147+ " movq %%mm0, (%2)\n"
24148+ " movq %%mm1, 8(%2)\n"
24149+ " movq %%mm2, 16(%2)\n"
24150+ " movq %%mm3, 24(%2)\n"
24151+ " movq 32(%1), %%mm0\n"
24152+ " movq 40(%1), %%mm1\n"
24153+ " movq 48(%1), %%mm2\n"
24154+ " movq 56(%1), %%mm3\n"
24155+ " movq %%mm0, 32(%2)\n"
24156+ " movq %%mm1, 40(%2)\n"
24157+ " movq %%mm2, 48(%2)\n"
24158+ " movq %%mm3, 56(%2)\n"
24159 ".section .fixup, \"ax\"\n"
24160- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24161+ "3:\n"
24162+
24163+#ifdef CONFIG_PAX_KERNEXEC
24164+ " movl %%cr0, %0\n"
24165+ " movl %0, %%eax\n"
24166+ " andl $0xFFFEFFFF, %%eax\n"
24167+ " movl %%eax, %%cr0\n"
24168+#endif
24169+
24170+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24171+
24172+#ifdef CONFIG_PAX_KERNEXEC
24173+ " movl %0, %%cr0\n"
24174+#endif
24175+
24176 " jmp 2b\n"
24177 ".previous\n"
24178 _ASM_EXTABLE(1b, 3b)
24179- : : "r" (from), "r" (to) : "memory");
24180+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24181
24182 from += 64;
24183 to += 64;
24184diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
24185index 69fa106..adda88b 100644
24186--- a/arch/x86/lib/msr-reg.S
24187+++ b/arch/x86/lib/msr-reg.S
24188@@ -3,6 +3,7 @@
24189 #include <asm/dwarf2.h>
24190 #include <asm/asm.h>
24191 #include <asm/msr.h>
24192+#include <asm/alternative-asm.h>
24193
24194 #ifdef CONFIG_X86_64
24195 /*
24196@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
24197 CFI_STARTPROC
24198 pushq_cfi %rbx
24199 pushq_cfi %rbp
24200- movq %rdi, %r10 /* Save pointer */
24201+ movq %rdi, %r9 /* Save pointer */
24202 xorl %r11d, %r11d /* Return value */
24203 movl (%rdi), %eax
24204 movl 4(%rdi), %ecx
24205@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
24206 movl 28(%rdi), %edi
24207 CFI_REMEMBER_STATE
24208 1: \op
24209-2: movl %eax, (%r10)
24210+2: movl %eax, (%r9)
24211 movl %r11d, %eax /* Return value */
24212- movl %ecx, 4(%r10)
24213- movl %edx, 8(%r10)
24214- movl %ebx, 12(%r10)
24215- movl %ebp, 20(%r10)
24216- movl %esi, 24(%r10)
24217- movl %edi, 28(%r10)
24218+ movl %ecx, 4(%r9)
24219+ movl %edx, 8(%r9)
24220+ movl %ebx, 12(%r9)
24221+ movl %ebp, 20(%r9)
24222+ movl %esi, 24(%r9)
24223+ movl %edi, 28(%r9)
24224 popq_cfi %rbp
24225 popq_cfi %rbx
24226+ pax_force_retaddr
24227 ret
24228 3:
24229 CFI_RESTORE_STATE
24230diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
24231index 36b0d15..d381858 100644
24232--- a/arch/x86/lib/putuser.S
24233+++ b/arch/x86/lib/putuser.S
24234@@ -15,7 +15,9 @@
24235 #include <asm/thread_info.h>
24236 #include <asm/errno.h>
24237 #include <asm/asm.h>
24238-
24239+#include <asm/segment.h>
24240+#include <asm/pgtable.h>
24241+#include <asm/alternative-asm.h>
24242
24243 /*
24244 * __put_user_X
24245@@ -29,52 +31,119 @@
24246 * as they get called from within inline assembly.
24247 */
24248
24249-#define ENTER CFI_STARTPROC ; \
24250- GET_THREAD_INFO(%_ASM_BX)
24251-#define EXIT ret ; \
24252+#define ENTER CFI_STARTPROC
24253+#define EXIT pax_force_retaddr; ret ; \
24254 CFI_ENDPROC
24255
24256+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24257+#define _DEST %_ASM_CX,%_ASM_BX
24258+#else
24259+#define _DEST %_ASM_CX
24260+#endif
24261+
24262+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24263+#define __copyuser_seg gs;
24264+#else
24265+#define __copyuser_seg
24266+#endif
24267+
24268 .text
24269 ENTRY(__put_user_1)
24270 ENTER
24271+
24272+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24273+ GET_THREAD_INFO(%_ASM_BX)
24274 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24275 jae bad_put_user
24276-1: movb %al,(%_ASM_CX)
24277+
24278+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24279+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24280+ cmp %_ASM_BX,%_ASM_CX
24281+ jb 1234f
24282+ xor %ebx,%ebx
24283+1234:
24284+#endif
24285+
24286+#endif
24287+
24288+1: __copyuser_seg movb %al,(_DEST)
24289 xor %eax,%eax
24290 EXIT
24291 ENDPROC(__put_user_1)
24292
24293 ENTRY(__put_user_2)
24294 ENTER
24295+
24296+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24297+ GET_THREAD_INFO(%_ASM_BX)
24298 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24299 sub $1,%_ASM_BX
24300 cmp %_ASM_BX,%_ASM_CX
24301 jae bad_put_user
24302-2: movw %ax,(%_ASM_CX)
24303+
24304+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24305+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24306+ cmp %_ASM_BX,%_ASM_CX
24307+ jb 1234f
24308+ xor %ebx,%ebx
24309+1234:
24310+#endif
24311+
24312+#endif
24313+
24314+2: __copyuser_seg movw %ax,(_DEST)
24315 xor %eax,%eax
24316 EXIT
24317 ENDPROC(__put_user_2)
24318
24319 ENTRY(__put_user_4)
24320 ENTER
24321+
24322+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24323+ GET_THREAD_INFO(%_ASM_BX)
24324 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24325 sub $3,%_ASM_BX
24326 cmp %_ASM_BX,%_ASM_CX
24327 jae bad_put_user
24328-3: movl %eax,(%_ASM_CX)
24329+
24330+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24331+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24332+ cmp %_ASM_BX,%_ASM_CX
24333+ jb 1234f
24334+ xor %ebx,%ebx
24335+1234:
24336+#endif
24337+
24338+#endif
24339+
24340+3: __copyuser_seg movl %eax,(_DEST)
24341 xor %eax,%eax
24342 EXIT
24343 ENDPROC(__put_user_4)
24344
24345 ENTRY(__put_user_8)
24346 ENTER
24347+
24348+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24349+ GET_THREAD_INFO(%_ASM_BX)
24350 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24351 sub $7,%_ASM_BX
24352 cmp %_ASM_BX,%_ASM_CX
24353 jae bad_put_user
24354-4: mov %_ASM_AX,(%_ASM_CX)
24355+
24356+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24357+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24358+ cmp %_ASM_BX,%_ASM_CX
24359+ jb 1234f
24360+ xor %ebx,%ebx
24361+1234:
24362+#endif
24363+
24364+#endif
24365+
24366+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24367 #ifdef CONFIG_X86_32
24368-5: movl %edx,4(%_ASM_CX)
24369+5: __copyuser_seg movl %edx,4(_DEST)
24370 #endif
24371 xor %eax,%eax
24372 EXIT
24373diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
24374index 05ea55f..6345b9a 100644
24375--- a/arch/x86/lib/rwlock_64.S
24376+++ b/arch/x86/lib/rwlock_64.S
24377@@ -2,6 +2,7 @@
24378
24379 #include <linux/linkage.h>
24380 #include <asm/rwlock.h>
24381+#include <asm/asm.h>
24382 #include <asm/alternative-asm.h>
24383 #include <asm/dwarf2.h>
24384
24385@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
24386 CFI_STARTPROC
24387 LOCK_PREFIX
24388 addl $RW_LOCK_BIAS,(%rdi)
24389+
24390+#ifdef CONFIG_PAX_REFCOUNT
24391+ jno 1234f
24392+ LOCK_PREFIX
24393+ subl $RW_LOCK_BIAS,(%rdi)
24394+ int $4
24395+1234:
24396+ _ASM_EXTABLE(1234b, 1234b)
24397+#endif
24398+
24399 1: rep
24400 nop
24401 cmpl $RW_LOCK_BIAS,(%rdi)
24402 jne 1b
24403 LOCK_PREFIX
24404 subl $RW_LOCK_BIAS,(%rdi)
24405+
24406+#ifdef CONFIG_PAX_REFCOUNT
24407+ jno 1234f
24408+ LOCK_PREFIX
24409+ addl $RW_LOCK_BIAS,(%rdi)
24410+ int $4
24411+1234:
24412+ _ASM_EXTABLE(1234b, 1234b)
24413+#endif
24414+
24415 jnz __write_lock_failed
24416+ pax_force_retaddr
24417 ret
24418 CFI_ENDPROC
24419 END(__write_lock_failed)
24420@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
24421 CFI_STARTPROC
24422 LOCK_PREFIX
24423 incl (%rdi)
24424+
24425+#ifdef CONFIG_PAX_REFCOUNT
24426+ jno 1234f
24427+ LOCK_PREFIX
24428+ decl (%rdi)
24429+ int $4
24430+1234:
24431+ _ASM_EXTABLE(1234b, 1234b)
24432+#endif
24433+
24434 1: rep
24435 nop
24436 cmpl $1,(%rdi)
24437 js 1b
24438 LOCK_PREFIX
24439 decl (%rdi)
24440+
24441+#ifdef CONFIG_PAX_REFCOUNT
24442+ jno 1234f
24443+ LOCK_PREFIX
24444+ incl (%rdi)
24445+ int $4
24446+1234:
24447+ _ASM_EXTABLE(1234b, 1234b)
24448+#endif
24449+
24450 js __read_lock_failed
24451+ pax_force_retaddr
24452 ret
24453 CFI_ENDPROC
24454 END(__read_lock_failed)
24455diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
24456index 15acecf..f768b10 100644
24457--- a/arch/x86/lib/rwsem_64.S
24458+++ b/arch/x86/lib/rwsem_64.S
24459@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
24460 call rwsem_down_read_failed
24461 popq %rdx
24462 restore_common_regs
24463+ pax_force_retaddr
24464 ret
24465 ENDPROC(call_rwsem_down_read_failed)
24466
24467@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
24468 movq %rax,%rdi
24469 call rwsem_down_write_failed
24470 restore_common_regs
24471+ pax_force_retaddr
24472 ret
24473 ENDPROC(call_rwsem_down_write_failed)
24474
24475@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
24476 movq %rax,%rdi
24477 call rwsem_wake
24478 restore_common_regs
24479-1: ret
24480+1: pax_force_retaddr
24481+ ret
24482 ENDPROC(call_rwsem_wake)
24483
24484 /* Fix up special calling conventions */
24485@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
24486 call rwsem_downgrade_wake
24487 popq %rdx
24488 restore_common_regs
24489+ pax_force_retaddr
24490 ret
24491 ENDPROC(call_rwsem_downgrade_wake)
24492diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24493index bf9a7d5..fb06ab5 100644
24494--- a/arch/x86/lib/thunk_64.S
24495+++ b/arch/x86/lib/thunk_64.S
24496@@ -10,7 +10,8 @@
24497 #include <asm/dwarf2.h>
24498 #include <asm/calling.h>
24499 #include <asm/rwlock.h>
24500-
24501+ #include <asm/alternative-asm.h>
24502+
24503 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24504 .macro thunk name,func
24505 .globl \name
24506@@ -70,6 +71,7 @@
24507 SAVE_ARGS
24508 restore:
24509 RESTORE_ARGS
24510+ pax_force_retaddr
24511 ret
24512 CFI_ENDPROC
24513
24514@@ -77,5 +79,6 @@ restore:
24515 SAVE_ARGS
24516 restore_norax:
24517 RESTORE_ARGS 1
24518+ pax_force_retaddr
24519 ret
24520 CFI_ENDPROC
24521diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24522index 1f118d4..8e0ead9 100644
24523--- a/arch/x86/lib/usercopy_32.c
24524+++ b/arch/x86/lib/usercopy_32.c
24525@@ -43,7 +43,7 @@ do { \
24526 __asm__ __volatile__( \
24527 " testl %1,%1\n" \
24528 " jz 2f\n" \
24529- "0: lodsb\n" \
24530+ "0: "__copyuser_seg"lodsb\n" \
24531 " stosb\n" \
24532 " testb %%al,%%al\n" \
24533 " jz 1f\n" \
24534@@ -83,7 +83,7 @@ do { \
24535 * and returns @count.
24536 */
24537 long
24538-__strncpy_from_user(char *dst, const char __user *src, long count)
24539+__strncpy_from_user(char *dst, const char __user *src, unsigned long count)
24540 {
24541 long res;
24542 __do_strncpy_from_user(dst, src, count, res);
24543@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__strncpy_from_user);
24544 * and returns @count.
24545 */
24546 long
24547-strncpy_from_user(char *dst, const char __user *src, long count)
24548+strncpy_from_user(char *dst, const char __user *src, unsigned long count)
24549 {
24550 long res = -EFAULT;
24551 if (access_ok(VERIFY_READ, src, 1))
24552@@ -128,10 +128,12 @@ do { \
24553 int __d0; \
24554 might_fault(); \
24555 __asm__ __volatile__( \
24556+ __COPYUSER_SET_ES \
24557 "0: rep; stosl\n" \
24558 " movl %2,%0\n" \
24559 "1: rep; stosb\n" \
24560 "2:\n" \
24561+ __COPYUSER_RESTORE_ES \
24562 ".section .fixup,\"ax\"\n" \
24563 "3: lea 0(%2,%0,4),%0\n" \
24564 " jmp 2b\n" \
24565@@ -192,7 +194,7 @@ EXPORT_SYMBOL(__clear_user);
24566 * On exception, returns 0.
24567 * If the string is too long, returns a value greater than @n.
24568 */
24569-long strnlen_user(const char __user *s, long n)
24570+long strnlen_user(const char __user *s, unsigned long n)
24571 {
24572 unsigned long mask = -__addr_ok(s);
24573 unsigned long res, tmp;
24574@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
24575 might_fault();
24576
24577 __asm__ __volatile__(
24578+ __COPYUSER_SET_ES
24579 " testl %0, %0\n"
24580 " jz 3f\n"
24581 " andl %0,%%ecx\n"
24582@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
24583 " subl %%ecx,%0\n"
24584 " addl %0,%%eax\n"
24585 "1:\n"
24586+ __COPYUSER_RESTORE_ES
24587 ".section .fixup,\"ax\"\n"
24588 "2: xorl %%eax,%%eax\n"
24589 " jmp 1b\n"
24590@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
24591
24592 #ifdef CONFIG_X86_INTEL_USERCOPY
24593 static unsigned long
24594-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24595+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24596 {
24597 int d0, d1;
24598 __asm__ __volatile__(
24599@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24600 " .align 2,0x90\n"
24601 "3: movl 0(%4), %%eax\n"
24602 "4: movl 4(%4), %%edx\n"
24603- "5: movl %%eax, 0(%3)\n"
24604- "6: movl %%edx, 4(%3)\n"
24605+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24606+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24607 "7: movl 8(%4), %%eax\n"
24608 "8: movl 12(%4),%%edx\n"
24609- "9: movl %%eax, 8(%3)\n"
24610- "10: movl %%edx, 12(%3)\n"
24611+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24612+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24613 "11: movl 16(%4), %%eax\n"
24614 "12: movl 20(%4), %%edx\n"
24615- "13: movl %%eax, 16(%3)\n"
24616- "14: movl %%edx, 20(%3)\n"
24617+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24618+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24619 "15: movl 24(%4), %%eax\n"
24620 "16: movl 28(%4), %%edx\n"
24621- "17: movl %%eax, 24(%3)\n"
24622- "18: movl %%edx, 28(%3)\n"
24623+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24624+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24625 "19: movl 32(%4), %%eax\n"
24626 "20: movl 36(%4), %%edx\n"
24627- "21: movl %%eax, 32(%3)\n"
24628- "22: movl %%edx, 36(%3)\n"
24629+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24630+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24631 "23: movl 40(%4), %%eax\n"
24632 "24: movl 44(%4), %%edx\n"
24633- "25: movl %%eax, 40(%3)\n"
24634- "26: movl %%edx, 44(%3)\n"
24635+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24636+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24637 "27: movl 48(%4), %%eax\n"
24638 "28: movl 52(%4), %%edx\n"
24639- "29: movl %%eax, 48(%3)\n"
24640- "30: movl %%edx, 52(%3)\n"
24641+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24642+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24643 "31: movl 56(%4), %%eax\n"
24644 "32: movl 60(%4), %%edx\n"
24645- "33: movl %%eax, 56(%3)\n"
24646- "34: movl %%edx, 60(%3)\n"
24647+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24648+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24649 " addl $-64, %0\n"
24650 " addl $64, %4\n"
24651 " addl $64, %3\n"
24652@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24653 " shrl $2, %0\n"
24654 " andl $3, %%eax\n"
24655 " cld\n"
24656+ __COPYUSER_SET_ES
24657 "99: rep; movsl\n"
24658 "36: movl %%eax, %0\n"
24659 "37: rep; movsb\n"
24660 "100:\n"
24661+ __COPYUSER_RESTORE_ES
24662 ".section .fixup,\"ax\"\n"
24663 "101: lea 0(%%eax,%0,4),%0\n"
24664 " jmp 100b\n"
24665@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24666 }
24667
24668 static unsigned long
24669+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24670+{
24671+ int d0, d1;
24672+ __asm__ __volatile__(
24673+ " .align 2,0x90\n"
24674+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24675+ " cmpl $67, %0\n"
24676+ " jbe 3f\n"
24677+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24678+ " .align 2,0x90\n"
24679+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24680+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24681+ "5: movl %%eax, 0(%3)\n"
24682+ "6: movl %%edx, 4(%3)\n"
24683+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24684+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24685+ "9: movl %%eax, 8(%3)\n"
24686+ "10: movl %%edx, 12(%3)\n"
24687+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24688+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24689+ "13: movl %%eax, 16(%3)\n"
24690+ "14: movl %%edx, 20(%3)\n"
24691+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24692+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24693+ "17: movl %%eax, 24(%3)\n"
24694+ "18: movl %%edx, 28(%3)\n"
24695+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24696+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24697+ "21: movl %%eax, 32(%3)\n"
24698+ "22: movl %%edx, 36(%3)\n"
24699+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24700+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24701+ "25: movl %%eax, 40(%3)\n"
24702+ "26: movl %%edx, 44(%3)\n"
24703+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24704+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24705+ "29: movl %%eax, 48(%3)\n"
24706+ "30: movl %%edx, 52(%3)\n"
24707+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24708+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24709+ "33: movl %%eax, 56(%3)\n"
24710+ "34: movl %%edx, 60(%3)\n"
24711+ " addl $-64, %0\n"
24712+ " addl $64, %4\n"
24713+ " addl $64, %3\n"
24714+ " cmpl $63, %0\n"
24715+ " ja 1b\n"
24716+ "35: movl %0, %%eax\n"
24717+ " shrl $2, %0\n"
24718+ " andl $3, %%eax\n"
24719+ " cld\n"
24720+ "99: rep; "__copyuser_seg" movsl\n"
24721+ "36: movl %%eax, %0\n"
24722+ "37: rep; "__copyuser_seg" movsb\n"
24723+ "100:\n"
24724+ ".section .fixup,\"ax\"\n"
24725+ "101: lea 0(%%eax,%0,4),%0\n"
24726+ " jmp 100b\n"
24727+ ".previous\n"
24728+ ".section __ex_table,\"a\"\n"
24729+ " .align 4\n"
24730+ " .long 1b,100b\n"
24731+ " .long 2b,100b\n"
24732+ " .long 3b,100b\n"
24733+ " .long 4b,100b\n"
24734+ " .long 5b,100b\n"
24735+ " .long 6b,100b\n"
24736+ " .long 7b,100b\n"
24737+ " .long 8b,100b\n"
24738+ " .long 9b,100b\n"
24739+ " .long 10b,100b\n"
24740+ " .long 11b,100b\n"
24741+ " .long 12b,100b\n"
24742+ " .long 13b,100b\n"
24743+ " .long 14b,100b\n"
24744+ " .long 15b,100b\n"
24745+ " .long 16b,100b\n"
24746+ " .long 17b,100b\n"
24747+ " .long 18b,100b\n"
24748+ " .long 19b,100b\n"
24749+ " .long 20b,100b\n"
24750+ " .long 21b,100b\n"
24751+ " .long 22b,100b\n"
24752+ " .long 23b,100b\n"
24753+ " .long 24b,100b\n"
24754+ " .long 25b,100b\n"
24755+ " .long 26b,100b\n"
24756+ " .long 27b,100b\n"
24757+ " .long 28b,100b\n"
24758+ " .long 29b,100b\n"
24759+ " .long 30b,100b\n"
24760+ " .long 31b,100b\n"
24761+ " .long 32b,100b\n"
24762+ " .long 33b,100b\n"
24763+ " .long 34b,100b\n"
24764+ " .long 35b,100b\n"
24765+ " .long 36b,100b\n"
24766+ " .long 37b,100b\n"
24767+ " .long 99b,101b\n"
24768+ ".previous"
24769+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24770+ : "1"(to), "2"(from), "0"(size)
24771+ : "eax", "edx", "memory");
24772+ return size;
24773+}
24774+
24775+static unsigned long
24776+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
24777+static unsigned long
24778 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24779 {
24780 int d0, d1;
24781 __asm__ __volatile__(
24782 " .align 2,0x90\n"
24783- "0: movl 32(%4), %%eax\n"
24784+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24785 " cmpl $67, %0\n"
24786 " jbe 2f\n"
24787- "1: movl 64(%4), %%eax\n"
24788+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24789 " .align 2,0x90\n"
24790- "2: movl 0(%4), %%eax\n"
24791- "21: movl 4(%4), %%edx\n"
24792+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24793+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24794 " movl %%eax, 0(%3)\n"
24795 " movl %%edx, 4(%3)\n"
24796- "3: movl 8(%4), %%eax\n"
24797- "31: movl 12(%4),%%edx\n"
24798+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24799+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24800 " movl %%eax, 8(%3)\n"
24801 " movl %%edx, 12(%3)\n"
24802- "4: movl 16(%4), %%eax\n"
24803- "41: movl 20(%4), %%edx\n"
24804+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24805+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24806 " movl %%eax, 16(%3)\n"
24807 " movl %%edx, 20(%3)\n"
24808- "10: movl 24(%4), %%eax\n"
24809- "51: movl 28(%4), %%edx\n"
24810+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24811+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24812 " movl %%eax, 24(%3)\n"
24813 " movl %%edx, 28(%3)\n"
24814- "11: movl 32(%4), %%eax\n"
24815- "61: movl 36(%4), %%edx\n"
24816+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24817+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24818 " movl %%eax, 32(%3)\n"
24819 " movl %%edx, 36(%3)\n"
24820- "12: movl 40(%4), %%eax\n"
24821- "71: movl 44(%4), %%edx\n"
24822+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24823+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24824 " movl %%eax, 40(%3)\n"
24825 " movl %%edx, 44(%3)\n"
24826- "13: movl 48(%4), %%eax\n"
24827- "81: movl 52(%4), %%edx\n"
24828+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24829+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24830 " movl %%eax, 48(%3)\n"
24831 " movl %%edx, 52(%3)\n"
24832- "14: movl 56(%4), %%eax\n"
24833- "91: movl 60(%4), %%edx\n"
24834+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24835+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24836 " movl %%eax, 56(%3)\n"
24837 " movl %%edx, 60(%3)\n"
24838 " addl $-64, %0\n"
24839@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24840 " shrl $2, %0\n"
24841 " andl $3, %%eax\n"
24842 " cld\n"
24843- "6: rep; movsl\n"
24844+ "6: rep; "__copyuser_seg" movsl\n"
24845 " movl %%eax,%0\n"
24846- "7: rep; movsb\n"
24847+ "7: rep; "__copyuser_seg" movsb\n"
24848 "8:\n"
24849 ".section .fixup,\"ax\"\n"
24850 "9: lea 0(%%eax,%0,4),%0\n"
24851@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24852 */
24853
24854 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24855+ const void __user *from, unsigned long size) __size_overflow(3);
24856+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24857 const void __user *from, unsigned long size)
24858 {
24859 int d0, d1;
24860
24861 __asm__ __volatile__(
24862 " .align 2,0x90\n"
24863- "0: movl 32(%4), %%eax\n"
24864+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24865 " cmpl $67, %0\n"
24866 " jbe 2f\n"
24867- "1: movl 64(%4), %%eax\n"
24868+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24869 " .align 2,0x90\n"
24870- "2: movl 0(%4), %%eax\n"
24871- "21: movl 4(%4), %%edx\n"
24872+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24873+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24874 " movnti %%eax, 0(%3)\n"
24875 " movnti %%edx, 4(%3)\n"
24876- "3: movl 8(%4), %%eax\n"
24877- "31: movl 12(%4),%%edx\n"
24878+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24879+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24880 " movnti %%eax, 8(%3)\n"
24881 " movnti %%edx, 12(%3)\n"
24882- "4: movl 16(%4), %%eax\n"
24883- "41: movl 20(%4), %%edx\n"
24884+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24885+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24886 " movnti %%eax, 16(%3)\n"
24887 " movnti %%edx, 20(%3)\n"
24888- "10: movl 24(%4), %%eax\n"
24889- "51: movl 28(%4), %%edx\n"
24890+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24891+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24892 " movnti %%eax, 24(%3)\n"
24893 " movnti %%edx, 28(%3)\n"
24894- "11: movl 32(%4), %%eax\n"
24895- "61: movl 36(%4), %%edx\n"
24896+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24897+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24898 " movnti %%eax, 32(%3)\n"
24899 " movnti %%edx, 36(%3)\n"
24900- "12: movl 40(%4), %%eax\n"
24901- "71: movl 44(%4), %%edx\n"
24902+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24903+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24904 " movnti %%eax, 40(%3)\n"
24905 " movnti %%edx, 44(%3)\n"
24906- "13: movl 48(%4), %%eax\n"
24907- "81: movl 52(%4), %%edx\n"
24908+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24909+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24910 " movnti %%eax, 48(%3)\n"
24911 " movnti %%edx, 52(%3)\n"
24912- "14: movl 56(%4), %%eax\n"
24913- "91: movl 60(%4), %%edx\n"
24914+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24915+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24916 " movnti %%eax, 56(%3)\n"
24917 " movnti %%edx, 60(%3)\n"
24918 " addl $-64, %0\n"
24919@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24920 " shrl $2, %0\n"
24921 " andl $3, %%eax\n"
24922 " cld\n"
24923- "6: rep; movsl\n"
24924+ "6: rep; "__copyuser_seg" movsl\n"
24925 " movl %%eax,%0\n"
24926- "7: rep; movsb\n"
24927+ "7: rep; "__copyuser_seg" movsb\n"
24928 "8:\n"
24929 ".section .fixup,\"ax\"\n"
24930 "9: lea 0(%%eax,%0,4),%0\n"
24931@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24932 }
24933
24934 static unsigned long __copy_user_intel_nocache(void *to,
24935+ const void __user *from, unsigned long size) __size_overflow(3);
24936+static unsigned long __copy_user_intel_nocache(void *to,
24937 const void __user *from, unsigned long size)
24938 {
24939 int d0, d1;
24940
24941 __asm__ __volatile__(
24942 " .align 2,0x90\n"
24943- "0: movl 32(%4), %%eax\n"
24944+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24945 " cmpl $67, %0\n"
24946 " jbe 2f\n"
24947- "1: movl 64(%4), %%eax\n"
24948+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24949 " .align 2,0x90\n"
24950- "2: movl 0(%4), %%eax\n"
24951- "21: movl 4(%4), %%edx\n"
24952+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24953+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24954 " movnti %%eax, 0(%3)\n"
24955 " movnti %%edx, 4(%3)\n"
24956- "3: movl 8(%4), %%eax\n"
24957- "31: movl 12(%4),%%edx\n"
24958+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24959+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24960 " movnti %%eax, 8(%3)\n"
24961 " movnti %%edx, 12(%3)\n"
24962- "4: movl 16(%4), %%eax\n"
24963- "41: movl 20(%4), %%edx\n"
24964+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24965+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24966 " movnti %%eax, 16(%3)\n"
24967 " movnti %%edx, 20(%3)\n"
24968- "10: movl 24(%4), %%eax\n"
24969- "51: movl 28(%4), %%edx\n"
24970+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24971+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24972 " movnti %%eax, 24(%3)\n"
24973 " movnti %%edx, 28(%3)\n"
24974- "11: movl 32(%4), %%eax\n"
24975- "61: movl 36(%4), %%edx\n"
24976+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24977+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24978 " movnti %%eax, 32(%3)\n"
24979 " movnti %%edx, 36(%3)\n"
24980- "12: movl 40(%4), %%eax\n"
24981- "71: movl 44(%4), %%edx\n"
24982+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24983+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24984 " movnti %%eax, 40(%3)\n"
24985 " movnti %%edx, 44(%3)\n"
24986- "13: movl 48(%4), %%eax\n"
24987- "81: movl 52(%4), %%edx\n"
24988+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24989+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24990 " movnti %%eax, 48(%3)\n"
24991 " movnti %%edx, 52(%3)\n"
24992- "14: movl 56(%4), %%eax\n"
24993- "91: movl 60(%4), %%edx\n"
24994+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24995+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24996 " movnti %%eax, 56(%3)\n"
24997 " movnti %%edx, 60(%3)\n"
24998 " addl $-64, %0\n"
24999@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
25000 " shrl $2, %0\n"
25001 " andl $3, %%eax\n"
25002 " cld\n"
25003- "6: rep; movsl\n"
25004+ "6: rep; "__copyuser_seg" movsl\n"
25005 " movl %%eax,%0\n"
25006- "7: rep; movsb\n"
25007+ "7: rep; "__copyuser_seg" movsb\n"
25008 "8:\n"
25009 ".section .fixup,\"ax\"\n"
25010 "9: lea 0(%%eax,%0,4),%0\n"
25011@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
25012 */
25013 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
25014 unsigned long size);
25015-unsigned long __copy_user_intel(void __user *to, const void *from,
25016+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
25017+ unsigned long size);
25018+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
25019 unsigned long size);
25020 unsigned long __copy_user_zeroing_intel_nocache(void *to,
25021 const void __user *from, unsigned long size);
25022 #endif /* CONFIG_X86_INTEL_USERCOPY */
25023
25024 /* Generic arbitrary sized copy. */
25025-#define __copy_user(to, from, size) \
25026+#define __copy_user(to, from, size, prefix, set, restore) \
25027 do { \
25028 int __d0, __d1, __d2; \
25029 __asm__ __volatile__( \
25030+ set \
25031 " cmp $7,%0\n" \
25032 " jbe 1f\n" \
25033 " movl %1,%0\n" \
25034 " negl %0\n" \
25035 " andl $7,%0\n" \
25036 " subl %0,%3\n" \
25037- "4: rep; movsb\n" \
25038+ "4: rep; "prefix"movsb\n" \
25039 " movl %3,%0\n" \
25040 " shrl $2,%0\n" \
25041 " andl $3,%3\n" \
25042 " .align 2,0x90\n" \
25043- "0: rep; movsl\n" \
25044+ "0: rep; "prefix"movsl\n" \
25045 " movl %3,%0\n" \
25046- "1: rep; movsb\n" \
25047+ "1: rep; "prefix"movsb\n" \
25048 "2:\n" \
25049+ restore \
25050 ".section .fixup,\"ax\"\n" \
25051 "5: addl %3,%0\n" \
25052 " jmp 2b\n" \
25053@@ -682,14 +805,14 @@ do { \
25054 " negl %0\n" \
25055 " andl $7,%0\n" \
25056 " subl %0,%3\n" \
25057- "4: rep; movsb\n" \
25058+ "4: rep; "__copyuser_seg"movsb\n" \
25059 " movl %3,%0\n" \
25060 " shrl $2,%0\n" \
25061 " andl $3,%3\n" \
25062 " .align 2,0x90\n" \
25063- "0: rep; movsl\n" \
25064+ "0: rep; "__copyuser_seg"movsl\n" \
25065 " movl %3,%0\n" \
25066- "1: rep; movsb\n" \
25067+ "1: rep; "__copyuser_seg"movsb\n" \
25068 "2:\n" \
25069 ".section .fixup,\"ax\"\n" \
25070 "5: addl %3,%0\n" \
25071@@ -775,9 +898,9 @@ survive:
25072 }
25073 #endif
25074 if (movsl_is_ok(to, from, n))
25075- __copy_user(to, from, n);
25076+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
25077 else
25078- n = __copy_user_intel(to, from, n);
25079+ n = __generic_copy_to_user_intel(to, from, n);
25080 return n;
25081 }
25082 EXPORT_SYMBOL(__copy_to_user_ll);
25083@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
25084 unsigned long n)
25085 {
25086 if (movsl_is_ok(to, from, n))
25087- __copy_user(to, from, n);
25088+ __copy_user(to, from, n, __copyuser_seg, "", "");
25089 else
25090- n = __copy_user_intel((void __user *)to,
25091- (const void *)from, n);
25092+ n = __generic_copy_from_user_intel(to, from, n);
25093 return n;
25094 }
25095 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
25096@@ -827,59 +949,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
25097 if (n > 64 && cpu_has_xmm2)
25098 n = __copy_user_intel_nocache(to, from, n);
25099 else
25100- __copy_user(to, from, n);
25101+ __copy_user(to, from, n, __copyuser_seg, "", "");
25102 #else
25103- __copy_user(to, from, n);
25104+ __copy_user(to, from, n, __copyuser_seg, "", "");
25105 #endif
25106 return n;
25107 }
25108 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
25109
25110-/**
25111- * copy_to_user: - Copy a block of data into user space.
25112- * @to: Destination address, in user space.
25113- * @from: Source address, in kernel space.
25114- * @n: Number of bytes to copy.
25115- *
25116- * Context: User context only. This function may sleep.
25117- *
25118- * Copy data from kernel space to user space.
25119- *
25120- * Returns number of bytes that could not be copied.
25121- * On success, this will be zero.
25122- */
25123-unsigned long
25124-copy_to_user(void __user *to, const void *from, unsigned long n)
25125+#ifdef CONFIG_PAX_MEMORY_UDEREF
25126+void __set_fs(mm_segment_t x)
25127 {
25128- if (access_ok(VERIFY_WRITE, to, n))
25129- n = __copy_to_user(to, from, n);
25130- return n;
25131+ switch (x.seg) {
25132+ case 0:
25133+ loadsegment(gs, 0);
25134+ break;
25135+ case TASK_SIZE_MAX:
25136+ loadsegment(gs, __USER_DS);
25137+ break;
25138+ case -1UL:
25139+ loadsegment(gs, __KERNEL_DS);
25140+ break;
25141+ default:
25142+ BUG();
25143+ }
25144+ return;
25145 }
25146-EXPORT_SYMBOL(copy_to_user);
25147+EXPORT_SYMBOL(__set_fs);
25148
25149-/**
25150- * copy_from_user: - Copy a block of data from user space.
25151- * @to: Destination address, in kernel space.
25152- * @from: Source address, in user space.
25153- * @n: Number of bytes to copy.
25154- *
25155- * Context: User context only. This function may sleep.
25156- *
25157- * Copy data from user space to kernel space.
25158- *
25159- * Returns number of bytes that could not be copied.
25160- * On success, this will be zero.
25161- *
25162- * If some data could not be copied, this function will pad the copied
25163- * data to the requested size using zero bytes.
25164- */
25165-unsigned long
25166-copy_from_user(void *to, const void __user *from, unsigned long n)
25167+void set_fs(mm_segment_t x)
25168 {
25169- if (access_ok(VERIFY_READ, from, n))
25170- n = __copy_from_user(to, from, n);
25171- else
25172- memset(to, 0, n);
25173- return n;
25174+ current_thread_info()->addr_limit = x;
25175+ __set_fs(x);
25176 }
25177-EXPORT_SYMBOL(copy_from_user);
25178+EXPORT_SYMBOL(set_fs);
25179+#endif
25180diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
25181index b7c2849..bab76d3 100644
25182--- a/arch/x86/lib/usercopy_64.c
25183+++ b/arch/x86/lib/usercopy_64.c
25184@@ -39,16 +39,22 @@ do { \
25185 } while (0)
25186
25187 long
25188-__strncpy_from_user(char *dst, const char __user *src, long count)
25189+__strncpy_from_user(char *dst, const char __user *src, unsigned long count)
25190 {
25191 long res;
25192+
25193+#ifdef CONFIG_PAX_MEMORY_UDEREF
25194+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
25195+ src += PAX_USER_SHADOW_BASE;
25196+#endif
25197+
25198 __do_strncpy_from_user(dst, src, count, res);
25199 return res;
25200 }
25201 EXPORT_SYMBOL(__strncpy_from_user);
25202
25203 long
25204-strncpy_from_user(char *dst, const char __user *src, long count)
25205+strncpy_from_user(char *dst, const char __user *src, unsigned long count)
25206 {
25207 long res = -EFAULT;
25208 if (access_ok(VERIFY_READ, src, 1))
25209@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
25210 {
25211 long __d0;
25212 might_fault();
25213+
25214+#ifdef CONFIG_PAX_MEMORY_UDEREF
25215+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
25216+ addr += PAX_USER_SHADOW_BASE;
25217+#endif
25218+
25219 /* no memory constraint because it doesn't change any memory gcc knows
25220 about */
25221 asm volatile(
25222@@ -107,7 +119,7 @@ EXPORT_SYMBOL(clear_user);
25223 * Return 0 on exception, a value greater than N if too long
25224 */
25225
25226-long __strnlen_user(const char __user *s, long n)
25227+long __strnlen_user(const char __user *s, unsigned long n)
25228 {
25229 long res = 0;
25230 char c;
25231@@ -125,7 +137,7 @@ long __strnlen_user(const char __user *s, long n)
25232 }
25233 EXPORT_SYMBOL(__strnlen_user);
25234
25235-long strnlen_user(const char __user *s, long n)
25236+long strnlen_user(const char __user *s, unsigned long n)
25237 {
25238 if (!access_ok(VERIFY_READ, s, 1))
25239 return 0;
25240@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
25241 }
25242 EXPORT_SYMBOL(strlen_user);
25243
25244-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
25245+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
25246 {
25247- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25248- return copy_user_generic((__force void *)to, (__force void *)from, len);
25249- }
25250- return len;
25251+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25252+
25253+#ifdef CONFIG_PAX_MEMORY_UDEREF
25254+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
25255+ to += PAX_USER_SHADOW_BASE;
25256+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
25257+ from += PAX_USER_SHADOW_BASE;
25258+#endif
25259+
25260+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
25261+ }
25262+ return len;
25263 }
25264 EXPORT_SYMBOL(copy_in_user);
25265
25266@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
25267 * it is not necessary to optimize tail handling.
25268 */
25269 unsigned long
25270-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
25271+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
25272 {
25273 char c;
25274 unsigned zero_len;
25275diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
25276index 61b41ca..5fef66a 100644
25277--- a/arch/x86/mm/extable.c
25278+++ b/arch/x86/mm/extable.c
25279@@ -1,14 +1,71 @@
25280 #include <linux/module.h>
25281 #include <linux/spinlock.h>
25282+#include <linux/sort.h>
25283 #include <asm/uaccess.h>
25284+#include <asm/pgtable.h>
25285
25286+/*
25287+ * The exception table needs to be sorted so that the binary
25288+ * search that we use to find entries in it works properly.
25289+ * This is used both for the kernel exception table and for
25290+ * the exception tables of modules that get loaded.
25291+ */
25292+static int cmp_ex(const void *a, const void *b)
25293+{
25294+ const struct exception_table_entry *x = a, *y = b;
25295+
25296+ /* avoid overflow */
25297+ if (x->insn > y->insn)
25298+ return 1;
25299+ if (x->insn < y->insn)
25300+ return -1;
25301+ return 0;
25302+}
25303+
25304+static void swap_ex(void *a, void *b, int size)
25305+{
25306+ struct exception_table_entry t, *x = a, *y = b;
25307+
25308+ t = *x;
25309+
25310+ pax_open_kernel();
25311+ *x = *y;
25312+ *y = t;
25313+ pax_close_kernel();
25314+}
25315+
25316+void sort_extable(struct exception_table_entry *start,
25317+ struct exception_table_entry *finish)
25318+{
25319+ sort(start, finish - start, sizeof(struct exception_table_entry),
25320+ cmp_ex, swap_ex);
25321+}
25322+
25323+#ifdef CONFIG_MODULES
25324+/*
25325+ * If the exception table is sorted, any referring to the module init
25326+ * will be at the beginning or the end.
25327+ */
25328+void trim_init_extable(struct module *m)
25329+{
25330+ /*trim the beginning*/
25331+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
25332+ m->extable++;
25333+ m->num_exentries--;
25334+ }
25335+ /*trim the end*/
25336+ while (m->num_exentries &&
25337+ within_module_init(m->extable[m->num_exentries-1].insn, m))
25338+ m->num_exentries--;
25339+}
25340+#endif /* CONFIG_MODULES */
25341
25342 int fixup_exception(struct pt_regs *regs)
25343 {
25344 const struct exception_table_entry *fixup;
25345
25346 #ifdef CONFIG_PNPBIOS
25347- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
25348+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
25349 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
25350 extern u32 pnp_bios_is_utter_crap;
25351 pnp_bios_is_utter_crap = 1;
25352diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25353index 8ac0d76..ca501e2 100644
25354--- a/arch/x86/mm/fault.c
25355+++ b/arch/x86/mm/fault.c
25356@@ -11,10 +11,19 @@
25357 #include <linux/kprobes.h> /* __kprobes, ... */
25358 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
25359 #include <linux/perf_event.h> /* perf_sw_event */
25360+#include <linux/unistd.h>
25361+#include <linux/compiler.h>
25362
25363 #include <asm/traps.h> /* dotraplinkage, ... */
25364 #include <asm/pgalloc.h> /* pgd_*(), ... */
25365 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25366+#include <asm/vsyscall.h>
25367+#include <asm/tlbflush.h>
25368+
25369+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25370+#include <asm/stacktrace.h>
25371+#include "../kernel/dumpstack.h"
25372+#endif
25373
25374 /*
25375 * Page fault error code bits:
25376@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
25377 int ret = 0;
25378
25379 /* kprobe_running() needs smp_processor_id() */
25380- if (kprobes_built_in() && !user_mode_vm(regs)) {
25381+ if (kprobes_built_in() && !user_mode(regs)) {
25382 preempt_disable();
25383 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25384 ret = 1;
25385@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25386 return !instr_lo || (instr_lo>>1) == 1;
25387 case 0x00:
25388 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25389- if (probe_kernel_address(instr, opcode))
25390+ if (user_mode(regs)) {
25391+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25392+ return 0;
25393+ } else if (probe_kernel_address(instr, opcode))
25394 return 0;
25395
25396 *prefetch = (instr_lo == 0xF) &&
25397@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25398 while (instr < max_instr) {
25399 unsigned char opcode;
25400
25401- if (probe_kernel_address(instr, opcode))
25402+ if (user_mode(regs)) {
25403+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25404+ break;
25405+ } else if (probe_kernel_address(instr, opcode))
25406 break;
25407
25408 instr++;
25409@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25410 force_sig_info(si_signo, &info, tsk);
25411 }
25412
25413+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25414+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25415+#endif
25416+
25417+#ifdef CONFIG_PAX_EMUTRAMP
25418+static int pax_handle_fetch_fault(struct pt_regs *regs);
25419+#endif
25420+
25421+#ifdef CONFIG_PAX_PAGEEXEC
25422+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25423+{
25424+ pgd_t *pgd;
25425+ pud_t *pud;
25426+ pmd_t *pmd;
25427+
25428+ pgd = pgd_offset(mm, address);
25429+ if (!pgd_present(*pgd))
25430+ return NULL;
25431+ pud = pud_offset(pgd, address);
25432+ if (!pud_present(*pud))
25433+ return NULL;
25434+ pmd = pmd_offset(pud, address);
25435+ if (!pmd_present(*pmd))
25436+ return NULL;
25437+ return pmd;
25438+}
25439+#endif
25440+
25441 DEFINE_SPINLOCK(pgd_lock);
25442 LIST_HEAD(pgd_list);
25443
25444@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
25445 address += PMD_SIZE) {
25446
25447 unsigned long flags;
25448+
25449+#ifdef CONFIG_PAX_PER_CPU_PGD
25450+ unsigned long cpu;
25451+#else
25452 struct page *page;
25453+#endif
25454
25455 spin_lock_irqsave(&pgd_lock, flags);
25456+
25457+#ifdef CONFIG_PAX_PER_CPU_PGD
25458+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25459+ pgd_t *pgd = get_cpu_pgd(cpu);
25460+#else
25461 list_for_each_entry(page, &pgd_list, lru) {
25462- if (!vmalloc_sync_one(page_address(page), address))
25463+ pgd_t *pgd = page_address(page);
25464+#endif
25465+
25466+ if (!vmalloc_sync_one(pgd, address))
25467 break;
25468 }
25469 spin_unlock_irqrestore(&pgd_lock, flags);
25470@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
25471 * an interrupt in the middle of a task switch..
25472 */
25473 pgd_paddr = read_cr3();
25474+
25475+#ifdef CONFIG_PAX_PER_CPU_PGD
25476+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25477+#endif
25478+
25479 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25480 if (!pmd_k)
25481 return -1;
25482@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
25483
25484 const pgd_t *pgd_ref = pgd_offset_k(address);
25485 unsigned long flags;
25486+
25487+#ifdef CONFIG_PAX_PER_CPU_PGD
25488+ unsigned long cpu;
25489+#else
25490 struct page *page;
25491+#endif
25492
25493 if (pgd_none(*pgd_ref))
25494 continue;
25495
25496 spin_lock_irqsave(&pgd_lock, flags);
25497+
25498+#ifdef CONFIG_PAX_PER_CPU_PGD
25499+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25500+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25501+#else
25502 list_for_each_entry(page, &pgd_list, lru) {
25503 pgd_t *pgd;
25504 pgd = (pgd_t *)page_address(page) + pgd_index(address);
25505+#endif
25506+
25507 if (pgd_none(*pgd))
25508 set_pgd(pgd, *pgd_ref);
25509 else
25510@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
25511 * happen within a race in page table update. In the later
25512 * case just flush:
25513 */
25514+
25515+#ifdef CONFIG_PAX_PER_CPU_PGD
25516+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25517+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25518+#else
25519 pgd = pgd_offset(current->active_mm, address);
25520+#endif
25521+
25522 pgd_ref = pgd_offset_k(address);
25523 if (pgd_none(*pgd_ref))
25524 return -1;
25525@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25526 static int is_errata100(struct pt_regs *regs, unsigned long address)
25527 {
25528 #ifdef CONFIG_X86_64
25529- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25530+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25531 return 1;
25532 #endif
25533 return 0;
25534@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25535 }
25536
25537 static const char nx_warning[] = KERN_CRIT
25538-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25539+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25540
25541 static void
25542 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25543@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25544 if (!oops_may_print())
25545 return;
25546
25547- if (error_code & PF_INSTR) {
25548+ if (nx_enabled && (error_code & PF_INSTR)) {
25549 unsigned int level;
25550
25551 pte_t *pte = lookup_address(address, &level);
25552
25553 if (pte && pte_present(*pte) && !pte_exec(*pte))
25554- printk(nx_warning, current_uid());
25555+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
25556 }
25557
25558+#ifdef CONFIG_PAX_KERNEXEC
25559+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25560+ if (current->signal->curr_ip)
25561+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25562+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25563+ else
25564+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25565+ current->comm, task_pid_nr(current), current_uid(), current_euid());
25566+ }
25567+#endif
25568+
25569 printk(KERN_ALERT "BUG: unable to handle kernel ");
25570 if (address < PAGE_SIZE)
25571 printk(KERN_CONT "NULL pointer dereference");
25572@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25573 {
25574 struct task_struct *tsk = current;
25575
25576+#ifdef CONFIG_X86_64
25577+ struct mm_struct *mm = tsk->mm;
25578+
25579+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
25580+ if (regs->ip == (unsigned long)vgettimeofday) {
25581+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
25582+ return;
25583+ } else if (regs->ip == (unsigned long)vtime) {
25584+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
25585+ return;
25586+ } else if (regs->ip == (unsigned long)vgetcpu) {
25587+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
25588+ return;
25589+ }
25590+ }
25591+#endif
25592+
25593 /* User mode accesses just cause a SIGSEGV */
25594 if (error_code & PF_USER) {
25595 /*
25596@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25597 if (is_errata100(regs, address))
25598 return;
25599
25600+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25601+ if (pax_is_fetch_fault(regs, error_code, address)) {
25602+
25603+#ifdef CONFIG_PAX_EMUTRAMP
25604+ switch (pax_handle_fetch_fault(regs)) {
25605+ case 2:
25606+ return;
25607+ }
25608+#endif
25609+
25610+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25611+ do_group_exit(SIGKILL);
25612+ }
25613+#endif
25614+
25615 if (unlikely(show_unhandled_signals))
25616 show_signal_msg(regs, error_code, address, tsk);
25617
25618@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25619 if (fault & VM_FAULT_HWPOISON) {
25620 printk(KERN_ERR
25621 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25622- tsk->comm, tsk->pid, address);
25623+ tsk->comm, task_pid_nr(tsk), address);
25624 code = BUS_MCEERR_AR;
25625 }
25626 #endif
25627@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25628 return 1;
25629 }
25630
25631+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25632+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25633+{
25634+ pte_t *pte;
25635+ pmd_t *pmd;
25636+ spinlock_t *ptl;
25637+ unsigned char pte_mask;
25638+
25639+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25640+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25641+ return 0;
25642+
25643+ /* PaX: it's our fault, let's handle it if we can */
25644+
25645+ /* PaX: take a look at read faults before acquiring any locks */
25646+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25647+ /* instruction fetch attempt from a protected page in user mode */
25648+ up_read(&mm->mmap_sem);
25649+
25650+#ifdef CONFIG_PAX_EMUTRAMP
25651+ switch (pax_handle_fetch_fault(regs)) {
25652+ case 2:
25653+ return 1;
25654+ }
25655+#endif
25656+
25657+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25658+ do_group_exit(SIGKILL);
25659+ }
25660+
25661+ pmd = pax_get_pmd(mm, address);
25662+ if (unlikely(!pmd))
25663+ return 0;
25664+
25665+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25666+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25667+ pte_unmap_unlock(pte, ptl);
25668+ return 0;
25669+ }
25670+
25671+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25672+ /* write attempt to a protected page in user mode */
25673+ pte_unmap_unlock(pte, ptl);
25674+ return 0;
25675+ }
25676+
25677+#ifdef CONFIG_SMP
25678+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25679+#else
25680+ if (likely(address > get_limit(regs->cs)))
25681+#endif
25682+ {
25683+ set_pte(pte, pte_mkread(*pte));
25684+ __flush_tlb_one(address);
25685+ pte_unmap_unlock(pte, ptl);
25686+ up_read(&mm->mmap_sem);
25687+ return 1;
25688+ }
25689+
25690+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25691+
25692+ /*
25693+ * PaX: fill DTLB with user rights and retry
25694+ */
25695+ __asm__ __volatile__ (
25696+ "orb %2,(%1)\n"
25697+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25698+/*
25699+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
25700+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25701+ * page fault when examined during a TLB load attempt. this is true not only
25702+ * for PTEs holding a non-present entry but also present entries that will
25703+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25704+ * mechanism). in effect it means that we do *not* need to flush the TLBs
25705+ * for our target pages since their PTEs are simply not in the TLBs at all.
25706+
25707+ * the best thing in omitting it is that we gain around 15-20% speed in the
25708+ * fast path of the page fault handler and can get rid of tracing since we
25709+ * can no longer flush unintended entries.
25710+ */
25711+ "invlpg (%0)\n"
25712+#endif
25713+ __copyuser_seg"testb $0,(%0)\n"
25714+ "xorb %3,(%1)\n"
25715+ :
25716+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25717+ : "memory", "cc");
25718+ pte_unmap_unlock(pte, ptl);
25719+ up_read(&mm->mmap_sem);
25720+ return 1;
25721+}
25722+#endif
25723+
25724 /*
25725 * Handle a spurious fault caused by a stale TLB entry.
25726 *
25727@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
25728 static inline int
25729 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
25730 {
25731+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25732+ return 1;
25733+
25734 if (write) {
25735 /* write, present and write, not present: */
25736 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25737@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25738 {
25739 struct vm_area_struct *vma;
25740 struct task_struct *tsk;
25741- unsigned long address;
25742 struct mm_struct *mm;
25743 int write;
25744 int fault;
25745
25746- tsk = current;
25747- mm = tsk->mm;
25748-
25749 /* Get the faulting address: */
25750- address = read_cr2();
25751+ unsigned long address = read_cr2();
25752+
25753+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25754+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25755+ if (!search_exception_tables(regs->ip)) {
25756+ bad_area_nosemaphore(regs, error_code, address);
25757+ return;
25758+ }
25759+ if (address < PAX_USER_SHADOW_BASE) {
25760+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25761+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
25762+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25763+ } else
25764+ address -= PAX_USER_SHADOW_BASE;
25765+ }
25766+#endif
25767+
25768+ tsk = current;
25769+ mm = tsk->mm;
25770
25771 /*
25772 * Detect and handle instructions that would cause a page fault for
25773@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25774 * User-mode registers count as a user access even for any
25775 * potential system fault or CPU buglet:
25776 */
25777- if (user_mode_vm(regs)) {
25778+ if (user_mode(regs)) {
25779 local_irq_enable();
25780 error_code |= PF_USER;
25781 } else {
25782@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25783 might_sleep();
25784 }
25785
25786+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25787+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25788+ return;
25789+#endif
25790+
25791 vma = find_vma(mm, address);
25792 if (unlikely(!vma)) {
25793 bad_area(regs, error_code, address);
25794@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25795 bad_area(regs, error_code, address);
25796 return;
25797 }
25798- if (error_code & PF_USER) {
25799- /*
25800- * Accessing the stack below %sp is always a bug.
25801- * The large cushion allows instructions like enter
25802- * and pusha to work. ("enter $65535, $31" pushes
25803- * 32 pointers and then decrements %sp by 65535.)
25804- */
25805- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25806- bad_area(regs, error_code, address);
25807- return;
25808- }
25809+ /*
25810+ * Accessing the stack below %sp is always a bug.
25811+ * The large cushion allows instructions like enter
25812+ * and pusha to work. ("enter $65535, $31" pushes
25813+ * 32 pointers and then decrements %sp by 65535.)
25814+ */
25815+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25816+ bad_area(regs, error_code, address);
25817+ return;
25818 }
25819+
25820+#ifdef CONFIG_PAX_SEGMEXEC
25821+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25822+ bad_area(regs, error_code, address);
25823+ return;
25824+ }
25825+#endif
25826+
25827 if (unlikely(expand_stack(vma, address))) {
25828 bad_area(regs, error_code, address);
25829 return;
25830@@ -1146,3 +1390,292 @@ good_area:
25831
25832 up_read(&mm->mmap_sem);
25833 }
25834+
25835+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25836+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25837+{
25838+ struct mm_struct *mm = current->mm;
25839+ unsigned long ip = regs->ip;
25840+
25841+ if (v8086_mode(regs))
25842+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25843+
25844+#ifdef CONFIG_PAX_PAGEEXEC
25845+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25846+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25847+ return true;
25848+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25849+ return true;
25850+ return false;
25851+ }
25852+#endif
25853+
25854+#ifdef CONFIG_PAX_SEGMEXEC
25855+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25856+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25857+ return true;
25858+ return false;
25859+ }
25860+#endif
25861+
25862+ return false;
25863+}
25864+#endif
25865+
25866+#ifdef CONFIG_PAX_EMUTRAMP
25867+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25868+{
25869+ int err;
25870+
25871+ do { /* PaX: libffi trampoline emulation */
25872+ unsigned char mov, jmp;
25873+ unsigned int addr1, addr2;
25874+
25875+#ifdef CONFIG_X86_64
25876+ if ((regs->ip + 9) >> 32)
25877+ break;
25878+#endif
25879+
25880+ err = get_user(mov, (unsigned char __user *)regs->ip);
25881+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25882+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25883+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25884+
25885+ if (err)
25886+ break;
25887+
25888+ if (mov == 0xB8 && jmp == 0xE9) {
25889+ regs->ax = addr1;
25890+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25891+ return 2;
25892+ }
25893+ } while (0);
25894+
25895+ do { /* PaX: gcc trampoline emulation #1 */
25896+ unsigned char mov1, mov2;
25897+ unsigned short jmp;
25898+ unsigned int addr1, addr2;
25899+
25900+#ifdef CONFIG_X86_64
25901+ if ((regs->ip + 11) >> 32)
25902+ break;
25903+#endif
25904+
25905+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25906+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25907+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25908+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25909+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25910+
25911+ if (err)
25912+ break;
25913+
25914+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25915+ regs->cx = addr1;
25916+ regs->ax = addr2;
25917+ regs->ip = addr2;
25918+ return 2;
25919+ }
25920+ } while (0);
25921+
25922+ do { /* PaX: gcc trampoline emulation #2 */
25923+ unsigned char mov, jmp;
25924+ unsigned int addr1, addr2;
25925+
25926+#ifdef CONFIG_X86_64
25927+ if ((regs->ip + 9) >> 32)
25928+ break;
25929+#endif
25930+
25931+ err = get_user(mov, (unsigned char __user *)regs->ip);
25932+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25933+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25934+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25935+
25936+ if (err)
25937+ break;
25938+
25939+ if (mov == 0xB9 && jmp == 0xE9) {
25940+ regs->cx = addr1;
25941+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25942+ return 2;
25943+ }
25944+ } while (0);
25945+
25946+ return 1; /* PaX in action */
25947+}
25948+
25949+#ifdef CONFIG_X86_64
25950+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25951+{
25952+ int err;
25953+
25954+ do { /* PaX: libffi trampoline emulation */
25955+ unsigned short mov1, mov2, jmp1;
25956+ unsigned char stcclc, jmp2;
25957+ unsigned long addr1, addr2;
25958+
25959+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25960+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25961+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25962+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25963+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25964+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25965+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25966+
25967+ if (err)
25968+ break;
25969+
25970+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25971+ regs->r11 = addr1;
25972+ regs->r10 = addr2;
25973+ if (stcclc == 0xF8)
25974+ regs->flags &= ~X86_EFLAGS_CF;
25975+ else
25976+ regs->flags |= X86_EFLAGS_CF;
25977+ regs->ip = addr1;
25978+ return 2;
25979+ }
25980+ } while (0);
25981+
25982+ do { /* PaX: gcc trampoline emulation #1 */
25983+ unsigned short mov1, mov2, jmp1;
25984+ unsigned char jmp2;
25985+ unsigned int addr1;
25986+ unsigned long addr2;
25987+
25988+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25989+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25990+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25991+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25992+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25993+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25994+
25995+ if (err)
25996+ break;
25997+
25998+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25999+ regs->r11 = addr1;
26000+ regs->r10 = addr2;
26001+ regs->ip = addr1;
26002+ return 2;
26003+ }
26004+ } while (0);
26005+
26006+ do { /* PaX: gcc trampoline emulation #2 */
26007+ unsigned short mov1, mov2, jmp1;
26008+ unsigned char jmp2;
26009+ unsigned long addr1, addr2;
26010+
26011+ err = get_user(mov1, (unsigned short __user *)regs->ip);
26012+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
26013+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
26014+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
26015+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
26016+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
26017+
26018+ if (err)
26019+ break;
26020+
26021+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
26022+ regs->r11 = addr1;
26023+ regs->r10 = addr2;
26024+ regs->ip = addr1;
26025+ return 2;
26026+ }
26027+ } while (0);
26028+
26029+ return 1; /* PaX in action */
26030+}
26031+#endif
26032+
26033+/*
26034+ * PaX: decide what to do with offenders (regs->ip = fault address)
26035+ *
26036+ * returns 1 when task should be killed
26037+ * 2 when gcc trampoline was detected
26038+ */
26039+static int pax_handle_fetch_fault(struct pt_regs *regs)
26040+{
26041+ if (v8086_mode(regs))
26042+ return 1;
26043+
26044+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
26045+ return 1;
26046+
26047+#ifdef CONFIG_X86_32
26048+ return pax_handle_fetch_fault_32(regs);
26049+#else
26050+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
26051+ return pax_handle_fetch_fault_32(regs);
26052+ else
26053+ return pax_handle_fetch_fault_64(regs);
26054+#endif
26055+}
26056+#endif
26057+
26058+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26059+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
26060+{
26061+ long i;
26062+
26063+ printk(KERN_ERR "PAX: bytes at PC: ");
26064+ for (i = 0; i < 20; i++) {
26065+ unsigned char c;
26066+ if (get_user(c, (unsigned char __force_user *)pc+i))
26067+ printk(KERN_CONT "?? ");
26068+ else
26069+ printk(KERN_CONT "%02x ", c);
26070+ }
26071+ printk("\n");
26072+
26073+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
26074+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
26075+ unsigned long c;
26076+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
26077+#ifdef CONFIG_X86_32
26078+ printk(KERN_CONT "???????? ");
26079+#else
26080+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
26081+ printk(KERN_CONT "???????? ???????? ");
26082+ else
26083+ printk(KERN_CONT "???????????????? ");
26084+#endif
26085+ } else {
26086+#ifdef CONFIG_X86_64
26087+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
26088+ printk(KERN_CONT "%08x ", (unsigned int)c);
26089+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
26090+ } else
26091+#endif
26092+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
26093+ }
26094+ }
26095+ printk("\n");
26096+}
26097+#endif
26098+
26099+/**
26100+ * probe_kernel_write(): safely attempt to write to a location
26101+ * @dst: address to write to
26102+ * @src: pointer to the data that shall be written
26103+ * @size: size of the data chunk
26104+ *
26105+ * Safely write to address @dst from the buffer at @src. If a kernel fault
26106+ * happens, handle that and return -EFAULT.
26107+ */
26108+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
26109+{
26110+ long ret;
26111+ mm_segment_t old_fs = get_fs();
26112+
26113+ set_fs(KERNEL_DS);
26114+ pagefault_disable();
26115+ pax_open_kernel();
26116+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
26117+ pax_close_kernel();
26118+ pagefault_enable();
26119+ set_fs(old_fs);
26120+
26121+ return ret ? -EFAULT : 0;
26122+}
26123diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
26124index 71da1bc..7a16bf4 100644
26125--- a/arch/x86/mm/gup.c
26126+++ b/arch/x86/mm/gup.c
26127@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
26128 addr = start;
26129 len = (unsigned long) nr_pages << PAGE_SHIFT;
26130 end = start + len;
26131- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26132+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26133 (void __user *)start, len)))
26134 return 0;
26135
26136diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
26137index 63a6ba6..79abd7a 100644
26138--- a/arch/x86/mm/highmem_32.c
26139+++ b/arch/x86/mm/highmem_32.c
26140@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
26141 idx = type + KM_TYPE_NR*smp_processor_id();
26142 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26143 BUG_ON(!pte_none(*(kmap_pte-idx)));
26144+
26145+ pax_open_kernel();
26146 set_pte(kmap_pte-idx, mk_pte(page, prot));
26147+ pax_close_kernel();
26148
26149 return (void *)vaddr;
26150 }
26151diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
26152index f46c3407..6ff9a26 100644
26153--- a/arch/x86/mm/hugetlbpage.c
26154+++ b/arch/x86/mm/hugetlbpage.c
26155@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
26156 struct hstate *h = hstate_file(file);
26157 struct mm_struct *mm = current->mm;
26158 struct vm_area_struct *vma;
26159- unsigned long start_addr;
26160+ unsigned long start_addr, pax_task_size = TASK_SIZE;
26161+
26162+#ifdef CONFIG_PAX_SEGMEXEC
26163+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26164+ pax_task_size = SEGMEXEC_TASK_SIZE;
26165+#endif
26166+
26167+ pax_task_size -= PAGE_SIZE;
26168
26169 if (len > mm->cached_hole_size) {
26170- start_addr = mm->free_area_cache;
26171+ start_addr = mm->free_area_cache;
26172 } else {
26173- start_addr = TASK_UNMAPPED_BASE;
26174- mm->cached_hole_size = 0;
26175+ start_addr = mm->mmap_base;
26176+ mm->cached_hole_size = 0;
26177 }
26178
26179 full_search:
26180@@ -281,26 +288,27 @@ full_search:
26181
26182 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
26183 /* At this point: (!vma || addr < vma->vm_end). */
26184- if (TASK_SIZE - len < addr) {
26185+ if (pax_task_size - len < addr) {
26186 /*
26187 * Start a new search - just in case we missed
26188 * some holes.
26189 */
26190- if (start_addr != TASK_UNMAPPED_BASE) {
26191- start_addr = TASK_UNMAPPED_BASE;
26192+ if (start_addr != mm->mmap_base) {
26193+ start_addr = mm->mmap_base;
26194 mm->cached_hole_size = 0;
26195 goto full_search;
26196 }
26197 return -ENOMEM;
26198 }
26199- if (!vma || addr + len <= vma->vm_start) {
26200- mm->free_area_cache = addr + len;
26201- return addr;
26202- }
26203+ if (check_heap_stack_gap(vma, addr, len))
26204+ break;
26205 if (addr + mm->cached_hole_size < vma->vm_start)
26206 mm->cached_hole_size = vma->vm_start - addr;
26207 addr = ALIGN(vma->vm_end, huge_page_size(h));
26208 }
26209+
26210+ mm->free_area_cache = addr + len;
26211+ return addr;
26212 }
26213
26214 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26215@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26216 {
26217 struct hstate *h = hstate_file(file);
26218 struct mm_struct *mm = current->mm;
26219- struct vm_area_struct *vma, *prev_vma;
26220- unsigned long base = mm->mmap_base, addr = addr0;
26221+ struct vm_area_struct *vma;
26222+ unsigned long base = mm->mmap_base, addr;
26223 unsigned long largest_hole = mm->cached_hole_size;
26224- int first_time = 1;
26225
26226 /* don't allow allocations above current base */
26227 if (mm->free_area_cache > base)
26228@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26229 largest_hole = 0;
26230 mm->free_area_cache = base;
26231 }
26232-try_again:
26233+
26234 /* make sure it can fit in the remaining address space */
26235 if (mm->free_area_cache < len)
26236 goto fail;
26237
26238 /* either no address requested or cant fit in requested address hole */
26239- addr = (mm->free_area_cache - len) & huge_page_mask(h);
26240+ addr = (mm->free_area_cache - len);
26241 do {
26242+ addr &= huge_page_mask(h);
26243+ vma = find_vma(mm, addr);
26244 /*
26245 * Lookup failure means no vma is above this address,
26246 * i.e. return with success:
26247- */
26248- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
26249- return addr;
26250-
26251- /*
26252 * new region fits between prev_vma->vm_end and
26253 * vma->vm_start, use it:
26254 */
26255- if (addr + len <= vma->vm_start &&
26256- (!prev_vma || (addr >= prev_vma->vm_end))) {
26257+ if (check_heap_stack_gap(vma, addr, len)) {
26258 /* remember the address as a hint for next time */
26259- mm->cached_hole_size = largest_hole;
26260- return (mm->free_area_cache = addr);
26261- } else {
26262- /* pull free_area_cache down to the first hole */
26263- if (mm->free_area_cache == vma->vm_end) {
26264- mm->free_area_cache = vma->vm_start;
26265- mm->cached_hole_size = largest_hole;
26266- }
26267+ mm->cached_hole_size = largest_hole;
26268+ return (mm->free_area_cache = addr);
26269+ }
26270+ /* pull free_area_cache down to the first hole */
26271+ if (mm->free_area_cache == vma->vm_end) {
26272+ mm->free_area_cache = vma->vm_start;
26273+ mm->cached_hole_size = largest_hole;
26274 }
26275
26276 /* remember the largest hole we saw so far */
26277 if (addr + largest_hole < vma->vm_start)
26278- largest_hole = vma->vm_start - addr;
26279+ largest_hole = vma->vm_start - addr;
26280
26281 /* try just below the current vma->vm_start */
26282- addr = (vma->vm_start - len) & huge_page_mask(h);
26283- } while (len <= vma->vm_start);
26284+ addr = skip_heap_stack_gap(vma, len);
26285+ } while (!IS_ERR_VALUE(addr));
26286
26287 fail:
26288 /*
26289- * if hint left us with no space for the requested
26290- * mapping then try again:
26291- */
26292- if (first_time) {
26293- mm->free_area_cache = base;
26294- largest_hole = 0;
26295- first_time = 0;
26296- goto try_again;
26297- }
26298- /*
26299 * A failed mmap() very likely causes application failure,
26300 * so fall back to the bottom-up function here. This scenario
26301 * can happen with large stack limits and large mmap()
26302 * allocations.
26303 */
26304- mm->free_area_cache = TASK_UNMAPPED_BASE;
26305+
26306+#ifdef CONFIG_PAX_SEGMEXEC
26307+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26308+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
26309+ else
26310+#endif
26311+
26312+ mm->mmap_base = TASK_UNMAPPED_BASE;
26313+
26314+#ifdef CONFIG_PAX_RANDMMAP
26315+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26316+ mm->mmap_base += mm->delta_mmap;
26317+#endif
26318+
26319+ mm->free_area_cache = mm->mmap_base;
26320 mm->cached_hole_size = ~0UL;
26321 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
26322 len, pgoff, flags);
26323@@ -387,6 +393,7 @@ fail:
26324 /*
26325 * Restore the topdown base:
26326 */
26327+ mm->mmap_base = base;
26328 mm->free_area_cache = base;
26329 mm->cached_hole_size = ~0UL;
26330
26331@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26332 struct hstate *h = hstate_file(file);
26333 struct mm_struct *mm = current->mm;
26334 struct vm_area_struct *vma;
26335+ unsigned long pax_task_size = TASK_SIZE;
26336
26337 if (len & ~huge_page_mask(h))
26338 return -EINVAL;
26339- if (len > TASK_SIZE)
26340+
26341+#ifdef CONFIG_PAX_SEGMEXEC
26342+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26343+ pax_task_size = SEGMEXEC_TASK_SIZE;
26344+#endif
26345+
26346+ pax_task_size -= PAGE_SIZE;
26347+
26348+ if (len > pax_task_size)
26349 return -ENOMEM;
26350
26351 if (flags & MAP_FIXED) {
26352@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26353 if (addr) {
26354 addr = ALIGN(addr, huge_page_size(h));
26355 vma = find_vma(mm, addr);
26356- if (TASK_SIZE - len >= addr &&
26357- (!vma || addr + len <= vma->vm_start))
26358+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
26359 return addr;
26360 }
26361 if (mm->get_unmapped_area == arch_get_unmapped_area)
26362diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
26363index 73ffd55..f61c2a7 100644
26364--- a/arch/x86/mm/init.c
26365+++ b/arch/x86/mm/init.c
26366@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
26367 * cause a hotspot and fill up ZONE_DMA. The page tables
26368 * need roughly 0.5KB per GB.
26369 */
26370-#ifdef CONFIG_X86_32
26371- start = 0x7000;
26372-#else
26373- start = 0x8000;
26374-#endif
26375+ start = 0x100000;
26376 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
26377 tables, PAGE_SIZE);
26378 if (e820_table_start == -1UL)
26379@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26380 #endif
26381
26382 set_nx();
26383- if (nx_enabled)
26384+ if (nx_enabled && cpu_has_nx)
26385 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
26386
26387 /* Enable PSE if available */
26388@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26389 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26390 * mmio resources as well as potential bios/acpi data regions.
26391 */
26392+
26393 int devmem_is_allowed(unsigned long pagenr)
26394 {
26395+#ifdef CONFIG_GRKERNSEC_KMEM
26396+ /* allow BDA */
26397+ if (!pagenr)
26398+ return 1;
26399+ /* allow EBDA */
26400+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
26401+ return 1;
26402+ /* allow ISA/video mem */
26403+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26404+ return 1;
26405+ /* throw out everything else below 1MB */
26406+ if (pagenr <= 256)
26407+ return 0;
26408+#else
26409 if (pagenr <= 256)
26410 return 1;
26411+#endif
26412+
26413 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26414 return 0;
26415 if (!page_is_ram(pagenr))
26416@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26417
26418 void free_initmem(void)
26419 {
26420+
26421+#ifdef CONFIG_PAX_KERNEXEC
26422+#ifdef CONFIG_X86_32
26423+ /* PaX: limit KERNEL_CS to actual size */
26424+ unsigned long addr, limit;
26425+ struct desc_struct d;
26426+ int cpu;
26427+
26428+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26429+ limit = (limit - 1UL) >> PAGE_SHIFT;
26430+
26431+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26432+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26433+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26434+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26435+ }
26436+
26437+ /* PaX: make KERNEL_CS read-only */
26438+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26439+ if (!paravirt_enabled())
26440+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26441+/*
26442+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26443+ pgd = pgd_offset_k(addr);
26444+ pud = pud_offset(pgd, addr);
26445+ pmd = pmd_offset(pud, addr);
26446+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26447+ }
26448+*/
26449+#ifdef CONFIG_X86_PAE
26450+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26451+/*
26452+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26453+ pgd = pgd_offset_k(addr);
26454+ pud = pud_offset(pgd, addr);
26455+ pmd = pmd_offset(pud, addr);
26456+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26457+ }
26458+*/
26459+#endif
26460+
26461+#ifdef CONFIG_MODULES
26462+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26463+#endif
26464+
26465+#else
26466+ pgd_t *pgd;
26467+ pud_t *pud;
26468+ pmd_t *pmd;
26469+ unsigned long addr, end;
26470+
26471+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26472+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26473+ pgd = pgd_offset_k(addr);
26474+ pud = pud_offset(pgd, addr);
26475+ pmd = pmd_offset(pud, addr);
26476+ if (!pmd_present(*pmd))
26477+ continue;
26478+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26479+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26480+ else
26481+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26482+ }
26483+
26484+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26485+ end = addr + KERNEL_IMAGE_SIZE;
26486+ for (; addr < end; addr += PMD_SIZE) {
26487+ pgd = pgd_offset_k(addr);
26488+ pud = pud_offset(pgd, addr);
26489+ pmd = pmd_offset(pud, addr);
26490+ if (!pmd_present(*pmd))
26491+ continue;
26492+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26493+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26494+ }
26495+#endif
26496+
26497+ flush_tlb_all();
26498+#endif
26499+
26500 free_init_pages("unused kernel memory",
26501 (unsigned long)(&__init_begin),
26502 (unsigned long)(&__init_end));
26503diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26504index 30938c1..bda3d5d 100644
26505--- a/arch/x86/mm/init_32.c
26506+++ b/arch/x86/mm/init_32.c
26507@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
26508 }
26509
26510 /*
26511- * Creates a middle page table and puts a pointer to it in the
26512- * given global directory entry. This only returns the gd entry
26513- * in non-PAE compilation mode, since the middle layer is folded.
26514- */
26515-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26516-{
26517- pud_t *pud;
26518- pmd_t *pmd_table;
26519-
26520-#ifdef CONFIG_X86_PAE
26521- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26522- if (after_bootmem)
26523- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26524- else
26525- pmd_table = (pmd_t *)alloc_low_page();
26526- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26527- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26528- pud = pud_offset(pgd, 0);
26529- BUG_ON(pmd_table != pmd_offset(pud, 0));
26530-
26531- return pmd_table;
26532- }
26533-#endif
26534- pud = pud_offset(pgd, 0);
26535- pmd_table = pmd_offset(pud, 0);
26536-
26537- return pmd_table;
26538-}
26539-
26540-/*
26541 * Create a page table and place a pointer to it in a middle page
26542 * directory entry:
26543 */
26544@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26545 page_table = (pte_t *)alloc_low_page();
26546
26547 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26548+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26549+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26550+#else
26551 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26552+#endif
26553 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26554 }
26555
26556 return pte_offset_kernel(pmd, 0);
26557 }
26558
26559+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26560+{
26561+ pud_t *pud;
26562+ pmd_t *pmd_table;
26563+
26564+ pud = pud_offset(pgd, 0);
26565+ pmd_table = pmd_offset(pud, 0);
26566+
26567+ return pmd_table;
26568+}
26569+
26570 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26571 {
26572 int pgd_idx = pgd_index(vaddr);
26573@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26574 int pgd_idx, pmd_idx;
26575 unsigned long vaddr;
26576 pgd_t *pgd;
26577+ pud_t *pud;
26578 pmd_t *pmd;
26579 pte_t *pte = NULL;
26580
26581@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26582 pgd = pgd_base + pgd_idx;
26583
26584 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26585- pmd = one_md_table_init(pgd);
26586- pmd = pmd + pmd_index(vaddr);
26587+ pud = pud_offset(pgd, vaddr);
26588+ pmd = pmd_offset(pud, vaddr);
26589+
26590+#ifdef CONFIG_X86_PAE
26591+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26592+#endif
26593+
26594 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26595 pmd++, pmd_idx++) {
26596 pte = page_table_kmap_check(one_page_table_init(pmd),
26597@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26598 }
26599 }
26600
26601-static inline int is_kernel_text(unsigned long addr)
26602+static inline int is_kernel_text(unsigned long start, unsigned long end)
26603 {
26604- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
26605- return 1;
26606- return 0;
26607+ if ((start > ktla_ktva((unsigned long)_etext) ||
26608+ end <= ktla_ktva((unsigned long)_stext)) &&
26609+ (start > ktla_ktva((unsigned long)_einittext) ||
26610+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26611+
26612+#ifdef CONFIG_ACPI_SLEEP
26613+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26614+#endif
26615+
26616+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26617+ return 0;
26618+ return 1;
26619 }
26620
26621 /*
26622@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
26623 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
26624 unsigned long start_pfn, end_pfn;
26625 pgd_t *pgd_base = swapper_pg_dir;
26626- int pgd_idx, pmd_idx, pte_ofs;
26627+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26628 unsigned long pfn;
26629 pgd_t *pgd;
26630+ pud_t *pud;
26631 pmd_t *pmd;
26632 pte_t *pte;
26633 unsigned pages_2m, pages_4k;
26634@@ -278,8 +279,13 @@ repeat:
26635 pfn = start_pfn;
26636 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26637 pgd = pgd_base + pgd_idx;
26638- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26639- pmd = one_md_table_init(pgd);
26640+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26641+ pud = pud_offset(pgd, 0);
26642+ pmd = pmd_offset(pud, 0);
26643+
26644+#ifdef CONFIG_X86_PAE
26645+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26646+#endif
26647
26648 if (pfn >= end_pfn)
26649 continue;
26650@@ -291,14 +297,13 @@ repeat:
26651 #endif
26652 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26653 pmd++, pmd_idx++) {
26654- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26655+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26656
26657 /*
26658 * Map with big pages if possible, otherwise
26659 * create normal page tables:
26660 */
26661 if (use_pse) {
26662- unsigned int addr2;
26663 pgprot_t prot = PAGE_KERNEL_LARGE;
26664 /*
26665 * first pass will use the same initial
26666@@ -308,11 +313,7 @@ repeat:
26667 __pgprot(PTE_IDENT_ATTR |
26668 _PAGE_PSE);
26669
26670- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26671- PAGE_OFFSET + PAGE_SIZE-1;
26672-
26673- if (is_kernel_text(addr) ||
26674- is_kernel_text(addr2))
26675+ if (is_kernel_text(address, address + PMD_SIZE))
26676 prot = PAGE_KERNEL_LARGE_EXEC;
26677
26678 pages_2m++;
26679@@ -329,7 +330,7 @@ repeat:
26680 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26681 pte += pte_ofs;
26682 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26683- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26684+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26685 pgprot_t prot = PAGE_KERNEL;
26686 /*
26687 * first pass will use the same initial
26688@@ -337,7 +338,7 @@ repeat:
26689 */
26690 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26691
26692- if (is_kernel_text(addr))
26693+ if (is_kernel_text(address, address + PAGE_SIZE))
26694 prot = PAGE_KERNEL_EXEC;
26695
26696 pages_4k++;
26697@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
26698
26699 pud = pud_offset(pgd, va);
26700 pmd = pmd_offset(pud, va);
26701- if (!pmd_present(*pmd))
26702+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26703 break;
26704
26705 pte = pte_offset_kernel(pmd, va);
26706@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
26707
26708 static void __init pagetable_init(void)
26709 {
26710- pgd_t *pgd_base = swapper_pg_dir;
26711-
26712- permanent_kmaps_init(pgd_base);
26713+ permanent_kmaps_init(swapper_pg_dir);
26714 }
26715
26716 #ifdef CONFIG_ACPI_SLEEP
26717@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
26718 * ACPI suspend needs this for resume, because things like the intel-agp
26719 * driver might have split up a kernel 4MB mapping.
26720 */
26721-char swsusp_pg_dir[PAGE_SIZE]
26722+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
26723 __attribute__ ((aligned(PAGE_SIZE)));
26724
26725 static inline void save_pg_dir(void)
26726 {
26727- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
26728+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
26729 }
26730 #else /* !CONFIG_ACPI_SLEEP */
26731 static inline void save_pg_dir(void)
26732@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
26733 flush_tlb_all();
26734 }
26735
26736-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26737+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26738 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26739
26740 /* user-defined highmem size */
26741@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
26742 * Initialize the boot-time allocator (with low memory only):
26743 */
26744 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
26745- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26746+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26747 PAGE_SIZE);
26748 if (bootmap == -1L)
26749 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
26750@@ -864,6 +863,12 @@ void __init mem_init(void)
26751
26752 pci_iommu_alloc();
26753
26754+#ifdef CONFIG_PAX_PER_CPU_PGD
26755+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26756+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26757+ KERNEL_PGD_PTRS);
26758+#endif
26759+
26760 #ifdef CONFIG_FLATMEM
26761 BUG_ON(!mem_map);
26762 #endif
26763@@ -881,7 +886,7 @@ void __init mem_init(void)
26764 set_highmem_pages_init();
26765
26766 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26767- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26768+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26769 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26770
26771 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26772@@ -923,10 +928,10 @@ void __init mem_init(void)
26773 ((unsigned long)&__init_end -
26774 (unsigned long)&__init_begin) >> 10,
26775
26776- (unsigned long)&_etext, (unsigned long)&_edata,
26777- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26778+ (unsigned long)&_sdata, (unsigned long)&_edata,
26779+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26780
26781- (unsigned long)&_text, (unsigned long)&_etext,
26782+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26783 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26784
26785 /*
26786@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
26787 if (!kernel_set_to_readonly)
26788 return;
26789
26790+ start = ktla_ktva(start);
26791 pr_debug("Set kernel text: %lx - %lx for read write\n",
26792 start, start+size);
26793
26794@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
26795 if (!kernel_set_to_readonly)
26796 return;
26797
26798+ start = ktla_ktva(start);
26799 pr_debug("Set kernel text: %lx - %lx for read only\n",
26800 start, start+size);
26801
26802@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
26803 unsigned long start = PFN_ALIGN(_text);
26804 unsigned long size = PFN_ALIGN(_etext) - start;
26805
26806+ start = ktla_ktva(start);
26807 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26808 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26809 size >> 10);
26810diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26811index 7d095ad..25d2549 100644
26812--- a/arch/x86/mm/init_64.c
26813+++ b/arch/x86/mm/init_64.c
26814@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26815 pmd = fill_pmd(pud, vaddr);
26816 pte = fill_pte(pmd, vaddr);
26817
26818+ pax_open_kernel();
26819 set_pte(pte, new_pte);
26820+ pax_close_kernel();
26821
26822 /*
26823 * It's enough to flush this one mapping.
26824@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26825 pgd = pgd_offset_k((unsigned long)__va(phys));
26826 if (pgd_none(*pgd)) {
26827 pud = (pud_t *) spp_getpage();
26828- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26829- _PAGE_USER));
26830+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26831 }
26832 pud = pud_offset(pgd, (unsigned long)__va(phys));
26833 if (pud_none(*pud)) {
26834 pmd = (pmd_t *) spp_getpage();
26835- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26836- _PAGE_USER));
26837+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26838 }
26839 pmd = pmd_offset(pud, phys);
26840 BUG_ON(!pmd_none(*pmd));
26841@@ -675,6 +675,12 @@ void __init mem_init(void)
26842
26843 pci_iommu_alloc();
26844
26845+#ifdef CONFIG_PAX_PER_CPU_PGD
26846+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26847+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26848+ KERNEL_PGD_PTRS);
26849+#endif
26850+
26851 /* clear_bss() already clear the empty_zero_page */
26852
26853 reservedpages = 0;
26854@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
26855 static struct vm_area_struct gate_vma = {
26856 .vm_start = VSYSCALL_START,
26857 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26858- .vm_page_prot = PAGE_READONLY_EXEC,
26859- .vm_flags = VM_READ | VM_EXEC
26860+ .vm_page_prot = PAGE_READONLY,
26861+ .vm_flags = VM_READ
26862 };
26863
26864 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26865@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
26866
26867 const char *arch_vma_name(struct vm_area_struct *vma)
26868 {
26869- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26870+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26871 return "[vdso]";
26872 if (vma == &gate_vma)
26873 return "[vsyscall]";
26874diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26875index 84e236c..69bd3f6 100644
26876--- a/arch/x86/mm/iomap_32.c
26877+++ b/arch/x86/mm/iomap_32.c
26878@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
26879 debug_kmap_atomic(type);
26880 idx = type + KM_TYPE_NR * smp_processor_id();
26881 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26882+
26883+ pax_open_kernel();
26884 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26885+ pax_close_kernel();
26886+
26887 arch_flush_lazy_mmu_mode();
26888
26889 return (void *)vaddr;
26890diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26891index 2feb9bd..ab91e7b 100644
26892--- a/arch/x86/mm/ioremap.c
26893+++ b/arch/x86/mm/ioremap.c
26894@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
26895 * Second special case: Some BIOSen report the PC BIOS
26896 * area (640->1Mb) as ram even though it is not.
26897 */
26898- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
26899- pagenr < (BIOS_END >> PAGE_SHIFT))
26900+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
26901+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26902 return 0;
26903
26904 for (i = 0; i < e820.nr_map; i++) {
26905@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26906 /*
26907 * Don't allow anybody to remap normal RAM that we're using..
26908 */
26909- for (pfn = phys_addr >> PAGE_SHIFT;
26910- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
26911- pfn++) {
26912-
26913+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
26914 int is_ram = page_is_ram(pfn);
26915
26916- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26917+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26918 return NULL;
26919 WARN_ON_ONCE(is_ram);
26920 }
26921@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26922
26923 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26924 if (page_is_ram(start >> PAGE_SHIFT))
26925+#ifdef CONFIG_HIGHMEM
26926+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26927+#endif
26928 return __va(phys);
26929
26930 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
26931@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
26932 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26933
26934 static __initdata int after_paging_init;
26935-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26936+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26937
26938 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26939 {
26940@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
26941 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26942
26943 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26944- memset(bm_pte, 0, sizeof(bm_pte));
26945- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26946+ pmd_populate_user(&init_mm, pmd, bm_pte);
26947
26948 /*
26949 * The boot-ioremap range spans multiple pmds, for which
26950diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26951index 8cc1833..1abbc5b 100644
26952--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26953+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26954@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26955 * memory (e.g. tracked pages)? For now, we need this to avoid
26956 * invoking kmemcheck for PnP BIOS calls.
26957 */
26958- if (regs->flags & X86_VM_MASK)
26959+ if (v8086_mode(regs))
26960 return false;
26961- if (regs->cs != __KERNEL_CS)
26962+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26963 return false;
26964
26965 pte = kmemcheck_pte_lookup(address);
26966diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26967index c9e57af..07a321b 100644
26968--- a/arch/x86/mm/mmap.c
26969+++ b/arch/x86/mm/mmap.c
26970@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
26971 * Leave an at least ~128 MB hole with possible stack randomization.
26972 */
26973 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26974-#define MAX_GAP (TASK_SIZE/6*5)
26975+#define MAX_GAP (pax_task_size/6*5)
26976
26977 /*
26978 * True on X86_32 or when emulating IA32 on X86_64
26979@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
26980 return rnd << PAGE_SHIFT;
26981 }
26982
26983-static unsigned long mmap_base(void)
26984+static unsigned long mmap_base(struct mm_struct *mm)
26985 {
26986 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
26987+ unsigned long pax_task_size = TASK_SIZE;
26988+
26989+#ifdef CONFIG_PAX_SEGMEXEC
26990+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26991+ pax_task_size = SEGMEXEC_TASK_SIZE;
26992+#endif
26993
26994 if (gap < MIN_GAP)
26995 gap = MIN_GAP;
26996 else if (gap > MAX_GAP)
26997 gap = MAX_GAP;
26998
26999- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
27000+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
27001 }
27002
27003 /*
27004 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
27005 * does, but not when emulating X86_32
27006 */
27007-static unsigned long mmap_legacy_base(void)
27008+static unsigned long mmap_legacy_base(struct mm_struct *mm)
27009 {
27010- if (mmap_is_ia32())
27011+ if (mmap_is_ia32()) {
27012+
27013+#ifdef CONFIG_PAX_SEGMEXEC
27014+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27015+ return SEGMEXEC_TASK_UNMAPPED_BASE;
27016+ else
27017+#endif
27018+
27019 return TASK_UNMAPPED_BASE;
27020- else
27021+ } else
27022 return TASK_UNMAPPED_BASE + mmap_rnd();
27023 }
27024
27025@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
27026 void arch_pick_mmap_layout(struct mm_struct *mm)
27027 {
27028 if (mmap_is_legacy()) {
27029- mm->mmap_base = mmap_legacy_base();
27030+ mm->mmap_base = mmap_legacy_base(mm);
27031+
27032+#ifdef CONFIG_PAX_RANDMMAP
27033+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27034+ mm->mmap_base += mm->delta_mmap;
27035+#endif
27036+
27037 mm->get_unmapped_area = arch_get_unmapped_area;
27038 mm->unmap_area = arch_unmap_area;
27039 } else {
27040- mm->mmap_base = mmap_base();
27041+ mm->mmap_base = mmap_base(mm);
27042+
27043+#ifdef CONFIG_PAX_RANDMMAP
27044+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27045+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
27046+#endif
27047+
27048 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
27049 mm->unmap_area = arch_unmap_area_topdown;
27050 }
27051diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
27052index 132772a..b961f11 100644
27053--- a/arch/x86/mm/mmio-mod.c
27054+++ b/arch/x86/mm/mmio-mod.c
27055@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
27056 break;
27057 default:
27058 {
27059- unsigned char *ip = (unsigned char *)instptr;
27060+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
27061 my_trace->opcode = MMIO_UNKNOWN_OP;
27062 my_trace->width = 0;
27063 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
27064@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
27065 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27066 void __iomem *addr)
27067 {
27068- static atomic_t next_id;
27069+ static atomic_unchecked_t next_id;
27070 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
27071 /* These are page-unaligned. */
27072 struct mmiotrace_map map = {
27073@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27074 .private = trace
27075 },
27076 .phys = offset,
27077- .id = atomic_inc_return(&next_id)
27078+ .id = atomic_inc_return_unchecked(&next_id)
27079 };
27080 map.map_id = trace->id;
27081
27082diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
27083index d253006..e56dd6a 100644
27084--- a/arch/x86/mm/numa_32.c
27085+++ b/arch/x86/mm/numa_32.c
27086@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
27087 }
27088 #endif
27089
27090-extern unsigned long find_max_low_pfn(void);
27091 extern unsigned long highend_pfn, highstart_pfn;
27092
27093 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
27094diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
27095index e1d1069..2251ff3 100644
27096--- a/arch/x86/mm/pageattr-test.c
27097+++ b/arch/x86/mm/pageattr-test.c
27098@@ -36,7 +36,7 @@ enum {
27099
27100 static int pte_testbit(pte_t pte)
27101 {
27102- return pte_flags(pte) & _PAGE_UNUSED1;
27103+ return pte_flags(pte) & _PAGE_CPA_TEST;
27104 }
27105
27106 struct split_state {
27107diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
27108index dd38bfb..b72c63e 100644
27109--- a/arch/x86/mm/pageattr.c
27110+++ b/arch/x86/mm/pageattr.c
27111@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27112 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
27113 */
27114 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
27115- pgprot_val(forbidden) |= _PAGE_NX;
27116+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27117
27118 /*
27119 * The kernel text needs to be executable for obvious reasons
27120 * Does not cover __inittext since that is gone later on. On
27121 * 64bit we do not enforce !NX on the low mapping
27122 */
27123- if (within(address, (unsigned long)_text, (unsigned long)_etext))
27124- pgprot_val(forbidden) |= _PAGE_NX;
27125+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
27126+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27127
27128+#ifdef CONFIG_DEBUG_RODATA
27129 /*
27130 * The .rodata section needs to be read-only. Using the pfn
27131 * catches all aliases.
27132@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27133 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
27134 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
27135 pgprot_val(forbidden) |= _PAGE_RW;
27136+#endif
27137+
27138+#ifdef CONFIG_PAX_KERNEXEC
27139+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
27140+ pgprot_val(forbidden) |= _PAGE_RW;
27141+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27142+ }
27143+#endif
27144
27145 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
27146
27147@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
27148 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
27149 {
27150 /* change init_mm */
27151+ pax_open_kernel();
27152 set_pte_atomic(kpte, pte);
27153+
27154 #ifdef CONFIG_X86_32
27155 if (!SHARED_KERNEL_PMD) {
27156+
27157+#ifdef CONFIG_PAX_PER_CPU_PGD
27158+ unsigned long cpu;
27159+#else
27160 struct page *page;
27161+#endif
27162
27163+#ifdef CONFIG_PAX_PER_CPU_PGD
27164+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27165+ pgd_t *pgd = get_cpu_pgd(cpu);
27166+#else
27167 list_for_each_entry(page, &pgd_list, lru) {
27168- pgd_t *pgd;
27169+ pgd_t *pgd = (pgd_t *)page_address(page);
27170+#endif
27171+
27172 pud_t *pud;
27173 pmd_t *pmd;
27174
27175- pgd = (pgd_t *)page_address(page) + pgd_index(address);
27176+ pgd += pgd_index(address);
27177 pud = pud_offset(pgd, address);
27178 pmd = pmd_offset(pud, address);
27179 set_pte_atomic((pte_t *)pmd, pte);
27180 }
27181 }
27182 #endif
27183+ pax_close_kernel();
27184 }
27185
27186 static int
27187diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
27188index e78cd0e..de0a817 100644
27189--- a/arch/x86/mm/pat.c
27190+++ b/arch/x86/mm/pat.c
27191@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
27192
27193 conflict:
27194 printk(KERN_INFO "%s:%d conflicting memory types "
27195- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
27196+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
27197 new->end, cattr_name(new->type), cattr_name(entry->type));
27198 return -EBUSY;
27199 }
27200@@ -559,7 +559,7 @@ unlock_ret:
27201
27202 if (err) {
27203 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
27204- current->comm, current->pid, start, end);
27205+ current->comm, task_pid_nr(current), start, end);
27206 }
27207
27208 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
27209@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27210 while (cursor < to) {
27211 if (!devmem_is_allowed(pfn)) {
27212 printk(KERN_INFO
27213- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27214- current->comm, from, to);
27215+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
27216+ current->comm, from, to, cursor);
27217 return 0;
27218 }
27219 cursor += PAGE_SIZE;
27220@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
27221 printk(KERN_INFO
27222 "%s:%d ioremap_change_attr failed %s "
27223 "for %Lx-%Lx\n",
27224- current->comm, current->pid,
27225+ current->comm, task_pid_nr(current),
27226 cattr_name(flags),
27227 base, (unsigned long long)(base + size));
27228 return -EINVAL;
27229@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27230 free_memtype(paddr, paddr + size);
27231 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
27232 " for %Lx-%Lx, got %s\n",
27233- current->comm, current->pid,
27234+ current->comm, task_pid_nr(current),
27235 cattr_name(want_flags),
27236 (unsigned long long)paddr,
27237 (unsigned long long)(paddr + size),
27238diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
27239index df3d5c8..c2223e1 100644
27240--- a/arch/x86/mm/pf_in.c
27241+++ b/arch/x86/mm/pf_in.c
27242@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
27243 int i;
27244 enum reason_type rv = OTHERS;
27245
27246- p = (unsigned char *)ins_addr;
27247+ p = (unsigned char *)ktla_ktva(ins_addr);
27248 p += skip_prefix(p, &prf);
27249 p += get_opcode(p, &opcode);
27250
27251@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
27252 struct prefix_bits prf;
27253 int i;
27254
27255- p = (unsigned char *)ins_addr;
27256+ p = (unsigned char *)ktla_ktva(ins_addr);
27257 p += skip_prefix(p, &prf);
27258 p += get_opcode(p, &opcode);
27259
27260@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27261 struct prefix_bits prf;
27262 int i;
27263
27264- p = (unsigned char *)ins_addr;
27265+ p = (unsigned char *)ktla_ktva(ins_addr);
27266 p += skip_prefix(p, &prf);
27267 p += get_opcode(p, &opcode);
27268
27269@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27270 int i;
27271 unsigned long rv;
27272
27273- p = (unsigned char *)ins_addr;
27274+ p = (unsigned char *)ktla_ktva(ins_addr);
27275 p += skip_prefix(p, &prf);
27276 p += get_opcode(p, &opcode);
27277 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27278@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27279 int i;
27280 unsigned long rv;
27281
27282- p = (unsigned char *)ins_addr;
27283+ p = (unsigned char *)ktla_ktva(ins_addr);
27284 p += skip_prefix(p, &prf);
27285 p += get_opcode(p, &opcode);
27286 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
27287diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
27288index e0e6fad..c56b495 100644
27289--- a/arch/x86/mm/pgtable.c
27290+++ b/arch/x86/mm/pgtable.c
27291@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
27292 list_del(&page->lru);
27293 }
27294
27295-#define UNSHARED_PTRS_PER_PGD \
27296- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27297+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27298+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27299
27300+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27301+{
27302+ while (count--)
27303+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27304+}
27305+#endif
27306+
27307+#ifdef CONFIG_PAX_PER_CPU_PGD
27308+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27309+{
27310+ while (count--)
27311+
27312+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27313+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
27314+#else
27315+ *dst++ = *src++;
27316+#endif
27317+
27318+}
27319+#endif
27320+
27321+#ifdef CONFIG_X86_64
27322+#define pxd_t pud_t
27323+#define pyd_t pgd_t
27324+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27325+#define pxd_free(mm, pud) pud_free((mm), (pud))
27326+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27327+#define pyd_offset(mm, address) pgd_offset((mm), (address))
27328+#define PYD_SIZE PGDIR_SIZE
27329+#else
27330+#define pxd_t pmd_t
27331+#define pyd_t pud_t
27332+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27333+#define pxd_free(mm, pud) pmd_free((mm), (pud))
27334+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27335+#define pyd_offset(mm, address) pud_offset((mm), (address))
27336+#define PYD_SIZE PUD_SIZE
27337+#endif
27338+
27339+#ifdef CONFIG_PAX_PER_CPU_PGD
27340+static inline void pgd_ctor(pgd_t *pgd) {}
27341+static inline void pgd_dtor(pgd_t *pgd) {}
27342+#else
27343 static void pgd_ctor(pgd_t *pgd)
27344 {
27345 /* If the pgd points to a shared pagetable level (either the
27346@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
27347 pgd_list_del(pgd);
27348 spin_unlock_irqrestore(&pgd_lock, flags);
27349 }
27350+#endif
27351
27352 /*
27353 * List of all pgd's needed for non-PAE so it can invalidate entries
27354@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
27355 * -- wli
27356 */
27357
27358-#ifdef CONFIG_X86_PAE
27359+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27360 /*
27361 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27362 * updating the top-level pagetable entries to guarantee the
27363@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
27364 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27365 * and initialize the kernel pmds here.
27366 */
27367-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27368+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27369
27370 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27371 {
27372@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27373 */
27374 flush_tlb_mm(mm);
27375 }
27376+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27377+#define PREALLOCATED_PXDS USER_PGD_PTRS
27378 #else /* !CONFIG_X86_PAE */
27379
27380 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27381-#define PREALLOCATED_PMDS 0
27382+#define PREALLOCATED_PXDS 0
27383
27384 #endif /* CONFIG_X86_PAE */
27385
27386-static void free_pmds(pmd_t *pmds[])
27387+static void free_pxds(pxd_t *pxds[])
27388 {
27389 int i;
27390
27391- for(i = 0; i < PREALLOCATED_PMDS; i++)
27392- if (pmds[i])
27393- free_page((unsigned long)pmds[i]);
27394+ for(i = 0; i < PREALLOCATED_PXDS; i++)
27395+ if (pxds[i])
27396+ free_page((unsigned long)pxds[i]);
27397 }
27398
27399-static int preallocate_pmds(pmd_t *pmds[])
27400+static int preallocate_pxds(pxd_t *pxds[])
27401 {
27402 int i;
27403 bool failed = false;
27404
27405- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27406- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27407- if (pmd == NULL)
27408+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27409+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27410+ if (pxd == NULL)
27411 failed = true;
27412- pmds[i] = pmd;
27413+ pxds[i] = pxd;
27414 }
27415
27416 if (failed) {
27417- free_pmds(pmds);
27418+ free_pxds(pxds);
27419 return -ENOMEM;
27420 }
27421
27422@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
27423 * preallocate which never got a corresponding vma will need to be
27424 * freed manually.
27425 */
27426-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27427+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27428 {
27429 int i;
27430
27431- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27432+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27433 pgd_t pgd = pgdp[i];
27434
27435 if (pgd_val(pgd) != 0) {
27436- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27437+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27438
27439- pgdp[i] = native_make_pgd(0);
27440+ set_pgd(pgdp + i, native_make_pgd(0));
27441
27442- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27443- pmd_free(mm, pmd);
27444+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27445+ pxd_free(mm, pxd);
27446 }
27447 }
27448 }
27449
27450-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27451+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27452 {
27453- pud_t *pud;
27454+ pyd_t *pyd;
27455 unsigned long addr;
27456 int i;
27457
27458- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27459+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27460 return;
27461
27462- pud = pud_offset(pgd, 0);
27463+#ifdef CONFIG_X86_64
27464+ pyd = pyd_offset(mm, 0L);
27465+#else
27466+ pyd = pyd_offset(pgd, 0L);
27467+#endif
27468
27469- for (addr = i = 0; i < PREALLOCATED_PMDS;
27470- i++, pud++, addr += PUD_SIZE) {
27471- pmd_t *pmd = pmds[i];
27472+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27473+ i++, pyd++, addr += PYD_SIZE) {
27474+ pxd_t *pxd = pxds[i];
27475
27476 if (i >= KERNEL_PGD_BOUNDARY)
27477- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27478- sizeof(pmd_t) * PTRS_PER_PMD);
27479+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27480+ sizeof(pxd_t) * PTRS_PER_PMD);
27481
27482- pud_populate(mm, pud, pmd);
27483+ pyd_populate(mm, pyd, pxd);
27484 }
27485 }
27486
27487 pgd_t *pgd_alloc(struct mm_struct *mm)
27488 {
27489 pgd_t *pgd;
27490- pmd_t *pmds[PREALLOCATED_PMDS];
27491+ pxd_t *pxds[PREALLOCATED_PXDS];
27492+
27493 unsigned long flags;
27494
27495 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27496@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27497
27498 mm->pgd = pgd;
27499
27500- if (preallocate_pmds(pmds) != 0)
27501+ if (preallocate_pxds(pxds) != 0)
27502 goto out_free_pgd;
27503
27504 if (paravirt_pgd_alloc(mm) != 0)
27505- goto out_free_pmds;
27506+ goto out_free_pxds;
27507
27508 /*
27509 * Make sure that pre-populating the pmds is atomic with
27510@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27511 spin_lock_irqsave(&pgd_lock, flags);
27512
27513 pgd_ctor(pgd);
27514- pgd_prepopulate_pmd(mm, pgd, pmds);
27515+ pgd_prepopulate_pxd(mm, pgd, pxds);
27516
27517 spin_unlock_irqrestore(&pgd_lock, flags);
27518
27519 return pgd;
27520
27521-out_free_pmds:
27522- free_pmds(pmds);
27523+out_free_pxds:
27524+ free_pxds(pxds);
27525 out_free_pgd:
27526 free_page((unsigned long)pgd);
27527 out:
27528@@ -287,7 +338,7 @@ out:
27529
27530 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27531 {
27532- pgd_mop_up_pmds(mm, pgd);
27533+ pgd_mop_up_pxds(mm, pgd);
27534 pgd_dtor(pgd);
27535 paravirt_pgd_free(mm, pgd);
27536 free_page((unsigned long)pgd);
27537diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27538index 46c8834..fcab43d 100644
27539--- a/arch/x86/mm/pgtable_32.c
27540+++ b/arch/x86/mm/pgtable_32.c
27541@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27542 return;
27543 }
27544 pte = pte_offset_kernel(pmd, vaddr);
27545+
27546+ pax_open_kernel();
27547 if (pte_val(pteval))
27548 set_pte_at(&init_mm, vaddr, pte, pteval);
27549 else
27550 pte_clear(&init_mm, vaddr, pte);
27551+ pax_close_kernel();
27552
27553 /*
27554 * It's enough to flush this one mapping.
27555diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27556index 513d8ed..978c161 100644
27557--- a/arch/x86/mm/setup_nx.c
27558+++ b/arch/x86/mm/setup_nx.c
27559@@ -4,11 +4,10 @@
27560
27561 #include <asm/pgtable.h>
27562
27563+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27564 int nx_enabled;
27565
27566-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27567-static int disable_nx __cpuinitdata;
27568-
27569+#ifndef CONFIG_PAX_PAGEEXEC
27570 /*
27571 * noexec = on|off
27572 *
27573@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
27574 if (!str)
27575 return -EINVAL;
27576 if (!strncmp(str, "on", 2)) {
27577- __supported_pte_mask |= _PAGE_NX;
27578- disable_nx = 0;
27579+ nx_enabled = 1;
27580 } else if (!strncmp(str, "off", 3)) {
27581- disable_nx = 1;
27582- __supported_pte_mask &= ~_PAGE_NX;
27583+ nx_enabled = 0;
27584 }
27585 return 0;
27586 }
27587 early_param("noexec", noexec_setup);
27588 #endif
27589+#endif
27590
27591 #ifdef CONFIG_X86_PAE
27592 void __init set_nx(void)
27593 {
27594- unsigned int v[4], l, h;
27595+ if (!nx_enabled && cpu_has_nx) {
27596+ unsigned l, h;
27597
27598- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
27599- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
27600-
27601- if ((v[3] & (1 << 20)) && !disable_nx) {
27602- rdmsr(MSR_EFER, l, h);
27603- l |= EFER_NX;
27604- wrmsr(MSR_EFER, l, h);
27605- nx_enabled = 1;
27606- __supported_pte_mask |= _PAGE_NX;
27607- }
27608+ __supported_pte_mask &= ~_PAGE_NX;
27609+ rdmsr(MSR_EFER, l, h);
27610+ l &= ~EFER_NX;
27611+ wrmsr(MSR_EFER, l, h);
27612 }
27613 }
27614 #else
27615@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
27616 unsigned long efer;
27617
27618 rdmsrl(MSR_EFER, efer);
27619- if (!(efer & EFER_NX) || disable_nx)
27620+ if (!(efer & EFER_NX) || !nx_enabled)
27621 __supported_pte_mask &= ~_PAGE_NX;
27622 }
27623 #endif
27624diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27625index 36fe08e..b123d3a 100644
27626--- a/arch/x86/mm/tlb.c
27627+++ b/arch/x86/mm/tlb.c
27628@@ -61,7 +61,11 @@ void leave_mm(int cpu)
27629 BUG();
27630 cpumask_clear_cpu(cpu,
27631 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
27632+
27633+#ifndef CONFIG_PAX_PER_CPU_PGD
27634 load_cr3(swapper_pg_dir);
27635+#endif
27636+
27637 }
27638 EXPORT_SYMBOL_GPL(leave_mm);
27639
27640diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27641index 829edf0..672adb3 100644
27642--- a/arch/x86/oprofile/backtrace.c
27643+++ b/arch/x86/oprofile/backtrace.c
27644@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27645 {
27646 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
27647
27648- if (!user_mode_vm(regs)) {
27649+ if (!user_mode(regs)) {
27650 unsigned long stack = kernel_stack_pointer(regs);
27651 if (depth)
27652 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27653diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
27654index e6a160a..36deff6 100644
27655--- a/arch/x86/oprofile/op_model_p4.c
27656+++ b/arch/x86/oprofile/op_model_p4.c
27657@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
27658 #endif
27659 }
27660
27661-static int inline addr_increment(void)
27662+static inline int addr_increment(void)
27663 {
27664 #ifdef CONFIG_SMP
27665 return smp_num_siblings == 2 ? 2 : 1;
27666diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
27667index 1331fcf..03901b2 100644
27668--- a/arch/x86/pci/common.c
27669+++ b/arch/x86/pci/common.c
27670@@ -31,8 +31,8 @@ int noioapicreroute = 1;
27671 int pcibios_last_bus = -1;
27672 unsigned long pirq_table_addr;
27673 struct pci_bus *pci_root_bus;
27674-struct pci_raw_ops *raw_pci_ops;
27675-struct pci_raw_ops *raw_pci_ext_ops;
27676+const struct pci_raw_ops *raw_pci_ops;
27677+const struct pci_raw_ops *raw_pci_ext_ops;
27678
27679 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
27680 int reg, int len, u32 *val)
27681diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
27682index 347d882..4baf6b6 100644
27683--- a/arch/x86/pci/direct.c
27684+++ b/arch/x86/pci/direct.c
27685@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
27686
27687 #undef PCI_CONF1_ADDRESS
27688
27689-struct pci_raw_ops pci_direct_conf1 = {
27690+const struct pci_raw_ops pci_direct_conf1 = {
27691 .read = pci_conf1_read,
27692 .write = pci_conf1_write,
27693 };
27694@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
27695
27696 #undef PCI_CONF2_ADDRESS
27697
27698-struct pci_raw_ops pci_direct_conf2 = {
27699+const struct pci_raw_ops pci_direct_conf2 = {
27700 .read = pci_conf2_read,
27701 .write = pci_conf2_write,
27702 };
27703@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
27704 * This should be close to trivial, but it isn't, because there are buggy
27705 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
27706 */
27707-static int __init pci_sanity_check(struct pci_raw_ops *o)
27708+static int __init pci_sanity_check(const struct pci_raw_ops *o)
27709 {
27710 u32 x = 0;
27711 int year, devfn;
27712diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
27713index f10a7e9..0425342 100644
27714--- a/arch/x86/pci/mmconfig_32.c
27715+++ b/arch/x86/pci/mmconfig_32.c
27716@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27717 return 0;
27718 }
27719
27720-static struct pci_raw_ops pci_mmcfg = {
27721+static const struct pci_raw_ops pci_mmcfg = {
27722 .read = pci_mmcfg_read,
27723 .write = pci_mmcfg_write,
27724 };
27725diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
27726index 94349f8..41600a7 100644
27727--- a/arch/x86/pci/mmconfig_64.c
27728+++ b/arch/x86/pci/mmconfig_64.c
27729@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27730 return 0;
27731 }
27732
27733-static struct pci_raw_ops pci_mmcfg = {
27734+static const struct pci_raw_ops pci_mmcfg = {
27735 .read = pci_mmcfg_read,
27736 .write = pci_mmcfg_write,
27737 };
27738diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
27739index 8eb295e..86bd657 100644
27740--- a/arch/x86/pci/numaq_32.c
27741+++ b/arch/x86/pci/numaq_32.c
27742@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
27743
27744 #undef PCI_CONF1_MQ_ADDRESS
27745
27746-static struct pci_raw_ops pci_direct_conf1_mq = {
27747+static const struct pci_raw_ops pci_direct_conf1_mq = {
27748 .read = pci_conf1_mq_read,
27749 .write = pci_conf1_mq_write
27750 };
27751diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
27752index b889d82..5a58a0a 100644
27753--- a/arch/x86/pci/olpc.c
27754+++ b/arch/x86/pci/olpc.c
27755@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
27756 return 0;
27757 }
27758
27759-static struct pci_raw_ops pci_olpc_conf = {
27760+static const struct pci_raw_ops pci_olpc_conf = {
27761 .read = pci_olpc_read,
27762 .write = pci_olpc_write,
27763 };
27764diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27765index 1c975cc..b8e16c2 100644
27766--- a/arch/x86/pci/pcbios.c
27767+++ b/arch/x86/pci/pcbios.c
27768@@ -56,50 +56,93 @@ union bios32 {
27769 static struct {
27770 unsigned long address;
27771 unsigned short segment;
27772-} bios32_indirect = { 0, __KERNEL_CS };
27773+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27774
27775 /*
27776 * Returns the entry point for the given service, NULL on error
27777 */
27778
27779-static unsigned long bios32_service(unsigned long service)
27780+static unsigned long __devinit bios32_service(unsigned long service)
27781 {
27782 unsigned char return_code; /* %al */
27783 unsigned long address; /* %ebx */
27784 unsigned long length; /* %ecx */
27785 unsigned long entry; /* %edx */
27786 unsigned long flags;
27787+ struct desc_struct d, *gdt;
27788
27789 local_irq_save(flags);
27790- __asm__("lcall *(%%edi); cld"
27791+
27792+ gdt = get_cpu_gdt_table(smp_processor_id());
27793+
27794+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27795+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27796+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27797+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27798+
27799+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27800 : "=a" (return_code),
27801 "=b" (address),
27802 "=c" (length),
27803 "=d" (entry)
27804 : "0" (service),
27805 "1" (0),
27806- "D" (&bios32_indirect));
27807+ "D" (&bios32_indirect),
27808+ "r"(__PCIBIOS_DS)
27809+ : "memory");
27810+
27811+ pax_open_kernel();
27812+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27813+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27814+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27815+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27816+ pax_close_kernel();
27817+
27818 local_irq_restore(flags);
27819
27820 switch (return_code) {
27821- case 0:
27822- return address + entry;
27823- case 0x80: /* Not present */
27824- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27825- return 0;
27826- default: /* Shouldn't happen */
27827- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27828- service, return_code);
27829+ case 0: {
27830+ int cpu;
27831+ unsigned char flags;
27832+
27833+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27834+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27835+ printk(KERN_WARNING "bios32_service: not valid\n");
27836 return 0;
27837+ }
27838+ address = address + PAGE_OFFSET;
27839+ length += 16UL; /* some BIOSs underreport this... */
27840+ flags = 4;
27841+ if (length >= 64*1024*1024) {
27842+ length >>= PAGE_SHIFT;
27843+ flags |= 8;
27844+ }
27845+
27846+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27847+ gdt = get_cpu_gdt_table(cpu);
27848+ pack_descriptor(&d, address, length, 0x9b, flags);
27849+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27850+ pack_descriptor(&d, address, length, 0x93, flags);
27851+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27852+ }
27853+ return entry;
27854+ }
27855+ case 0x80: /* Not present */
27856+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27857+ return 0;
27858+ default: /* Shouldn't happen */
27859+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27860+ service, return_code);
27861+ return 0;
27862 }
27863 }
27864
27865 static struct {
27866 unsigned long address;
27867 unsigned short segment;
27868-} pci_indirect = { 0, __KERNEL_CS };
27869+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27870
27871-static int pci_bios_present;
27872+static int pci_bios_present __read_only;
27873
27874 static int __devinit check_pcibios(void)
27875 {
27876@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
27877 unsigned long flags, pcibios_entry;
27878
27879 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27880- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27881+ pci_indirect.address = pcibios_entry;
27882
27883 local_irq_save(flags);
27884- __asm__(
27885- "lcall *(%%edi); cld\n\t"
27886+ __asm__("movw %w6, %%ds\n\t"
27887+ "lcall *%%ss:(%%edi); cld\n\t"
27888+ "push %%ss\n\t"
27889+ "pop %%ds\n\t"
27890 "jc 1f\n\t"
27891 "xor %%ah, %%ah\n"
27892 "1:"
27893@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
27894 "=b" (ebx),
27895 "=c" (ecx)
27896 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27897- "D" (&pci_indirect)
27898+ "D" (&pci_indirect),
27899+ "r" (__PCIBIOS_DS)
27900 : "memory");
27901 local_irq_restore(flags);
27902
27903@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27904
27905 switch (len) {
27906 case 1:
27907- __asm__("lcall *(%%esi); cld\n\t"
27908+ __asm__("movw %w6, %%ds\n\t"
27909+ "lcall *%%ss:(%%esi); cld\n\t"
27910+ "push %%ss\n\t"
27911+ "pop %%ds\n\t"
27912 "jc 1f\n\t"
27913 "xor %%ah, %%ah\n"
27914 "1:"
27915@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27916 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27917 "b" (bx),
27918 "D" ((long)reg),
27919- "S" (&pci_indirect));
27920+ "S" (&pci_indirect),
27921+ "r" (__PCIBIOS_DS));
27922 /*
27923 * Zero-extend the result beyond 8 bits, do not trust the
27924 * BIOS having done it:
27925@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27926 *value &= 0xff;
27927 break;
27928 case 2:
27929- __asm__("lcall *(%%esi); cld\n\t"
27930+ __asm__("movw %w6, %%ds\n\t"
27931+ "lcall *%%ss:(%%esi); cld\n\t"
27932+ "push %%ss\n\t"
27933+ "pop %%ds\n\t"
27934 "jc 1f\n\t"
27935 "xor %%ah, %%ah\n"
27936 "1:"
27937@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27938 : "1" (PCIBIOS_READ_CONFIG_WORD),
27939 "b" (bx),
27940 "D" ((long)reg),
27941- "S" (&pci_indirect));
27942+ "S" (&pci_indirect),
27943+ "r" (__PCIBIOS_DS));
27944 /*
27945 * Zero-extend the result beyond 16 bits, do not trust the
27946 * BIOS having done it:
27947@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27948 *value &= 0xffff;
27949 break;
27950 case 4:
27951- __asm__("lcall *(%%esi); cld\n\t"
27952+ __asm__("movw %w6, %%ds\n\t"
27953+ "lcall *%%ss:(%%esi); cld\n\t"
27954+ "push %%ss\n\t"
27955+ "pop %%ds\n\t"
27956 "jc 1f\n\t"
27957 "xor %%ah, %%ah\n"
27958 "1:"
27959@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27960 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27961 "b" (bx),
27962 "D" ((long)reg),
27963- "S" (&pci_indirect));
27964+ "S" (&pci_indirect),
27965+ "r" (__PCIBIOS_DS));
27966 break;
27967 }
27968
27969@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27970
27971 switch (len) {
27972 case 1:
27973- __asm__("lcall *(%%esi); cld\n\t"
27974+ __asm__("movw %w6, %%ds\n\t"
27975+ "lcall *%%ss:(%%esi); cld\n\t"
27976+ "push %%ss\n\t"
27977+ "pop %%ds\n\t"
27978 "jc 1f\n\t"
27979 "xor %%ah, %%ah\n"
27980 "1:"
27981@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27982 "c" (value),
27983 "b" (bx),
27984 "D" ((long)reg),
27985- "S" (&pci_indirect));
27986+ "S" (&pci_indirect),
27987+ "r" (__PCIBIOS_DS));
27988 break;
27989 case 2:
27990- __asm__("lcall *(%%esi); cld\n\t"
27991+ __asm__("movw %w6, %%ds\n\t"
27992+ "lcall *%%ss:(%%esi); cld\n\t"
27993+ "push %%ss\n\t"
27994+ "pop %%ds\n\t"
27995 "jc 1f\n\t"
27996 "xor %%ah, %%ah\n"
27997 "1:"
27998@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27999 "c" (value),
28000 "b" (bx),
28001 "D" ((long)reg),
28002- "S" (&pci_indirect));
28003+ "S" (&pci_indirect),
28004+ "r" (__PCIBIOS_DS));
28005 break;
28006 case 4:
28007- __asm__("lcall *(%%esi); cld\n\t"
28008+ __asm__("movw %w6, %%ds\n\t"
28009+ "lcall *%%ss:(%%esi); cld\n\t"
28010+ "push %%ss\n\t"
28011+ "pop %%ds\n\t"
28012 "jc 1f\n\t"
28013 "xor %%ah, %%ah\n"
28014 "1:"
28015@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28016 "c" (value),
28017 "b" (bx),
28018 "D" ((long)reg),
28019- "S" (&pci_indirect));
28020+ "S" (&pci_indirect),
28021+ "r" (__PCIBIOS_DS));
28022 break;
28023 }
28024
28025@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
28026 * Function table for BIOS32 access
28027 */
28028
28029-static struct pci_raw_ops pci_bios_access = {
28030+static const struct pci_raw_ops pci_bios_access = {
28031 .read = pci_bios_read,
28032 .write = pci_bios_write
28033 };
28034@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
28035 * Try to find PCI BIOS.
28036 */
28037
28038-static struct pci_raw_ops * __devinit pci_find_bios(void)
28039+static const struct pci_raw_ops * __devinit pci_find_bios(void)
28040 {
28041 union bios32 *check;
28042 unsigned char sum;
28043@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28044
28045 DBG("PCI: Fetching IRQ routing table... ");
28046 __asm__("push %%es\n\t"
28047+ "movw %w8, %%ds\n\t"
28048 "push %%ds\n\t"
28049 "pop %%es\n\t"
28050- "lcall *(%%esi); cld\n\t"
28051+ "lcall *%%ss:(%%esi); cld\n\t"
28052 "pop %%es\n\t"
28053+ "push %%ss\n\t"
28054+ "pop %%ds\n"
28055 "jc 1f\n\t"
28056 "xor %%ah, %%ah\n"
28057 "1:"
28058@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
28059 "1" (0),
28060 "D" ((long) &opt),
28061 "S" (&pci_indirect),
28062- "m" (opt)
28063+ "m" (opt),
28064+ "r" (__PCIBIOS_DS)
28065 : "memory");
28066 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
28067 if (ret & 0xff00)
28068@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28069 {
28070 int ret;
28071
28072- __asm__("lcall *(%%esi); cld\n\t"
28073+ __asm__("movw %w5, %%ds\n\t"
28074+ "lcall *%%ss:(%%esi); cld\n\t"
28075+ "push %%ss\n\t"
28076+ "pop %%ds\n"
28077 "jc 1f\n\t"
28078 "xor %%ah, %%ah\n"
28079 "1:"
28080@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28081 : "0" (PCIBIOS_SET_PCI_HW_INT),
28082 "b" ((dev->bus->number << 8) | dev->devfn),
28083 "c" ((irq << 8) | (pin + 10)),
28084- "S" (&pci_indirect));
28085+ "S" (&pci_indirect),
28086+ "r" (__PCIBIOS_DS));
28087 return !(ret & 0xff00);
28088 }
28089 EXPORT_SYMBOL(pcibios_set_irq_routing);
28090diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28091index fa0f651..9d8f3d9 100644
28092--- a/arch/x86/power/cpu.c
28093+++ b/arch/x86/power/cpu.c
28094@@ -129,7 +129,7 @@ static void do_fpu_end(void)
28095 static void fix_processor_context(void)
28096 {
28097 int cpu = smp_processor_id();
28098- struct tss_struct *t = &per_cpu(init_tss, cpu);
28099+ struct tss_struct *t = init_tss + cpu;
28100
28101 set_tss_desc(cpu, t); /*
28102 * This just modifies memory; should not be
28103@@ -139,7 +139,9 @@ static void fix_processor_context(void)
28104 */
28105
28106 #ifdef CONFIG_X86_64
28107+ pax_open_kernel();
28108 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28109+ pax_close_kernel();
28110
28111 syscall_init(); /* This sets MSR_*STAR and related */
28112 #endif
28113diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28114index dd78ef6..f9d928d 100644
28115--- a/arch/x86/vdso/Makefile
28116+++ b/arch/x86/vdso/Makefile
28117@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
28118 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
28119 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
28120
28121-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28122+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28123 GCOV_PROFILE := n
28124
28125 #
28126diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
28127index ee55754..0013b2e 100644
28128--- a/arch/x86/vdso/vclock_gettime.c
28129+++ b/arch/x86/vdso/vclock_gettime.c
28130@@ -22,24 +22,48 @@
28131 #include <asm/hpet.h>
28132 #include <asm/unistd.h>
28133 #include <asm/io.h>
28134+#include <asm/fixmap.h>
28135 #include "vextern.h"
28136
28137 #define gtod vdso_vsyscall_gtod_data
28138
28139+notrace noinline long __vdso_fallback_time(long *t)
28140+{
28141+ long secs;
28142+ asm volatile("syscall"
28143+ : "=a" (secs)
28144+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
28145+ return secs;
28146+}
28147+
28148 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
28149 {
28150 long ret;
28151 asm("syscall" : "=a" (ret) :
28152- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
28153+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
28154 return ret;
28155 }
28156
28157+notrace static inline cycle_t __vdso_vread_hpet(void)
28158+{
28159+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
28160+}
28161+
28162+notrace static inline cycle_t __vdso_vread_tsc(void)
28163+{
28164+ cycle_t ret = (cycle_t)vget_cycles();
28165+
28166+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
28167+}
28168+
28169 notrace static inline long vgetns(void)
28170 {
28171 long v;
28172- cycles_t (*vread)(void);
28173- vread = gtod->clock.vread;
28174- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
28175+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
28176+ v = __vdso_vread_tsc();
28177+ else
28178+ v = __vdso_vread_hpet();
28179+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
28180 return (v * gtod->clock.mult) >> gtod->clock.shift;
28181 }
28182
28183@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
28184
28185 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28186 {
28187- if (likely(gtod->sysctl_enabled))
28188+ if (likely(gtod->sysctl_enabled &&
28189+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28190+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28191 switch (clock) {
28192 case CLOCK_REALTIME:
28193 if (likely(gtod->clock.vread))
28194@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28195 int clock_gettime(clockid_t, struct timespec *)
28196 __attribute__((weak, alias("__vdso_clock_gettime")));
28197
28198+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
28199+{
28200+ long ret;
28201+ asm("syscall" : "=a" (ret) :
28202+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
28203+ return ret;
28204+}
28205+
28206 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28207 {
28208- long ret;
28209- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
28210+ if (likely(gtod->sysctl_enabled &&
28211+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28212+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28213+ {
28214 if (likely(tv != NULL)) {
28215 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
28216 offsetof(struct timespec, tv_nsec) ||
28217@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28218 }
28219 return 0;
28220 }
28221- asm("syscall" : "=a" (ret) :
28222- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
28223- return ret;
28224+ return __vdso_fallback_gettimeofday(tv, tz);
28225 }
28226 int gettimeofday(struct timeval *, struct timezone *)
28227 __attribute__((weak, alias("__vdso_gettimeofday")));
28228diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
28229index 4e5dd3b..00ba15e 100644
28230--- a/arch/x86/vdso/vdso.lds.S
28231+++ b/arch/x86/vdso/vdso.lds.S
28232@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
28233 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
28234 #include "vextern.h"
28235 #undef VEXTERN
28236+
28237+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
28238+VEXTERN(fallback_gettimeofday)
28239+VEXTERN(fallback_time)
28240+VEXTERN(getcpu)
28241+#undef VEXTERN
28242diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28243index 58bc00f..d53fb48 100644
28244--- a/arch/x86/vdso/vdso32-setup.c
28245+++ b/arch/x86/vdso/vdso32-setup.c
28246@@ -25,6 +25,7 @@
28247 #include <asm/tlbflush.h>
28248 #include <asm/vdso.h>
28249 #include <asm/proto.h>
28250+#include <asm/mman.h>
28251
28252 enum {
28253 VDSO_DISABLED = 0,
28254@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28255 void enable_sep_cpu(void)
28256 {
28257 int cpu = get_cpu();
28258- struct tss_struct *tss = &per_cpu(init_tss, cpu);
28259+ struct tss_struct *tss = init_tss + cpu;
28260
28261 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28262 put_cpu();
28263@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28264 gate_vma.vm_start = FIXADDR_USER_START;
28265 gate_vma.vm_end = FIXADDR_USER_END;
28266 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28267- gate_vma.vm_page_prot = __P101;
28268+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28269 /*
28270 * Make sure the vDSO gets into every core dump.
28271 * Dumping its contents makes post-mortem fully interpretable later
28272@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28273 if (compat)
28274 addr = VDSO_HIGH_BASE;
28275 else {
28276- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28277+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28278 if (IS_ERR_VALUE(addr)) {
28279 ret = addr;
28280 goto up_fail;
28281 }
28282 }
28283
28284- current->mm->context.vdso = (void *)addr;
28285+ current->mm->context.vdso = addr;
28286
28287 if (compat_uses_vma || !compat) {
28288 /*
28289@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28290 }
28291
28292 current_thread_info()->sysenter_return =
28293- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28294+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28295
28296 up_fail:
28297 if (ret)
28298- current->mm->context.vdso = NULL;
28299+ current->mm->context.vdso = 0;
28300
28301 up_write(&mm->mmap_sem);
28302
28303@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
28304
28305 const char *arch_vma_name(struct vm_area_struct *vma)
28306 {
28307- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28308+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28309 return "[vdso]";
28310+
28311+#ifdef CONFIG_PAX_SEGMEXEC
28312+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28313+ return "[vdso]";
28314+#endif
28315+
28316 return NULL;
28317 }
28318
28319@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
28320 struct mm_struct *mm = tsk->mm;
28321
28322 /* Check to see if this task was created in compat vdso mode */
28323- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
28324+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
28325 return &gate_vma;
28326 return NULL;
28327 }
28328diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
28329index 1683ba2..48d07f3 100644
28330--- a/arch/x86/vdso/vextern.h
28331+++ b/arch/x86/vdso/vextern.h
28332@@ -11,6 +11,5 @@
28333 put into vextern.h and be referenced as a pointer with vdso prefix.
28334 The main kernel later fills in the values. */
28335
28336-VEXTERN(jiffies)
28337 VEXTERN(vgetcpu_mode)
28338 VEXTERN(vsyscall_gtod_data)
28339diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
28340index 21e1aeb..2c0b3c4 100644
28341--- a/arch/x86/vdso/vma.c
28342+++ b/arch/x86/vdso/vma.c
28343@@ -17,8 +17,6 @@
28344 #include "vextern.h" /* Just for VMAGIC. */
28345 #undef VEXTERN
28346
28347-unsigned int __read_mostly vdso_enabled = 1;
28348-
28349 extern char vdso_start[], vdso_end[];
28350 extern unsigned short vdso_sync_cpuid;
28351
28352@@ -27,10 +25,8 @@ static unsigned vdso_size;
28353
28354 static inline void *var_ref(void *p, char *name)
28355 {
28356- if (*(void **)p != (void *)VMAGIC) {
28357- printk("VDSO: variable %s broken\n", name);
28358- vdso_enabled = 0;
28359- }
28360+ if (*(void **)p != (void *)VMAGIC)
28361+ panic("VDSO: variable %s broken\n", name);
28362 return p;
28363 }
28364
28365@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
28366 if (!vbase)
28367 goto oom;
28368
28369- if (memcmp(vbase, "\177ELF", 4)) {
28370- printk("VDSO: I'm broken; not ELF\n");
28371- vdso_enabled = 0;
28372- }
28373+ if (memcmp(vbase, ELFMAG, SELFMAG))
28374+ panic("VDSO: I'm broken; not ELF\n");
28375
28376 #define VEXTERN(x) \
28377 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
28378 #include "vextern.h"
28379 #undef VEXTERN
28380+ vunmap(vbase);
28381 return 0;
28382
28383 oom:
28384- printk("Cannot allocate vdso\n");
28385- vdso_enabled = 0;
28386- return -ENOMEM;
28387+ panic("Cannot allocate vdso\n");
28388 }
28389 __initcall(init_vdso_vars);
28390
28391@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
28392 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28393 {
28394 struct mm_struct *mm = current->mm;
28395- unsigned long addr;
28396+ unsigned long addr = 0;
28397 int ret;
28398
28399- if (!vdso_enabled)
28400- return 0;
28401-
28402 down_write(&mm->mmap_sem);
28403+
28404+#ifdef CONFIG_PAX_RANDMMAP
28405+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28406+#endif
28407+
28408 addr = vdso_addr(mm->start_stack, vdso_size);
28409 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
28410 if (IS_ERR_VALUE(addr)) {
28411@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28412 goto up_fail;
28413 }
28414
28415- current->mm->context.vdso = (void *)addr;
28416+ current->mm->context.vdso = addr;
28417
28418 ret = install_special_mapping(mm, addr, vdso_size,
28419 VM_READ|VM_EXEC|
28420@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28421 VM_ALWAYSDUMP,
28422 vdso_pages);
28423 if (ret) {
28424- current->mm->context.vdso = NULL;
28425+ current->mm->context.vdso = 0;
28426 goto up_fail;
28427 }
28428
28429@@ -132,10 +127,3 @@ up_fail:
28430 up_write(&mm->mmap_sem);
28431 return ret;
28432 }
28433-
28434-static __init int vdso_setup(char *s)
28435-{
28436- vdso_enabled = simple_strtoul(s, NULL, 0);
28437- return 0;
28438-}
28439-__setup("vdso=", vdso_setup);
28440diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
28441index 0087b00..eecb34f 100644
28442--- a/arch/x86/xen/enlighten.c
28443+++ b/arch/x86/xen/enlighten.c
28444@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
28445
28446 struct shared_info xen_dummy_shared_info;
28447
28448-void *xen_initial_gdt;
28449-
28450 /*
28451 * Point at some empty memory to start with. We map the real shared_info
28452 * page as soon as fixmap is up and running.
28453@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
28454
28455 preempt_disable();
28456
28457- start = __get_cpu_var(idt_desc).address;
28458+ start = (unsigned long)__get_cpu_var(idt_desc).address;
28459 end = start + __get_cpu_var(idt_desc).size + 1;
28460
28461 xen_mc_flush();
28462@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
28463 #endif
28464 };
28465
28466-static void xen_reboot(int reason)
28467+static __noreturn void xen_reboot(int reason)
28468 {
28469 struct sched_shutdown r = { .reason = reason };
28470
28471@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
28472 BUG();
28473 }
28474
28475-static void xen_restart(char *msg)
28476+static __noreturn void xen_restart(char *msg)
28477 {
28478 xen_reboot(SHUTDOWN_reboot);
28479 }
28480
28481-static void xen_emergency_restart(void)
28482+static __noreturn void xen_emergency_restart(void)
28483 {
28484 xen_reboot(SHUTDOWN_reboot);
28485 }
28486
28487-static void xen_machine_halt(void)
28488+static __noreturn void xen_machine_halt(void)
28489 {
28490 xen_reboot(SHUTDOWN_poweroff);
28491 }
28492@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
28493 */
28494 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
28495
28496-#ifdef CONFIG_X86_64
28497 /* Work out if we support NX */
28498- check_efer();
28499+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28500+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
28501+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
28502+ unsigned l, h;
28503+
28504+#ifdef CONFIG_X86_PAE
28505+ nx_enabled = 1;
28506+#endif
28507+ __supported_pte_mask |= _PAGE_NX;
28508+ rdmsr(MSR_EFER, l, h);
28509+ l |= EFER_NX;
28510+ wrmsr(MSR_EFER, l, h);
28511+ }
28512 #endif
28513
28514 xen_setup_features();
28515@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
28516
28517 machine_ops = xen_machine_ops;
28518
28519- /*
28520- * The only reliable way to retain the initial address of the
28521- * percpu gdt_page is to remember it here, so we can go and
28522- * mark it RW later, when the initial percpu area is freed.
28523- */
28524- xen_initial_gdt = &per_cpu(gdt_page, 0);
28525-
28526 xen_smp_init();
28527
28528 pgd = (pgd_t *)xen_start_info->pt_base;
28529diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
28530index 3f90a2c..2c2ad84 100644
28531--- a/arch/x86/xen/mmu.c
28532+++ b/arch/x86/xen/mmu.c
28533@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28534 convert_pfn_mfn(init_level4_pgt);
28535 convert_pfn_mfn(level3_ident_pgt);
28536 convert_pfn_mfn(level3_kernel_pgt);
28537+ convert_pfn_mfn(level3_vmalloc_start_pgt);
28538+ convert_pfn_mfn(level3_vmalloc_end_pgt);
28539+ convert_pfn_mfn(level3_vmemmap_pgt);
28540
28541 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
28542 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
28543@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28544 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
28545 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
28546 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
28547+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
28548+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
28549+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
28550 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
28551+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
28552 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
28553 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
28554
28555@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
28556 pv_mmu_ops.set_pud = xen_set_pud;
28557 #if PAGETABLE_LEVELS == 4
28558 pv_mmu_ops.set_pgd = xen_set_pgd;
28559+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
28560 #endif
28561
28562 /* This will work as long as patching hasn't happened yet
28563@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
28564 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
28565 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
28566 .set_pgd = xen_set_pgd_hyper,
28567+ .set_pgd_batched = xen_set_pgd_hyper,
28568
28569 .alloc_pud = xen_alloc_pmd_init,
28570 .release_pud = xen_release_pmd_init,
28571diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
28572index a96204a..fca9b8e 100644
28573--- a/arch/x86/xen/smp.c
28574+++ b/arch/x86/xen/smp.c
28575@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
28576 {
28577 BUG_ON(smp_processor_id() != 0);
28578 native_smp_prepare_boot_cpu();
28579-
28580- /* We've switched to the "real" per-cpu gdt, so make sure the
28581- old memory can be recycled */
28582- make_lowmem_page_readwrite(xen_initial_gdt);
28583-
28584 xen_setup_vcpu_info_placement();
28585 }
28586
28587@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
28588 gdt = get_cpu_gdt_table(cpu);
28589
28590 ctxt->flags = VGCF_IN_KERNEL;
28591- ctxt->user_regs.ds = __USER_DS;
28592- ctxt->user_regs.es = __USER_DS;
28593+ ctxt->user_regs.ds = __KERNEL_DS;
28594+ ctxt->user_regs.es = __KERNEL_DS;
28595 ctxt->user_regs.ss = __KERNEL_DS;
28596 #ifdef CONFIG_X86_32
28597 ctxt->user_regs.fs = __KERNEL_PERCPU;
28598- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
28599+ savesegment(gs, ctxt->user_regs.gs);
28600 #else
28601 ctxt->gs_base_kernel = per_cpu_offset(cpu);
28602 #endif
28603@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
28604 int rc;
28605
28606 per_cpu(current_task, cpu) = idle;
28607+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28608 #ifdef CONFIG_X86_32
28609 irq_ctx_init(cpu);
28610 #else
28611 clear_tsk_thread_flag(idle, TIF_FORK);
28612- per_cpu(kernel_stack, cpu) =
28613- (unsigned long)task_stack_page(idle) -
28614- KERNEL_STACK_OFFSET + THREAD_SIZE;
28615+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28616 #endif
28617 xen_setup_runstate_info(cpu);
28618 xen_setup_timer(cpu);
28619diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
28620index 9a95a9c..4f39e774 100644
28621--- a/arch/x86/xen/xen-asm_32.S
28622+++ b/arch/x86/xen/xen-asm_32.S
28623@@ -83,14 +83,14 @@ ENTRY(xen_iret)
28624 ESP_OFFSET=4 # bytes pushed onto stack
28625
28626 /*
28627- * Store vcpu_info pointer for easy access. Do it this way to
28628- * avoid having to reload %fs
28629+ * Store vcpu_info pointer for easy access.
28630 */
28631 #ifdef CONFIG_SMP
28632- GET_THREAD_INFO(%eax)
28633- movl TI_cpu(%eax), %eax
28634- movl __per_cpu_offset(,%eax,4), %eax
28635- mov per_cpu__xen_vcpu(%eax), %eax
28636+ push %fs
28637+ mov $(__KERNEL_PERCPU), %eax
28638+ mov %eax, %fs
28639+ mov PER_CPU_VAR(xen_vcpu), %eax
28640+ pop %fs
28641 #else
28642 movl per_cpu__xen_vcpu, %eax
28643 #endif
28644diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
28645index 1a5ff24..a187d40 100644
28646--- a/arch/x86/xen/xen-head.S
28647+++ b/arch/x86/xen/xen-head.S
28648@@ -19,6 +19,17 @@ ENTRY(startup_xen)
28649 #ifdef CONFIG_X86_32
28650 mov %esi,xen_start_info
28651 mov $init_thread_union+THREAD_SIZE,%esp
28652+#ifdef CONFIG_SMP
28653+ movl $cpu_gdt_table,%edi
28654+ movl $__per_cpu_load,%eax
28655+ movw %ax,__KERNEL_PERCPU + 2(%edi)
28656+ rorl $16,%eax
28657+ movb %al,__KERNEL_PERCPU + 4(%edi)
28658+ movb %ah,__KERNEL_PERCPU + 7(%edi)
28659+ movl $__per_cpu_end - 1,%eax
28660+ subl $__per_cpu_start,%eax
28661+ movw %ax,__KERNEL_PERCPU + 0(%edi)
28662+#endif
28663 #else
28664 mov %rsi,xen_start_info
28665 mov $init_thread_union+THREAD_SIZE,%rsp
28666diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
28667index f9153a3..51eab3d 100644
28668--- a/arch/x86/xen/xen-ops.h
28669+++ b/arch/x86/xen/xen-ops.h
28670@@ -10,8 +10,6 @@
28671 extern const char xen_hypervisor_callback[];
28672 extern const char xen_failsafe_callback[];
28673
28674-extern void *xen_initial_gdt;
28675-
28676 struct trap_info;
28677 void xen_copy_trap_info(struct trap_info *traps);
28678
28679diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
28680index 525bd3d..ef888b1 100644
28681--- a/arch/xtensa/variants/dc232b/include/variant/core.h
28682+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
28683@@ -119,9 +119,9 @@
28684 ----------------------------------------------------------------------*/
28685
28686 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
28687-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
28688 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
28689 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
28690+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28691
28692 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
28693 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
28694diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
28695index 2f33760..835e50a 100644
28696--- a/arch/xtensa/variants/fsf/include/variant/core.h
28697+++ b/arch/xtensa/variants/fsf/include/variant/core.h
28698@@ -11,6 +11,7 @@
28699 #ifndef _XTENSA_CORE_H
28700 #define _XTENSA_CORE_H
28701
28702+#include <linux/const.h>
28703
28704 /****************************************************************************
28705 Parameters Useful for Any Code, USER or PRIVILEGED
28706@@ -112,9 +113,9 @@
28707 ----------------------------------------------------------------------*/
28708
28709 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28710-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28711 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28712 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28713+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28714
28715 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
28716 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
28717diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
28718index af00795..2bb8105 100644
28719--- a/arch/xtensa/variants/s6000/include/variant/core.h
28720+++ b/arch/xtensa/variants/s6000/include/variant/core.h
28721@@ -11,6 +11,7 @@
28722 #ifndef _XTENSA_CORE_CONFIGURATION_H
28723 #define _XTENSA_CORE_CONFIGURATION_H
28724
28725+#include <linux/const.h>
28726
28727 /****************************************************************************
28728 Parameters Useful for Any Code, USER or PRIVILEGED
28729@@ -118,9 +119,9 @@
28730 ----------------------------------------------------------------------*/
28731
28732 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28733-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28734 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28735 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28736+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28737
28738 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
28739 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
28740diff --git a/block/blk-integrity.c b/block/blk-integrity.c
28741index 15c6308..96e83c2 100644
28742--- a/block/blk-integrity.c
28743+++ b/block/blk-integrity.c
28744@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
28745 NULL,
28746 };
28747
28748-static struct sysfs_ops integrity_ops = {
28749+static const struct sysfs_ops integrity_ops = {
28750 .show = &integrity_attr_show,
28751 .store = &integrity_attr_store,
28752 };
28753diff --git a/block/blk-ioc.c b/block/blk-ioc.c
28754index d4ed600..cbdabb0 100644
28755--- a/block/blk-ioc.c
28756+++ b/block/blk-ioc.c
28757@@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
28758 }
28759
28760 /* Called by the exitting task */
28761-void exit_io_context(void)
28762+void exit_io_context(struct task_struct *task)
28763 {
28764 struct io_context *ioc;
28765
28766- task_lock(current);
28767- ioc = current->io_context;
28768- current->io_context = NULL;
28769- task_unlock(current);
28770+ task_lock(task);
28771+ ioc = task->io_context;
28772+ task->io_context = NULL;
28773+ task_unlock(task);
28774
28775 if (atomic_dec_and_test(&ioc->nr_tasks)) {
28776 if (ioc->aic && ioc->aic->exit)
28777 ioc->aic->exit(ioc->aic);
28778 cfq_exit(ioc);
28779
28780- put_io_context(ioc);
28781 }
28782+ put_io_context(ioc);
28783 }
28784
28785 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
28786diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
28787index ca56420..f2fc409 100644
28788--- a/block/blk-iopoll.c
28789+++ b/block/blk-iopoll.c
28790@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
28791 }
28792 EXPORT_SYMBOL(blk_iopoll_complete);
28793
28794-static void blk_iopoll_softirq(struct softirq_action *h)
28795+static void blk_iopoll_softirq(void)
28796 {
28797 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
28798 int rearm = 0, budget = blk_iopoll_budget;
28799diff --git a/block/blk-map.c b/block/blk-map.c
28800index 30a7e51..0aeec6a 100644
28801--- a/block/blk-map.c
28802+++ b/block/blk-map.c
28803@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
28804 * direct dma. else, set up kernel bounce buffers
28805 */
28806 uaddr = (unsigned long) ubuf;
28807- if (blk_rq_aligned(q, ubuf, len) && !map_data)
28808+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
28809 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
28810 else
28811 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
28812@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
28813 for (i = 0; i < iov_count; i++) {
28814 unsigned long uaddr = (unsigned long)iov[i].iov_base;
28815
28816+ if (!iov[i].iov_len)
28817+ return -EINVAL;
28818+
28819 if (uaddr & queue_dma_alignment(q)) {
28820 unaligned = 1;
28821 break;
28822 }
28823- if (!iov[i].iov_len)
28824- return -EINVAL;
28825 }
28826
28827 if (unaligned || (q->dma_pad_mask & len) || map_data)
28828@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
28829 if (!len || !kbuf)
28830 return -EINVAL;
28831
28832- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
28833+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
28834 if (do_copy)
28835 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
28836 else
28837diff --git a/block/blk-softirq.c b/block/blk-softirq.c
28838index ee9c216..58d410a 100644
28839--- a/block/blk-softirq.c
28840+++ b/block/blk-softirq.c
28841@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
28842 * Softirq action handler - move entries to local list and loop over them
28843 * while passing them to the queue registered handler.
28844 */
28845-static void blk_done_softirq(struct softirq_action *h)
28846+static void blk_done_softirq(void)
28847 {
28848 struct list_head *cpu_list, local_list;
28849
28850diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
28851index bb9c5ea..5330d48 100644
28852--- a/block/blk-sysfs.c
28853+++ b/block/blk-sysfs.c
28854@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
28855 kmem_cache_free(blk_requestq_cachep, q);
28856 }
28857
28858-static struct sysfs_ops queue_sysfs_ops = {
28859+static const struct sysfs_ops queue_sysfs_ops = {
28860 .show = queue_attr_show,
28861 .store = queue_attr_store,
28862 };
28863diff --git a/block/bsg.c b/block/bsg.c
28864index e3e3241..759ebf7 100644
28865--- a/block/bsg.c
28866+++ b/block/bsg.c
28867@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28868 struct sg_io_v4 *hdr, struct bsg_device *bd,
28869 fmode_t has_write_perm)
28870 {
28871+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28872+ unsigned char *cmdptr;
28873+
28874 if (hdr->request_len > BLK_MAX_CDB) {
28875 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28876 if (!rq->cmd)
28877 return -ENOMEM;
28878- }
28879+ cmdptr = rq->cmd;
28880+ } else
28881+ cmdptr = tmpcmd;
28882
28883- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
28884+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28885 hdr->request_len))
28886 return -EFAULT;
28887
28888+ if (cmdptr != rq->cmd)
28889+ memcpy(rq->cmd, cmdptr, hdr->request_len);
28890+
28891 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28892 if (blk_verify_command(rq->cmd, has_write_perm))
28893 return -EPERM;
28894@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28895 rq->next_rq = next_rq;
28896 next_rq->cmd_type = rq->cmd_type;
28897
28898- dxferp = (void*)(unsigned long)hdr->din_xferp;
28899+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28900 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
28901 hdr->din_xfer_len, GFP_KERNEL);
28902 if (ret)
28903@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28904
28905 if (hdr->dout_xfer_len) {
28906 dxfer_len = hdr->dout_xfer_len;
28907- dxferp = (void*)(unsigned long)hdr->dout_xferp;
28908+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
28909 } else if (hdr->din_xfer_len) {
28910 dxfer_len = hdr->din_xfer_len;
28911- dxferp = (void*)(unsigned long)hdr->din_xferp;
28912+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28913 } else
28914 dxfer_len = 0;
28915
28916@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
28917 int len = min_t(unsigned int, hdr->max_response_len,
28918 rq->sense_len);
28919
28920- ret = copy_to_user((void*)(unsigned long)hdr->response,
28921+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
28922 rq->sense, len);
28923 if (!ret)
28924 hdr->response_len = len;
28925diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28926index 9bd086c..ca1fc22 100644
28927--- a/block/compat_ioctl.c
28928+++ b/block/compat_ioctl.c
28929@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28930 err |= __get_user(f->spec1, &uf->spec1);
28931 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28932 err |= __get_user(name, &uf->name);
28933- f->name = compat_ptr(name);
28934+ f->name = (void __force_kernel *)compat_ptr(name);
28935 if (err) {
28936 err = -EFAULT;
28937 goto out;
28938diff --git a/block/elevator.c b/block/elevator.c
28939index a847046..75a1746 100644
28940--- a/block/elevator.c
28941+++ b/block/elevator.c
28942@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
28943 return error;
28944 }
28945
28946-static struct sysfs_ops elv_sysfs_ops = {
28947+static const struct sysfs_ops elv_sysfs_ops = {
28948 .show = elv_attr_show,
28949 .store = elv_attr_store,
28950 };
28951diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
28952index 2be0a97..bded3fd 100644
28953--- a/block/scsi_ioctl.c
28954+++ b/block/scsi_ioctl.c
28955@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
28956 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
28957 struct sg_io_hdr *hdr, fmode_t mode)
28958 {
28959- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
28960+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28961+ unsigned char *cmdptr;
28962+
28963+ if (rq->cmd != rq->__cmd)
28964+ cmdptr = rq->cmd;
28965+ else
28966+ cmdptr = tmpcmd;
28967+
28968+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
28969 return -EFAULT;
28970+
28971+ if (cmdptr != rq->cmd)
28972+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
28973+
28974 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
28975 return -EPERM;
28976
28977@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28978 int err;
28979 unsigned int in_len, out_len, bytes, opcode, cmdlen;
28980 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
28981+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28982+ unsigned char *cmdptr;
28983
28984 if (!sic)
28985 return -EINVAL;
28986@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28987 */
28988 err = -EFAULT;
28989 rq->cmd_len = cmdlen;
28990- if (copy_from_user(rq->cmd, sic->data, cmdlen))
28991+
28992+ if (rq->cmd != rq->__cmd)
28993+ cmdptr = rq->cmd;
28994+ else
28995+ cmdptr = tmpcmd;
28996+
28997+ if (copy_from_user(cmdptr, sic->data, cmdlen))
28998 goto error;
28999
29000+ if (rq->cmd != cmdptr)
29001+ memcpy(rq->cmd, cmdptr, cmdlen);
29002+
29003 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
29004 goto error;
29005
29006diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
29007index f6f0833..514d986 100644
29008--- a/crypto/ablkcipher.c
29009+++ b/crypto/ablkcipher.c
29010@@ -29,6 +29,8 @@
29011 static const char *skcipher_default_geniv __read_mostly;
29012
29013 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29014+ unsigned int keylen) __size_overflow(3);
29015+static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29016 unsigned int keylen)
29017 {
29018 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
29019@@ -51,6 +53,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29020 }
29021
29022 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
29023+ unsigned int keylen) __size_overflow(3);
29024+static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
29025 unsigned int keylen)
29026 {
29027 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
29028diff --git a/crypto/aead.c b/crypto/aead.c
29029index 0a55da7..9256a04 100644
29030--- a/crypto/aead.c
29031+++ b/crypto/aead.c
29032@@ -25,6 +25,8 @@
29033 #include "internal.h"
29034
29035 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29036+ unsigned int keylen) __size_overflow(3);
29037+static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29038 unsigned int keylen)
29039 {
29040 struct aead_alg *aead = crypto_aead_alg(tfm);
29041@@ -46,6 +48,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
29042 return ret;
29043 }
29044
29045+static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29046 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
29047 {
29048 struct aead_alg *aead = crypto_aead_alg(tfm);
29049diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
29050index 90d26c9..3db7c03 100644
29051--- a/crypto/blkcipher.c
29052+++ b/crypto/blkcipher.c
29053@@ -357,6 +357,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
29054 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
29055
29056 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29057+ unsigned int keylen) __size_overflow(3);
29058+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29059 unsigned int keylen)
29060 {
29061 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
29062@@ -378,6 +380,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29063 return ret;
29064 }
29065
29066+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29067 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29068 {
29069 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
29070diff --git a/crypto/cipher.c b/crypto/cipher.c
29071index 9a1a731..41454c2 100644
29072--- a/crypto/cipher.c
29073+++ b/crypto/cipher.c
29074@@ -21,6 +21,8 @@
29075 #include "internal.h"
29076
29077 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29078+ unsigned int keylen) __size_overflow(3);
29079+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29080 unsigned int keylen)
29081 {
29082 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29083@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29084
29085 }
29086
29087+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29088 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29089 {
29090 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29091diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29092index 3533582..f143117 100644
29093--- a/crypto/cryptd.c
29094+++ b/crypto/cryptd.c
29095@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
29096
29097 struct cryptd_blkcipher_request_ctx {
29098 crypto_completion_t complete;
29099-};
29100+} __no_const;
29101
29102 struct cryptd_hash_ctx {
29103 struct crypto_shash *child;
29104diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
29105index a90d260..7a9765e 100644
29106--- a/crypto/gf128mul.c
29107+++ b/crypto/gf128mul.c
29108@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
29109 for (i = 0; i < 7; ++i)
29110 gf128mul_x_lle(&p[i + 1], &p[i]);
29111
29112- memset(r, 0, sizeof(r));
29113+ memset(r, 0, sizeof(*r));
29114 for (i = 0;;) {
29115 u8 ch = ((u8 *)b)[15 - i];
29116
29117@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
29118 for (i = 0; i < 7; ++i)
29119 gf128mul_x_bbe(&p[i + 1], &p[i]);
29120
29121- memset(r, 0, sizeof(r));
29122+ memset(r, 0, sizeof(*r));
29123 for (i = 0;;) {
29124 u8 ch = ((u8 *)b)[i];
29125
29126diff --git a/crypto/serpent.c b/crypto/serpent.c
29127index b651a55..023297d 100644
29128--- a/crypto/serpent.c
29129+++ b/crypto/serpent.c
29130@@ -21,6 +21,7 @@
29131 #include <asm/byteorder.h>
29132 #include <linux/crypto.h>
29133 #include <linux/types.h>
29134+#include <linux/sched.h>
29135
29136 /* Key is padded to the maximum of 256 bits before round key generation.
29137 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
29138@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
29139 u32 r0,r1,r2,r3,r4;
29140 int i;
29141
29142+ pax_track_stack();
29143+
29144 /* Copy key, add padding */
29145
29146 for (i = 0; i < keylen; ++i)
29147diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
29148index 0d2cdb8..d8de48d 100644
29149--- a/drivers/acpi/acpi_pad.c
29150+++ b/drivers/acpi/acpi_pad.c
29151@@ -30,7 +30,7 @@
29152 #include <acpi/acpi_bus.h>
29153 #include <acpi/acpi_drivers.h>
29154
29155-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
29156+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
29157 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
29158 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
29159 static DEFINE_MUTEX(isolated_cpus_lock);
29160diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
29161index 3f4602b..1978af1 100644
29162--- a/drivers/acpi/battery.c
29163+++ b/drivers/acpi/battery.c
29164@@ -678,6 +678,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
29165
29166 static ssize_t acpi_battery_write_alarm(struct file *file,
29167 const char __user * buffer,
29168+ size_t count, loff_t * ppos) __size_overflow(3);
29169+static ssize_t acpi_battery_write_alarm(struct file *file,
29170+ const char __user * buffer,
29171 size_t count, loff_t * ppos)
29172 {
29173 int result = 0;
29174@@ -763,7 +766,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
29175 }
29176
29177 static struct battery_file {
29178- struct file_operations ops;
29179+ const struct file_operations ops;
29180 mode_t mode;
29181 const char *name;
29182 } acpi_battery_file[] = {
29183diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
29184index 7338b6a..82f0257 100644
29185--- a/drivers/acpi/dock.c
29186+++ b/drivers/acpi/dock.c
29187@@ -77,7 +77,7 @@ struct dock_dependent_device {
29188 struct list_head list;
29189 struct list_head hotplug_list;
29190 acpi_handle handle;
29191- struct acpi_dock_ops *ops;
29192+ const struct acpi_dock_ops *ops;
29193 void *context;
29194 };
29195
29196@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
29197 * the dock driver after _DCK is executed.
29198 */
29199 int
29200-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
29201+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
29202 void *context)
29203 {
29204 struct dock_dependent_device *dd;
29205diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
29206index 7c1c59e..2993595 100644
29207--- a/drivers/acpi/osl.c
29208+++ b/drivers/acpi/osl.c
29209@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
29210 void __iomem *virt_addr;
29211
29212 virt_addr = ioremap(phys_addr, width);
29213+ if (!virt_addr)
29214+ return AE_NO_MEMORY;
29215 if (!value)
29216 value = &dummy;
29217
29218@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
29219 void __iomem *virt_addr;
29220
29221 virt_addr = ioremap(phys_addr, width);
29222+ if (!virt_addr)
29223+ return AE_NO_MEMORY;
29224
29225 switch (width) {
29226 case 8:
29227diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
29228index c216062..eec10d2 100644
29229--- a/drivers/acpi/power_meter.c
29230+++ b/drivers/acpi/power_meter.c
29231@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29232 return res;
29233
29234 temp /= 1000;
29235- if (temp < 0)
29236- return -EINVAL;
29237
29238 mutex_lock(&resource->lock);
29239 resource->trip[attr->index - 7] = temp;
29240diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29241index d0d25e2..961643d 100644
29242--- a/drivers/acpi/proc.c
29243+++ b/drivers/acpi/proc.c
29244@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
29245 size_t count, loff_t * ppos)
29246 {
29247 struct list_head *node, *next;
29248- char strbuf[5];
29249- char str[5] = "";
29250- unsigned int len = count;
29251+ char strbuf[5] = {0};
29252 struct acpi_device *found_dev = NULL;
29253
29254- if (len > 4)
29255- len = 4;
29256- if (len < 0)
29257- return -EFAULT;
29258+ if (count > 4)
29259+ count = 4;
29260
29261- if (copy_from_user(strbuf, buffer, len))
29262+ if (copy_from_user(strbuf, buffer, count))
29263 return -EFAULT;
29264- strbuf[len] = '\0';
29265- sscanf(strbuf, "%s", str);
29266+ strbuf[count] = '\0';
29267
29268 mutex_lock(&acpi_device_lock);
29269 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29270@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
29271 if (!dev->wakeup.flags.valid)
29272 continue;
29273
29274- if (!strncmp(dev->pnp.bus_id, str, 4)) {
29275+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29276 dev->wakeup.state.enabled =
29277 dev->wakeup.state.enabled ? 0 : 1;
29278 found_dev = dev;
29279diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
29280index 7102474..de8ad22 100644
29281--- a/drivers/acpi/processor_core.c
29282+++ b/drivers/acpi/processor_core.c
29283@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29284 return 0;
29285 }
29286
29287- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29288+ BUG_ON(pr->id >= nr_cpu_ids);
29289
29290 /*
29291 * Buggy BIOS check
29292diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
29293index 52b9db8..a519aab 100644
29294--- a/drivers/acpi/sbs.c
29295+++ b/drivers/acpi/sbs.c
29296@@ -647,6 +647,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
29297
29298 static ssize_t
29299 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29300+ size_t count, loff_t * ppos) __size_overflow(3);
29301+static ssize_t
29302+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29303 size_t count, loff_t * ppos)
29304 {
29305 struct seq_file *seq = file->private_data;
29306diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
29307index d933980..5761f13 100644
29308--- a/drivers/acpi/sbshc.c
29309+++ b/drivers/acpi/sbshc.c
29310@@ -17,7 +17,7 @@
29311
29312 #define PREFIX "ACPI: "
29313
29314-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
29315+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
29316 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
29317
29318 struct acpi_smb_hc {
29319diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
29320index 0458094..6978e7b 100644
29321--- a/drivers/acpi/sleep.c
29322+++ b/drivers/acpi/sleep.c
29323@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
29324 }
29325 }
29326
29327-static struct platform_suspend_ops acpi_suspend_ops = {
29328+static const struct platform_suspend_ops acpi_suspend_ops = {
29329 .valid = acpi_suspend_state_valid,
29330 .begin = acpi_suspend_begin,
29331 .prepare_late = acpi_pm_prepare,
29332@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
29333 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29334 * been requested.
29335 */
29336-static struct platform_suspend_ops acpi_suspend_ops_old = {
29337+static const struct platform_suspend_ops acpi_suspend_ops_old = {
29338 .valid = acpi_suspend_state_valid,
29339 .begin = acpi_suspend_begin_old,
29340 .prepare_late = acpi_pm_disable_gpes,
29341@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
29342 acpi_enable_all_runtime_gpes();
29343 }
29344
29345-static struct platform_hibernation_ops acpi_hibernation_ops = {
29346+static const struct platform_hibernation_ops acpi_hibernation_ops = {
29347 .begin = acpi_hibernation_begin,
29348 .end = acpi_pm_end,
29349 .pre_snapshot = acpi_hibernation_pre_snapshot,
29350@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
29351 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29352 * been requested.
29353 */
29354-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
29355+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
29356 .begin = acpi_hibernation_begin_old,
29357 .end = acpi_pm_end,
29358 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
29359diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
29360index 05dff63..b662ab7 100644
29361--- a/drivers/acpi/video.c
29362+++ b/drivers/acpi/video.c
29363@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
29364 vd->brightness->levels[request_level]);
29365 }
29366
29367-static struct backlight_ops acpi_backlight_ops = {
29368+static const struct backlight_ops acpi_backlight_ops = {
29369 .get_brightness = acpi_video_get_brightness,
29370 .update_status = acpi_video_set_brightness,
29371 };
29372diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
29373index 6787aab..23ffb0e 100644
29374--- a/drivers/ata/ahci.c
29375+++ b/drivers/ata/ahci.c
29376@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
29377 .sdev_attrs = ahci_sdev_attrs,
29378 };
29379
29380-static struct ata_port_operations ahci_ops = {
29381+static const struct ata_port_operations ahci_ops = {
29382 .inherits = &sata_pmp_port_ops,
29383
29384 .qc_defer = sata_pmp_qc_defer_cmd_switch,
29385@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
29386 .port_stop = ahci_port_stop,
29387 };
29388
29389-static struct ata_port_operations ahci_vt8251_ops = {
29390+static const struct ata_port_operations ahci_vt8251_ops = {
29391 .inherits = &ahci_ops,
29392 .hardreset = ahci_vt8251_hardreset,
29393 };
29394
29395-static struct ata_port_operations ahci_p5wdh_ops = {
29396+static const struct ata_port_operations ahci_p5wdh_ops = {
29397 .inherits = &ahci_ops,
29398 .hardreset = ahci_p5wdh_hardreset,
29399 };
29400
29401-static struct ata_port_operations ahci_sb600_ops = {
29402+static const struct ata_port_operations ahci_sb600_ops = {
29403 .inherits = &ahci_ops,
29404 .softreset = ahci_sb600_softreset,
29405 .pmp_softreset = ahci_sb600_softreset,
29406diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
29407index 99e7196..4968c77 100644
29408--- a/drivers/ata/ata_generic.c
29409+++ b/drivers/ata/ata_generic.c
29410@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
29411 ATA_BMDMA_SHT(DRV_NAME),
29412 };
29413
29414-static struct ata_port_operations generic_port_ops = {
29415+static const struct ata_port_operations generic_port_ops = {
29416 .inherits = &ata_bmdma_port_ops,
29417 .cable_detect = ata_cable_unknown,
29418 .set_mode = generic_set_mode,
29419diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
29420index c33591d..000c121 100644
29421--- a/drivers/ata/ata_piix.c
29422+++ b/drivers/ata/ata_piix.c
29423@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
29424 ATA_BMDMA_SHT(DRV_NAME),
29425 };
29426
29427-static struct ata_port_operations piix_pata_ops = {
29428+static const struct ata_port_operations piix_pata_ops = {
29429 .inherits = &ata_bmdma32_port_ops,
29430 .cable_detect = ata_cable_40wire,
29431 .set_piomode = piix_set_piomode,
29432@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
29433 .prereset = piix_pata_prereset,
29434 };
29435
29436-static struct ata_port_operations piix_vmw_ops = {
29437+static const struct ata_port_operations piix_vmw_ops = {
29438 .inherits = &piix_pata_ops,
29439 .bmdma_status = piix_vmw_bmdma_status,
29440 };
29441
29442-static struct ata_port_operations ich_pata_ops = {
29443+static const struct ata_port_operations ich_pata_ops = {
29444 .inherits = &piix_pata_ops,
29445 .cable_detect = ich_pata_cable_detect,
29446 .set_dmamode = ich_set_dmamode,
29447 };
29448
29449-static struct ata_port_operations piix_sata_ops = {
29450+static const struct ata_port_operations piix_sata_ops = {
29451 .inherits = &ata_bmdma_port_ops,
29452 };
29453
29454-static struct ata_port_operations piix_sidpr_sata_ops = {
29455+static const struct ata_port_operations piix_sidpr_sata_ops = {
29456 .inherits = &piix_sata_ops,
29457 .hardreset = sata_std_hardreset,
29458 .scr_read = piix_sidpr_scr_read,
29459diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
29460index b0882cd..c295d65 100644
29461--- a/drivers/ata/libata-acpi.c
29462+++ b/drivers/ata/libata-acpi.c
29463@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
29464 ata_acpi_uevent(dev->link->ap, dev, event);
29465 }
29466
29467-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29468+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29469 .handler = ata_acpi_dev_notify_dock,
29470 .uevent = ata_acpi_dev_uevent,
29471 };
29472
29473-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29474+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29475 .handler = ata_acpi_ap_notify_dock,
29476 .uevent = ata_acpi_ap_uevent,
29477 };
29478diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29479index d4f7f99..94f603e 100644
29480--- a/drivers/ata/libata-core.c
29481+++ b/drivers/ata/libata-core.c
29482@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29483 struct ata_port *ap;
29484 unsigned int tag;
29485
29486- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29487+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29488 ap = qc->ap;
29489
29490 qc->flags = 0;
29491@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29492 struct ata_port *ap;
29493 struct ata_link *link;
29494
29495- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29496+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29497 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29498 ap = qc->ap;
29499 link = qc->dev->link;
29500@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
29501 * LOCKING:
29502 * None.
29503 */
29504-static void ata_finalize_port_ops(struct ata_port_operations *ops)
29505+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
29506 {
29507 static DEFINE_SPINLOCK(lock);
29508 const struct ata_port_operations *cur;
29509@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29510 return;
29511
29512 spin_lock(&lock);
29513+ pax_open_kernel();
29514
29515 for (cur = ops->inherits; cur; cur = cur->inherits) {
29516 void **inherit = (void **)cur;
29517@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29518 if (IS_ERR(*pp))
29519 *pp = NULL;
29520
29521- ops->inherits = NULL;
29522+ *(struct ata_port_operations **)&ops->inherits = NULL;
29523
29524+ pax_close_kernel();
29525 spin_unlock(&lock);
29526 }
29527
29528@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
29529 */
29530 /* KILLME - the only user left is ipr */
29531 void ata_host_init(struct ata_host *host, struct device *dev,
29532- unsigned long flags, struct ata_port_operations *ops)
29533+ unsigned long flags, const struct ata_port_operations *ops)
29534 {
29535 spin_lock_init(&host->lock);
29536 host->dev = dev;
29537@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
29538 /* truly dummy */
29539 }
29540
29541-struct ata_port_operations ata_dummy_port_ops = {
29542+const struct ata_port_operations ata_dummy_port_ops = {
29543 .qc_prep = ata_noop_qc_prep,
29544 .qc_issue = ata_dummy_qc_issue,
29545 .error_handler = ata_dummy_error_handler,
29546diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
29547index e5bdb9b..45a8e72 100644
29548--- a/drivers/ata/libata-eh.c
29549+++ b/drivers/ata/libata-eh.c
29550@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
29551 {
29552 struct ata_link *link;
29553
29554+ pax_track_stack();
29555+
29556 ata_for_each_link(link, ap, HOST_FIRST)
29557 ata_eh_link_report(link);
29558 }
29559@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
29560 */
29561 void ata_std_error_handler(struct ata_port *ap)
29562 {
29563- struct ata_port_operations *ops = ap->ops;
29564+ const struct ata_port_operations *ops = ap->ops;
29565 ata_reset_fn_t hardreset = ops->hardreset;
29566
29567 /* ignore built-in hardreset if SCR access is not available */
29568diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
29569index 51f0ffb..19ce3e3 100644
29570--- a/drivers/ata/libata-pmp.c
29571+++ b/drivers/ata/libata-pmp.c
29572@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
29573 */
29574 static int sata_pmp_eh_recover(struct ata_port *ap)
29575 {
29576- struct ata_port_operations *ops = ap->ops;
29577+ const struct ata_port_operations *ops = ap->ops;
29578 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
29579 struct ata_link *pmp_link = &ap->link;
29580 struct ata_device *pmp_dev = pmp_link->device;
29581diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
29582index d8f35fe..288180a 100644
29583--- a/drivers/ata/pata_acpi.c
29584+++ b/drivers/ata/pata_acpi.c
29585@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
29586 ATA_BMDMA_SHT(DRV_NAME),
29587 };
29588
29589-static struct ata_port_operations pacpi_ops = {
29590+static const struct ata_port_operations pacpi_ops = {
29591 .inherits = &ata_bmdma_port_ops,
29592 .qc_issue = pacpi_qc_issue,
29593 .cable_detect = pacpi_cable_detect,
29594diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
29595index 9434114..1f2f364 100644
29596--- a/drivers/ata/pata_ali.c
29597+++ b/drivers/ata/pata_ali.c
29598@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
29599 * Port operations for PIO only ALi
29600 */
29601
29602-static struct ata_port_operations ali_early_port_ops = {
29603+static const struct ata_port_operations ali_early_port_ops = {
29604 .inherits = &ata_sff_port_ops,
29605 .cable_detect = ata_cable_40wire,
29606 .set_piomode = ali_set_piomode,
29607@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
29608 * Port operations for DMA capable ALi without cable
29609 * detect
29610 */
29611-static struct ata_port_operations ali_20_port_ops = {
29612+static const struct ata_port_operations ali_20_port_ops = {
29613 .inherits = &ali_dma_base_ops,
29614 .cable_detect = ata_cable_40wire,
29615 .mode_filter = ali_20_filter,
29616@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
29617 /*
29618 * Port operations for DMA capable ALi with cable detect
29619 */
29620-static struct ata_port_operations ali_c2_port_ops = {
29621+static const struct ata_port_operations ali_c2_port_ops = {
29622 .inherits = &ali_dma_base_ops,
29623 .check_atapi_dma = ali_check_atapi_dma,
29624 .cable_detect = ali_c2_cable_detect,
29625@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
29626 /*
29627 * Port operations for DMA capable ALi with cable detect
29628 */
29629-static struct ata_port_operations ali_c4_port_ops = {
29630+static const struct ata_port_operations ali_c4_port_ops = {
29631 .inherits = &ali_dma_base_ops,
29632 .check_atapi_dma = ali_check_atapi_dma,
29633 .cable_detect = ali_c2_cable_detect,
29634@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
29635 /*
29636 * Port operations for DMA capable ALi with cable detect and LBA48
29637 */
29638-static struct ata_port_operations ali_c5_port_ops = {
29639+static const struct ata_port_operations ali_c5_port_ops = {
29640 .inherits = &ali_dma_base_ops,
29641 .check_atapi_dma = ali_check_atapi_dma,
29642 .dev_config = ali_warn_atapi_dma,
29643diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
29644index 567f3f7..c8ee0da 100644
29645--- a/drivers/ata/pata_amd.c
29646+++ b/drivers/ata/pata_amd.c
29647@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
29648 .prereset = amd_pre_reset,
29649 };
29650
29651-static struct ata_port_operations amd33_port_ops = {
29652+static const struct ata_port_operations amd33_port_ops = {
29653 .inherits = &amd_base_port_ops,
29654 .cable_detect = ata_cable_40wire,
29655 .set_piomode = amd33_set_piomode,
29656 .set_dmamode = amd33_set_dmamode,
29657 };
29658
29659-static struct ata_port_operations amd66_port_ops = {
29660+static const struct ata_port_operations amd66_port_ops = {
29661 .inherits = &amd_base_port_ops,
29662 .cable_detect = ata_cable_unknown,
29663 .set_piomode = amd66_set_piomode,
29664 .set_dmamode = amd66_set_dmamode,
29665 };
29666
29667-static struct ata_port_operations amd100_port_ops = {
29668+static const struct ata_port_operations amd100_port_ops = {
29669 .inherits = &amd_base_port_ops,
29670 .cable_detect = ata_cable_unknown,
29671 .set_piomode = amd100_set_piomode,
29672 .set_dmamode = amd100_set_dmamode,
29673 };
29674
29675-static struct ata_port_operations amd133_port_ops = {
29676+static const struct ata_port_operations amd133_port_ops = {
29677 .inherits = &amd_base_port_ops,
29678 .cable_detect = amd_cable_detect,
29679 .set_piomode = amd133_set_piomode,
29680@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
29681 .host_stop = nv_host_stop,
29682 };
29683
29684-static struct ata_port_operations nv100_port_ops = {
29685+static const struct ata_port_operations nv100_port_ops = {
29686 .inherits = &nv_base_port_ops,
29687 .set_piomode = nv100_set_piomode,
29688 .set_dmamode = nv100_set_dmamode,
29689 };
29690
29691-static struct ata_port_operations nv133_port_ops = {
29692+static const struct ata_port_operations nv133_port_ops = {
29693 .inherits = &nv_base_port_ops,
29694 .set_piomode = nv133_set_piomode,
29695 .set_dmamode = nv133_set_dmamode,
29696diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
29697index d332cfd..4b7eaae 100644
29698--- a/drivers/ata/pata_artop.c
29699+++ b/drivers/ata/pata_artop.c
29700@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
29701 ATA_BMDMA_SHT(DRV_NAME),
29702 };
29703
29704-static struct ata_port_operations artop6210_ops = {
29705+static const struct ata_port_operations artop6210_ops = {
29706 .inherits = &ata_bmdma_port_ops,
29707 .cable_detect = ata_cable_40wire,
29708 .set_piomode = artop6210_set_piomode,
29709@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
29710 .qc_defer = artop6210_qc_defer,
29711 };
29712
29713-static struct ata_port_operations artop6260_ops = {
29714+static const struct ata_port_operations artop6260_ops = {
29715 .inherits = &ata_bmdma_port_ops,
29716 .cable_detect = artop6260_cable_detect,
29717 .set_piomode = artop6260_set_piomode,
29718diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
29719index 5c129f9..7bb7ccb 100644
29720--- a/drivers/ata/pata_at32.c
29721+++ b/drivers/ata/pata_at32.c
29722@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
29723 ATA_PIO_SHT(DRV_NAME),
29724 };
29725
29726-static struct ata_port_operations at32_port_ops = {
29727+static const struct ata_port_operations at32_port_ops = {
29728 .inherits = &ata_sff_port_ops,
29729 .cable_detect = ata_cable_40wire,
29730 .set_piomode = pata_at32_set_piomode,
29731diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
29732index 41c94b1..829006d 100644
29733--- a/drivers/ata/pata_at91.c
29734+++ b/drivers/ata/pata_at91.c
29735@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
29736 ATA_PIO_SHT(DRV_NAME),
29737 };
29738
29739-static struct ata_port_operations pata_at91_port_ops = {
29740+static const struct ata_port_operations pata_at91_port_ops = {
29741 .inherits = &ata_sff_port_ops,
29742
29743 .sff_data_xfer = pata_at91_data_xfer_noirq,
29744diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
29745index ae4454d..d391eb4 100644
29746--- a/drivers/ata/pata_atiixp.c
29747+++ b/drivers/ata/pata_atiixp.c
29748@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
29749 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29750 };
29751
29752-static struct ata_port_operations atiixp_port_ops = {
29753+static const struct ata_port_operations atiixp_port_ops = {
29754 .inherits = &ata_bmdma_port_ops,
29755
29756 .qc_prep = ata_sff_dumb_qc_prep,
29757diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
29758index 6fe7ded..2a425dc 100644
29759--- a/drivers/ata/pata_atp867x.c
29760+++ b/drivers/ata/pata_atp867x.c
29761@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
29762 ATA_BMDMA_SHT(DRV_NAME),
29763 };
29764
29765-static struct ata_port_operations atp867x_ops = {
29766+static const struct ata_port_operations atp867x_ops = {
29767 .inherits = &ata_bmdma_port_ops,
29768 .cable_detect = atp867x_cable_detect,
29769 .set_piomode = atp867x_set_piomode,
29770diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
29771index c4b47a3..b27a367 100644
29772--- a/drivers/ata/pata_bf54x.c
29773+++ b/drivers/ata/pata_bf54x.c
29774@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
29775 .dma_boundary = ATA_DMA_BOUNDARY,
29776 };
29777
29778-static struct ata_port_operations bfin_pata_ops = {
29779+static const struct ata_port_operations bfin_pata_ops = {
29780 .inherits = &ata_sff_port_ops,
29781
29782 .set_piomode = bfin_set_piomode,
29783diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
29784index 5acf9fa..84248be 100644
29785--- a/drivers/ata/pata_cmd640.c
29786+++ b/drivers/ata/pata_cmd640.c
29787@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
29788 ATA_BMDMA_SHT(DRV_NAME),
29789 };
29790
29791-static struct ata_port_operations cmd640_port_ops = {
29792+static const struct ata_port_operations cmd640_port_ops = {
29793 .inherits = &ata_bmdma_port_ops,
29794 /* In theory xfer_noirq is not needed once we kill the prefetcher */
29795 .sff_data_xfer = ata_sff_data_xfer_noirq,
29796diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
29797index ccd2694..c869c3d 100644
29798--- a/drivers/ata/pata_cmd64x.c
29799+++ b/drivers/ata/pata_cmd64x.c
29800@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
29801 .set_dmamode = cmd64x_set_dmamode,
29802 };
29803
29804-static struct ata_port_operations cmd64x_port_ops = {
29805+static const struct ata_port_operations cmd64x_port_ops = {
29806 .inherits = &cmd64x_base_ops,
29807 .cable_detect = ata_cable_40wire,
29808 };
29809
29810-static struct ata_port_operations cmd646r1_port_ops = {
29811+static const struct ata_port_operations cmd646r1_port_ops = {
29812 .inherits = &cmd64x_base_ops,
29813 .bmdma_stop = cmd646r1_bmdma_stop,
29814 .cable_detect = ata_cable_40wire,
29815 };
29816
29817-static struct ata_port_operations cmd648_port_ops = {
29818+static const struct ata_port_operations cmd648_port_ops = {
29819 .inherits = &cmd64x_base_ops,
29820 .bmdma_stop = cmd648_bmdma_stop,
29821 .cable_detect = cmd648_cable_detect,
29822diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
29823index 0df83cf..d7595b0 100644
29824--- a/drivers/ata/pata_cs5520.c
29825+++ b/drivers/ata/pata_cs5520.c
29826@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
29827 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29828 };
29829
29830-static struct ata_port_operations cs5520_port_ops = {
29831+static const struct ata_port_operations cs5520_port_ops = {
29832 .inherits = &ata_bmdma_port_ops,
29833 .qc_prep = ata_sff_dumb_qc_prep,
29834 .cable_detect = ata_cable_40wire,
29835diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
29836index c974b05..6d26b11 100644
29837--- a/drivers/ata/pata_cs5530.c
29838+++ b/drivers/ata/pata_cs5530.c
29839@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
29840 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29841 };
29842
29843-static struct ata_port_operations cs5530_port_ops = {
29844+static const struct ata_port_operations cs5530_port_ops = {
29845 .inherits = &ata_bmdma_port_ops,
29846
29847 .qc_prep = ata_sff_dumb_qc_prep,
29848diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
29849index 403f561..aacd26b 100644
29850--- a/drivers/ata/pata_cs5535.c
29851+++ b/drivers/ata/pata_cs5535.c
29852@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
29853 ATA_BMDMA_SHT(DRV_NAME),
29854 };
29855
29856-static struct ata_port_operations cs5535_port_ops = {
29857+static const struct ata_port_operations cs5535_port_ops = {
29858 .inherits = &ata_bmdma_port_ops,
29859 .cable_detect = cs5535_cable_detect,
29860 .set_piomode = cs5535_set_piomode,
29861diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
29862index 6da4cb4..de24a25 100644
29863--- a/drivers/ata/pata_cs5536.c
29864+++ b/drivers/ata/pata_cs5536.c
29865@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
29866 ATA_BMDMA_SHT(DRV_NAME),
29867 };
29868
29869-static struct ata_port_operations cs5536_port_ops = {
29870+static const struct ata_port_operations cs5536_port_ops = {
29871 .inherits = &ata_bmdma_port_ops,
29872 .cable_detect = cs5536_cable_detect,
29873 .set_piomode = cs5536_set_piomode,
29874diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
29875index 8fb040b..b16a9c9 100644
29876--- a/drivers/ata/pata_cypress.c
29877+++ b/drivers/ata/pata_cypress.c
29878@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
29879 ATA_BMDMA_SHT(DRV_NAME),
29880 };
29881
29882-static struct ata_port_operations cy82c693_port_ops = {
29883+static const struct ata_port_operations cy82c693_port_ops = {
29884 .inherits = &ata_bmdma_port_ops,
29885 .cable_detect = ata_cable_40wire,
29886 .set_piomode = cy82c693_set_piomode,
29887diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
29888index 2a6412f..555ee11 100644
29889--- a/drivers/ata/pata_efar.c
29890+++ b/drivers/ata/pata_efar.c
29891@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
29892 ATA_BMDMA_SHT(DRV_NAME),
29893 };
29894
29895-static struct ata_port_operations efar_ops = {
29896+static const struct ata_port_operations efar_ops = {
29897 .inherits = &ata_bmdma_port_ops,
29898 .cable_detect = efar_cable_detect,
29899 .set_piomode = efar_set_piomode,
29900diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
29901index b9d8836..0b92030 100644
29902--- a/drivers/ata/pata_hpt366.c
29903+++ b/drivers/ata/pata_hpt366.c
29904@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
29905 * Configuration for HPT366/68
29906 */
29907
29908-static struct ata_port_operations hpt366_port_ops = {
29909+static const struct ata_port_operations hpt366_port_ops = {
29910 .inherits = &ata_bmdma_port_ops,
29911 .cable_detect = hpt36x_cable_detect,
29912 .mode_filter = hpt366_filter,
29913diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
29914index 5af7f19..00c4980 100644
29915--- a/drivers/ata/pata_hpt37x.c
29916+++ b/drivers/ata/pata_hpt37x.c
29917@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
29918 * Configuration for HPT370
29919 */
29920
29921-static struct ata_port_operations hpt370_port_ops = {
29922+static const struct ata_port_operations hpt370_port_ops = {
29923 .inherits = &ata_bmdma_port_ops,
29924
29925 .bmdma_stop = hpt370_bmdma_stop,
29926@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
29927 * Configuration for HPT370A. Close to 370 but less filters
29928 */
29929
29930-static struct ata_port_operations hpt370a_port_ops = {
29931+static const struct ata_port_operations hpt370a_port_ops = {
29932 .inherits = &hpt370_port_ops,
29933 .mode_filter = hpt370a_filter,
29934 };
29935@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
29936 * and DMA mode setting functionality.
29937 */
29938
29939-static struct ata_port_operations hpt372_port_ops = {
29940+static const struct ata_port_operations hpt372_port_ops = {
29941 .inherits = &ata_bmdma_port_ops,
29942
29943 .bmdma_stop = hpt37x_bmdma_stop,
29944@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
29945 * but we have a different cable detection procedure for function 1.
29946 */
29947
29948-static struct ata_port_operations hpt374_fn1_port_ops = {
29949+static const struct ata_port_operations hpt374_fn1_port_ops = {
29950 .inherits = &hpt372_port_ops,
29951 .prereset = hpt374_fn1_pre_reset,
29952 };
29953diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
29954index 100f227..2e39382 100644
29955--- a/drivers/ata/pata_hpt3x2n.c
29956+++ b/drivers/ata/pata_hpt3x2n.c
29957@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
29958 * Configuration for HPT3x2n.
29959 */
29960
29961-static struct ata_port_operations hpt3x2n_port_ops = {
29962+static const struct ata_port_operations hpt3x2n_port_ops = {
29963 .inherits = &ata_bmdma_port_ops,
29964
29965 .bmdma_stop = hpt3x2n_bmdma_stop,
29966diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
29967index 7e31025..6fca8f4 100644
29968--- a/drivers/ata/pata_hpt3x3.c
29969+++ b/drivers/ata/pata_hpt3x3.c
29970@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
29971 ATA_BMDMA_SHT(DRV_NAME),
29972 };
29973
29974-static struct ata_port_operations hpt3x3_port_ops = {
29975+static const struct ata_port_operations hpt3x3_port_ops = {
29976 .inherits = &ata_bmdma_port_ops,
29977 .cable_detect = ata_cable_40wire,
29978 .set_piomode = hpt3x3_set_piomode,
29979diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
29980index b663b7f..9a26c2a 100644
29981--- a/drivers/ata/pata_icside.c
29982+++ b/drivers/ata/pata_icside.c
29983@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
29984 }
29985 }
29986
29987-static struct ata_port_operations pata_icside_port_ops = {
29988+static const struct ata_port_operations pata_icside_port_ops = {
29989 .inherits = &ata_sff_port_ops,
29990 /* no need to build any PRD tables for DMA */
29991 .qc_prep = ata_noop_qc_prep,
29992diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
29993index 4bceb88..457dfb6 100644
29994--- a/drivers/ata/pata_isapnp.c
29995+++ b/drivers/ata/pata_isapnp.c
29996@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
29997 ATA_PIO_SHT(DRV_NAME),
29998 };
29999
30000-static struct ata_port_operations isapnp_port_ops = {
30001+static const struct ata_port_operations isapnp_port_ops = {
30002 .inherits = &ata_sff_port_ops,
30003 .cable_detect = ata_cable_40wire,
30004 };
30005
30006-static struct ata_port_operations isapnp_noalt_port_ops = {
30007+static const struct ata_port_operations isapnp_noalt_port_ops = {
30008 .inherits = &ata_sff_port_ops,
30009 .cable_detect = ata_cable_40wire,
30010 /* No altstatus so we don't want to use the lost interrupt poll */
30011diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
30012index f156da8..24976e2 100644
30013--- a/drivers/ata/pata_it8213.c
30014+++ b/drivers/ata/pata_it8213.c
30015@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
30016 };
30017
30018
30019-static struct ata_port_operations it8213_ops = {
30020+static const struct ata_port_operations it8213_ops = {
30021 .inherits = &ata_bmdma_port_ops,
30022 .cable_detect = it8213_cable_detect,
30023 .set_piomode = it8213_set_piomode,
30024diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
30025index 188bc2f..ca9e785 100644
30026--- a/drivers/ata/pata_it821x.c
30027+++ b/drivers/ata/pata_it821x.c
30028@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
30029 ATA_BMDMA_SHT(DRV_NAME),
30030 };
30031
30032-static struct ata_port_operations it821x_smart_port_ops = {
30033+static const struct ata_port_operations it821x_smart_port_ops = {
30034 .inherits = &ata_bmdma_port_ops,
30035
30036 .check_atapi_dma= it821x_check_atapi_dma,
30037@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
30038 .port_start = it821x_port_start,
30039 };
30040
30041-static struct ata_port_operations it821x_passthru_port_ops = {
30042+static const struct ata_port_operations it821x_passthru_port_ops = {
30043 .inherits = &ata_bmdma_port_ops,
30044
30045 .check_atapi_dma= it821x_check_atapi_dma,
30046@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
30047 .port_start = it821x_port_start,
30048 };
30049
30050-static struct ata_port_operations it821x_rdc_port_ops = {
30051+static const struct ata_port_operations it821x_rdc_port_ops = {
30052 .inherits = &ata_bmdma_port_ops,
30053
30054 .check_atapi_dma= it821x_check_atapi_dma,
30055diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
30056index ba54b08..4b952b7 100644
30057--- a/drivers/ata/pata_ixp4xx_cf.c
30058+++ b/drivers/ata/pata_ixp4xx_cf.c
30059@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
30060 ATA_PIO_SHT(DRV_NAME),
30061 };
30062
30063-static struct ata_port_operations ixp4xx_port_ops = {
30064+static const struct ata_port_operations ixp4xx_port_ops = {
30065 .inherits = &ata_sff_port_ops,
30066 .sff_data_xfer = ixp4xx_mmio_data_xfer,
30067 .cable_detect = ata_cable_40wire,
30068diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
30069index 3a1474a..434b0ff 100644
30070--- a/drivers/ata/pata_jmicron.c
30071+++ b/drivers/ata/pata_jmicron.c
30072@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
30073 ATA_BMDMA_SHT(DRV_NAME),
30074 };
30075
30076-static struct ata_port_operations jmicron_ops = {
30077+static const struct ata_port_operations jmicron_ops = {
30078 .inherits = &ata_bmdma_port_ops,
30079 .prereset = jmicron_pre_reset,
30080 };
30081diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
30082index 6932e56..220e71d 100644
30083--- a/drivers/ata/pata_legacy.c
30084+++ b/drivers/ata/pata_legacy.c
30085@@ -106,7 +106,7 @@ struct legacy_probe {
30086
30087 struct legacy_controller {
30088 const char *name;
30089- struct ata_port_operations *ops;
30090+ const struct ata_port_operations *ops;
30091 unsigned int pio_mask;
30092 unsigned int flags;
30093 unsigned int pflags;
30094@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
30095 * pio_mask as well.
30096 */
30097
30098-static struct ata_port_operations simple_port_ops = {
30099+static const struct ata_port_operations simple_port_ops = {
30100 .inherits = &legacy_base_port_ops,
30101 .sff_data_xfer = ata_sff_data_xfer_noirq,
30102 };
30103
30104-static struct ata_port_operations legacy_port_ops = {
30105+static const struct ata_port_operations legacy_port_ops = {
30106 .inherits = &legacy_base_port_ops,
30107 .sff_data_xfer = ata_sff_data_xfer_noirq,
30108 .set_mode = legacy_set_mode,
30109@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
30110 return buflen;
30111 }
30112
30113-static struct ata_port_operations pdc20230_port_ops = {
30114+static const struct ata_port_operations pdc20230_port_ops = {
30115 .inherits = &legacy_base_port_ops,
30116 .set_piomode = pdc20230_set_piomode,
30117 .sff_data_xfer = pdc_data_xfer_vlb,
30118@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
30119 ioread8(ap->ioaddr.status_addr);
30120 }
30121
30122-static struct ata_port_operations ht6560a_port_ops = {
30123+static const struct ata_port_operations ht6560a_port_ops = {
30124 .inherits = &legacy_base_port_ops,
30125 .set_piomode = ht6560a_set_piomode,
30126 };
30127@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
30128 ioread8(ap->ioaddr.status_addr);
30129 }
30130
30131-static struct ata_port_operations ht6560b_port_ops = {
30132+static const struct ata_port_operations ht6560b_port_ops = {
30133 .inherits = &legacy_base_port_ops,
30134 .set_piomode = ht6560b_set_piomode,
30135 };
30136@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
30137 }
30138
30139
30140-static struct ata_port_operations opti82c611a_port_ops = {
30141+static const struct ata_port_operations opti82c611a_port_ops = {
30142 .inherits = &legacy_base_port_ops,
30143 .set_piomode = opti82c611a_set_piomode,
30144 };
30145@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
30146 return ata_sff_qc_issue(qc);
30147 }
30148
30149-static struct ata_port_operations opti82c46x_port_ops = {
30150+static const struct ata_port_operations opti82c46x_port_ops = {
30151 .inherits = &legacy_base_port_ops,
30152 .set_piomode = opti82c46x_set_piomode,
30153 .qc_issue = opti82c46x_qc_issue,
30154@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
30155 return 0;
30156 }
30157
30158-static struct ata_port_operations qdi6500_port_ops = {
30159+static const struct ata_port_operations qdi6500_port_ops = {
30160 .inherits = &legacy_base_port_ops,
30161 .set_piomode = qdi6500_set_piomode,
30162 .qc_issue = qdi_qc_issue,
30163 .sff_data_xfer = vlb32_data_xfer,
30164 };
30165
30166-static struct ata_port_operations qdi6580_port_ops = {
30167+static const struct ata_port_operations qdi6580_port_ops = {
30168 .inherits = &legacy_base_port_ops,
30169 .set_piomode = qdi6580_set_piomode,
30170 .sff_data_xfer = vlb32_data_xfer,
30171 };
30172
30173-static struct ata_port_operations qdi6580dp_port_ops = {
30174+static const struct ata_port_operations qdi6580dp_port_ops = {
30175 .inherits = &legacy_base_port_ops,
30176 .set_piomode = qdi6580dp_set_piomode,
30177 .sff_data_xfer = vlb32_data_xfer,
30178@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
30179 return 0;
30180 }
30181
30182-static struct ata_port_operations winbond_port_ops = {
30183+static const struct ata_port_operations winbond_port_ops = {
30184 .inherits = &legacy_base_port_ops,
30185 .set_piomode = winbond_set_piomode,
30186 .sff_data_xfer = vlb32_data_xfer,
30187@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
30188 int pio_modes = controller->pio_mask;
30189 unsigned long io = probe->port;
30190 u32 mask = (1 << probe->slot);
30191- struct ata_port_operations *ops = controller->ops;
30192+ const struct ata_port_operations *ops = controller->ops;
30193 struct legacy_data *ld = &legacy_data[probe->slot];
30194 struct ata_host *host = NULL;
30195 struct ata_port *ap;
30196diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
30197index 2096fb7..4d090fc 100644
30198--- a/drivers/ata/pata_marvell.c
30199+++ b/drivers/ata/pata_marvell.c
30200@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
30201 ATA_BMDMA_SHT(DRV_NAME),
30202 };
30203
30204-static struct ata_port_operations marvell_ops = {
30205+static const struct ata_port_operations marvell_ops = {
30206 .inherits = &ata_bmdma_port_ops,
30207 .cable_detect = marvell_cable_detect,
30208 .prereset = marvell_pre_reset,
30209diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
30210index 99d41be..7d56aa8 100644
30211--- a/drivers/ata/pata_mpc52xx.c
30212+++ b/drivers/ata/pata_mpc52xx.c
30213@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
30214 ATA_PIO_SHT(DRV_NAME),
30215 };
30216
30217-static struct ata_port_operations mpc52xx_ata_port_ops = {
30218+static const struct ata_port_operations mpc52xx_ata_port_ops = {
30219 .inherits = &ata_bmdma_port_ops,
30220 .sff_dev_select = mpc52xx_ata_dev_select,
30221 .set_piomode = mpc52xx_ata_set_piomode,
30222diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
30223index b21f002..0a27e7f 100644
30224--- a/drivers/ata/pata_mpiix.c
30225+++ b/drivers/ata/pata_mpiix.c
30226@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
30227 ATA_PIO_SHT(DRV_NAME),
30228 };
30229
30230-static struct ata_port_operations mpiix_port_ops = {
30231+static const struct ata_port_operations mpiix_port_ops = {
30232 .inherits = &ata_sff_port_ops,
30233 .qc_issue = mpiix_qc_issue,
30234 .cable_detect = ata_cable_40wire,
30235diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
30236index f0d52f7..89c3be3 100644
30237--- a/drivers/ata/pata_netcell.c
30238+++ b/drivers/ata/pata_netcell.c
30239@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
30240 ATA_BMDMA_SHT(DRV_NAME),
30241 };
30242
30243-static struct ata_port_operations netcell_ops = {
30244+static const struct ata_port_operations netcell_ops = {
30245 .inherits = &ata_bmdma_port_ops,
30246 .cable_detect = ata_cable_80wire,
30247 .read_id = netcell_read_id,
30248diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
30249index dd53a66..a3f4317 100644
30250--- a/drivers/ata/pata_ninja32.c
30251+++ b/drivers/ata/pata_ninja32.c
30252@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
30253 ATA_BMDMA_SHT(DRV_NAME),
30254 };
30255
30256-static struct ata_port_operations ninja32_port_ops = {
30257+static const struct ata_port_operations ninja32_port_ops = {
30258 .inherits = &ata_bmdma_port_ops,
30259 .sff_dev_select = ninja32_dev_select,
30260 .cable_detect = ata_cable_40wire,
30261diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
30262index ca53fac..9aa93ef 100644
30263--- a/drivers/ata/pata_ns87410.c
30264+++ b/drivers/ata/pata_ns87410.c
30265@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
30266 ATA_PIO_SHT(DRV_NAME),
30267 };
30268
30269-static struct ata_port_operations ns87410_port_ops = {
30270+static const struct ata_port_operations ns87410_port_ops = {
30271 .inherits = &ata_sff_port_ops,
30272 .qc_issue = ns87410_qc_issue,
30273 .cable_detect = ata_cable_40wire,
30274diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
30275index 773b159..55f454e 100644
30276--- a/drivers/ata/pata_ns87415.c
30277+++ b/drivers/ata/pata_ns87415.c
30278@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
30279 }
30280 #endif /* 87560 SuperIO Support */
30281
30282-static struct ata_port_operations ns87415_pata_ops = {
30283+static const struct ata_port_operations ns87415_pata_ops = {
30284 .inherits = &ata_bmdma_port_ops,
30285
30286 .check_atapi_dma = ns87415_check_atapi_dma,
30287@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
30288 };
30289
30290 #if defined(CONFIG_SUPERIO)
30291-static struct ata_port_operations ns87560_pata_ops = {
30292+static const struct ata_port_operations ns87560_pata_ops = {
30293 .inherits = &ns87415_pata_ops,
30294 .sff_tf_read = ns87560_tf_read,
30295 .sff_check_status = ns87560_check_status,
30296diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
30297index d6f6956..639295b 100644
30298--- a/drivers/ata/pata_octeon_cf.c
30299+++ b/drivers/ata/pata_octeon_cf.c
30300@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
30301 return 0;
30302 }
30303
30304+/* cannot be const */
30305 static struct ata_port_operations octeon_cf_ops = {
30306 .inherits = &ata_sff_port_ops,
30307 .check_atapi_dma = octeon_cf_check_atapi_dma,
30308diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
30309index 84ac503..adee1cd 100644
30310--- a/drivers/ata/pata_oldpiix.c
30311+++ b/drivers/ata/pata_oldpiix.c
30312@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
30313 ATA_BMDMA_SHT(DRV_NAME),
30314 };
30315
30316-static struct ata_port_operations oldpiix_pata_ops = {
30317+static const struct ata_port_operations oldpiix_pata_ops = {
30318 .inherits = &ata_bmdma_port_ops,
30319 .qc_issue = oldpiix_qc_issue,
30320 .cable_detect = ata_cable_40wire,
30321diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
30322index 99eddda..3a4c0aa 100644
30323--- a/drivers/ata/pata_opti.c
30324+++ b/drivers/ata/pata_opti.c
30325@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
30326 ATA_PIO_SHT(DRV_NAME),
30327 };
30328
30329-static struct ata_port_operations opti_port_ops = {
30330+static const struct ata_port_operations opti_port_ops = {
30331 .inherits = &ata_sff_port_ops,
30332 .cable_detect = ata_cable_40wire,
30333 .set_piomode = opti_set_piomode,
30334diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
30335index 86885a4..8e9968d 100644
30336--- a/drivers/ata/pata_optidma.c
30337+++ b/drivers/ata/pata_optidma.c
30338@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
30339 ATA_BMDMA_SHT(DRV_NAME),
30340 };
30341
30342-static struct ata_port_operations optidma_port_ops = {
30343+static const struct ata_port_operations optidma_port_ops = {
30344 .inherits = &ata_bmdma_port_ops,
30345 .cable_detect = ata_cable_40wire,
30346 .set_piomode = optidma_set_pio_mode,
30347@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
30348 .prereset = optidma_pre_reset,
30349 };
30350
30351-static struct ata_port_operations optiplus_port_ops = {
30352+static const struct ata_port_operations optiplus_port_ops = {
30353 .inherits = &optidma_port_ops,
30354 .set_piomode = optiplus_set_pio_mode,
30355 .set_dmamode = optiplus_set_dma_mode,
30356diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
30357index 11fb4cc..1a14022 100644
30358--- a/drivers/ata/pata_palmld.c
30359+++ b/drivers/ata/pata_palmld.c
30360@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
30361 ATA_PIO_SHT(DRV_NAME),
30362 };
30363
30364-static struct ata_port_operations palmld_port_ops = {
30365+static const struct ata_port_operations palmld_port_ops = {
30366 .inherits = &ata_sff_port_ops,
30367 .sff_data_xfer = ata_sff_data_xfer_noirq,
30368 .cable_detect = ata_cable_40wire,
30369diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
30370index dc99e26..7f4b1e4 100644
30371--- a/drivers/ata/pata_pcmcia.c
30372+++ b/drivers/ata/pata_pcmcia.c
30373@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
30374 ATA_PIO_SHT(DRV_NAME),
30375 };
30376
30377-static struct ata_port_operations pcmcia_port_ops = {
30378+static const struct ata_port_operations pcmcia_port_ops = {
30379 .inherits = &ata_sff_port_ops,
30380 .sff_data_xfer = ata_sff_data_xfer_noirq,
30381 .cable_detect = ata_cable_40wire,
30382 .set_mode = pcmcia_set_mode,
30383 };
30384
30385-static struct ata_port_operations pcmcia_8bit_port_ops = {
30386+static const struct ata_port_operations pcmcia_8bit_port_ops = {
30387 .inherits = &ata_sff_port_ops,
30388 .sff_data_xfer = ata_data_xfer_8bit,
30389 .cable_detect = ata_cable_40wire,
30390@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
30391 unsigned long io_base, ctl_base;
30392 void __iomem *io_addr, *ctl_addr;
30393 int n_ports = 1;
30394- struct ata_port_operations *ops = &pcmcia_port_ops;
30395+ const struct ata_port_operations *ops = &pcmcia_port_ops;
30396
30397 info = kzalloc(sizeof(*info), GFP_KERNEL);
30398 if (info == NULL)
30399diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
30400index ca5cad0..3a1f125 100644
30401--- a/drivers/ata/pata_pdc2027x.c
30402+++ b/drivers/ata/pata_pdc2027x.c
30403@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
30404 ATA_BMDMA_SHT(DRV_NAME),
30405 };
30406
30407-static struct ata_port_operations pdc2027x_pata100_ops = {
30408+static const struct ata_port_operations pdc2027x_pata100_ops = {
30409 .inherits = &ata_bmdma_port_ops,
30410 .check_atapi_dma = pdc2027x_check_atapi_dma,
30411 .cable_detect = pdc2027x_cable_detect,
30412 .prereset = pdc2027x_prereset,
30413 };
30414
30415-static struct ata_port_operations pdc2027x_pata133_ops = {
30416+static const struct ata_port_operations pdc2027x_pata133_ops = {
30417 .inherits = &pdc2027x_pata100_ops,
30418 .mode_filter = pdc2027x_mode_filter,
30419 .set_piomode = pdc2027x_set_piomode,
30420diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
30421index 2911120..4bf62aa 100644
30422--- a/drivers/ata/pata_pdc202xx_old.c
30423+++ b/drivers/ata/pata_pdc202xx_old.c
30424@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
30425 ATA_BMDMA_SHT(DRV_NAME),
30426 };
30427
30428-static struct ata_port_operations pdc2024x_port_ops = {
30429+static const struct ata_port_operations pdc2024x_port_ops = {
30430 .inherits = &ata_bmdma_port_ops,
30431
30432 .cable_detect = ata_cable_40wire,
30433@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
30434 .sff_exec_command = pdc202xx_exec_command,
30435 };
30436
30437-static struct ata_port_operations pdc2026x_port_ops = {
30438+static const struct ata_port_operations pdc2026x_port_ops = {
30439 .inherits = &pdc2024x_port_ops,
30440
30441 .check_atapi_dma = pdc2026x_check_atapi_dma,
30442diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
30443index 3f6ebc6..a18c358 100644
30444--- a/drivers/ata/pata_platform.c
30445+++ b/drivers/ata/pata_platform.c
30446@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
30447 ATA_PIO_SHT(DRV_NAME),
30448 };
30449
30450-static struct ata_port_operations pata_platform_port_ops = {
30451+static const struct ata_port_operations pata_platform_port_ops = {
30452 .inherits = &ata_sff_port_ops,
30453 .sff_data_xfer = ata_sff_data_xfer_noirq,
30454 .cable_detect = ata_cable_unknown,
30455diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
30456index 45879dc..165a9f9 100644
30457--- a/drivers/ata/pata_qdi.c
30458+++ b/drivers/ata/pata_qdi.c
30459@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
30460 ATA_PIO_SHT(DRV_NAME),
30461 };
30462
30463-static struct ata_port_operations qdi6500_port_ops = {
30464+static const struct ata_port_operations qdi6500_port_ops = {
30465 .inherits = &ata_sff_port_ops,
30466 .qc_issue = qdi_qc_issue,
30467 .sff_data_xfer = qdi_data_xfer,
30468@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
30469 .set_piomode = qdi6500_set_piomode,
30470 };
30471
30472-static struct ata_port_operations qdi6580_port_ops = {
30473+static const struct ata_port_operations qdi6580_port_ops = {
30474 .inherits = &qdi6500_port_ops,
30475 .set_piomode = qdi6580_set_piomode,
30476 };
30477diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
30478index 4401b33..716c5cc 100644
30479--- a/drivers/ata/pata_radisys.c
30480+++ b/drivers/ata/pata_radisys.c
30481@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
30482 ATA_BMDMA_SHT(DRV_NAME),
30483 };
30484
30485-static struct ata_port_operations radisys_pata_ops = {
30486+static const struct ata_port_operations radisys_pata_ops = {
30487 .inherits = &ata_bmdma_port_ops,
30488 .qc_issue = radisys_qc_issue,
30489 .cable_detect = ata_cable_unknown,
30490diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
30491index 45f1e10..fab6bca 100644
30492--- a/drivers/ata/pata_rb532_cf.c
30493+++ b/drivers/ata/pata_rb532_cf.c
30494@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
30495 return IRQ_HANDLED;
30496 }
30497
30498-static struct ata_port_operations rb532_pata_port_ops = {
30499+static const struct ata_port_operations rb532_pata_port_ops = {
30500 .inherits = &ata_sff_port_ops,
30501 .sff_data_xfer = ata_sff_data_xfer32,
30502 };
30503diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
30504index c843a1e..b5853c3 100644
30505--- a/drivers/ata/pata_rdc.c
30506+++ b/drivers/ata/pata_rdc.c
30507@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
30508 pci_write_config_byte(dev, 0x48, udma_enable);
30509 }
30510
30511-static struct ata_port_operations rdc_pata_ops = {
30512+static const struct ata_port_operations rdc_pata_ops = {
30513 .inherits = &ata_bmdma32_port_ops,
30514 .cable_detect = rdc_pata_cable_detect,
30515 .set_piomode = rdc_set_piomode,
30516diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
30517index a5e4dfe..080c8c9 100644
30518--- a/drivers/ata/pata_rz1000.c
30519+++ b/drivers/ata/pata_rz1000.c
30520@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
30521 ATA_PIO_SHT(DRV_NAME),
30522 };
30523
30524-static struct ata_port_operations rz1000_port_ops = {
30525+static const struct ata_port_operations rz1000_port_ops = {
30526 .inherits = &ata_sff_port_ops,
30527 .cable_detect = ata_cable_40wire,
30528 .set_mode = rz1000_set_mode,
30529diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
30530index 3bbed83..e309daf 100644
30531--- a/drivers/ata/pata_sc1200.c
30532+++ b/drivers/ata/pata_sc1200.c
30533@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
30534 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
30535 };
30536
30537-static struct ata_port_operations sc1200_port_ops = {
30538+static const struct ata_port_operations sc1200_port_ops = {
30539 .inherits = &ata_bmdma_port_ops,
30540 .qc_prep = ata_sff_dumb_qc_prep,
30541 .qc_issue = sc1200_qc_issue,
30542diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
30543index 4257d6b..4c1d9d5 100644
30544--- a/drivers/ata/pata_scc.c
30545+++ b/drivers/ata/pata_scc.c
30546@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
30547 ATA_BMDMA_SHT(DRV_NAME),
30548 };
30549
30550-static struct ata_port_operations scc_pata_ops = {
30551+static const struct ata_port_operations scc_pata_ops = {
30552 .inherits = &ata_bmdma_port_ops,
30553
30554 .set_piomode = scc_set_piomode,
30555diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
30556index 99cceb45..e2e0a87 100644
30557--- a/drivers/ata/pata_sch.c
30558+++ b/drivers/ata/pata_sch.c
30559@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
30560 ATA_BMDMA_SHT(DRV_NAME),
30561 };
30562
30563-static struct ata_port_operations sch_pata_ops = {
30564+static const struct ata_port_operations sch_pata_ops = {
30565 .inherits = &ata_bmdma_port_ops,
30566 .cable_detect = ata_cable_unknown,
30567 .set_piomode = sch_set_piomode,
30568diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
30569index beaed12..39969f1 100644
30570--- a/drivers/ata/pata_serverworks.c
30571+++ b/drivers/ata/pata_serverworks.c
30572@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
30573 ATA_BMDMA_SHT(DRV_NAME),
30574 };
30575
30576-static struct ata_port_operations serverworks_osb4_port_ops = {
30577+static const struct ata_port_operations serverworks_osb4_port_ops = {
30578 .inherits = &ata_bmdma_port_ops,
30579 .cable_detect = serverworks_cable_detect,
30580 .mode_filter = serverworks_osb4_filter,
30581@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
30582 .set_dmamode = serverworks_set_dmamode,
30583 };
30584
30585-static struct ata_port_operations serverworks_csb_port_ops = {
30586+static const struct ata_port_operations serverworks_csb_port_ops = {
30587 .inherits = &serverworks_osb4_port_ops,
30588 .mode_filter = serverworks_csb_filter,
30589 };
30590diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
30591index a2ace48..0463b44 100644
30592--- a/drivers/ata/pata_sil680.c
30593+++ b/drivers/ata/pata_sil680.c
30594@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
30595 ATA_BMDMA_SHT(DRV_NAME),
30596 };
30597
30598-static struct ata_port_operations sil680_port_ops = {
30599+static const struct ata_port_operations sil680_port_ops = {
30600 .inherits = &ata_bmdma32_port_ops,
30601 .cable_detect = sil680_cable_detect,
30602 .set_piomode = sil680_set_piomode,
30603diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
30604index 488e77b..b3724d5 100644
30605--- a/drivers/ata/pata_sis.c
30606+++ b/drivers/ata/pata_sis.c
30607@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
30608 ATA_BMDMA_SHT(DRV_NAME),
30609 };
30610
30611-static struct ata_port_operations sis_133_for_sata_ops = {
30612+static const struct ata_port_operations sis_133_for_sata_ops = {
30613 .inherits = &ata_bmdma_port_ops,
30614 .set_piomode = sis_133_set_piomode,
30615 .set_dmamode = sis_133_set_dmamode,
30616 .cable_detect = sis_133_cable_detect,
30617 };
30618
30619-static struct ata_port_operations sis_base_ops = {
30620+static const struct ata_port_operations sis_base_ops = {
30621 .inherits = &ata_bmdma_port_ops,
30622 .prereset = sis_pre_reset,
30623 };
30624
30625-static struct ata_port_operations sis_133_ops = {
30626+static const struct ata_port_operations sis_133_ops = {
30627 .inherits = &sis_base_ops,
30628 .set_piomode = sis_133_set_piomode,
30629 .set_dmamode = sis_133_set_dmamode,
30630 .cable_detect = sis_133_cable_detect,
30631 };
30632
30633-static struct ata_port_operations sis_133_early_ops = {
30634+static const struct ata_port_operations sis_133_early_ops = {
30635 .inherits = &sis_base_ops,
30636 .set_piomode = sis_100_set_piomode,
30637 .set_dmamode = sis_133_early_set_dmamode,
30638 .cable_detect = sis_66_cable_detect,
30639 };
30640
30641-static struct ata_port_operations sis_100_ops = {
30642+static const struct ata_port_operations sis_100_ops = {
30643 .inherits = &sis_base_ops,
30644 .set_piomode = sis_100_set_piomode,
30645 .set_dmamode = sis_100_set_dmamode,
30646 .cable_detect = sis_66_cable_detect,
30647 };
30648
30649-static struct ata_port_operations sis_66_ops = {
30650+static const struct ata_port_operations sis_66_ops = {
30651 .inherits = &sis_base_ops,
30652 .set_piomode = sis_old_set_piomode,
30653 .set_dmamode = sis_66_set_dmamode,
30654 .cable_detect = sis_66_cable_detect,
30655 };
30656
30657-static struct ata_port_operations sis_old_ops = {
30658+static const struct ata_port_operations sis_old_ops = {
30659 .inherits = &sis_base_ops,
30660 .set_piomode = sis_old_set_piomode,
30661 .set_dmamode = sis_old_set_dmamode,
30662diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
30663index 29f733c..43e9ca0 100644
30664--- a/drivers/ata/pata_sl82c105.c
30665+++ b/drivers/ata/pata_sl82c105.c
30666@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
30667 ATA_BMDMA_SHT(DRV_NAME),
30668 };
30669
30670-static struct ata_port_operations sl82c105_port_ops = {
30671+static const struct ata_port_operations sl82c105_port_ops = {
30672 .inherits = &ata_bmdma_port_ops,
30673 .qc_defer = sl82c105_qc_defer,
30674 .bmdma_start = sl82c105_bmdma_start,
30675diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
30676index f1f13ff..df39e99 100644
30677--- a/drivers/ata/pata_triflex.c
30678+++ b/drivers/ata/pata_triflex.c
30679@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
30680 ATA_BMDMA_SHT(DRV_NAME),
30681 };
30682
30683-static struct ata_port_operations triflex_port_ops = {
30684+static const struct ata_port_operations triflex_port_ops = {
30685 .inherits = &ata_bmdma_port_ops,
30686 .bmdma_start = triflex_bmdma_start,
30687 .bmdma_stop = triflex_bmdma_stop,
30688diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
30689index 1d73b8d..98a4b29 100644
30690--- a/drivers/ata/pata_via.c
30691+++ b/drivers/ata/pata_via.c
30692@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
30693 ATA_BMDMA_SHT(DRV_NAME),
30694 };
30695
30696-static struct ata_port_operations via_port_ops = {
30697+static const struct ata_port_operations via_port_ops = {
30698 .inherits = &ata_bmdma_port_ops,
30699 .cable_detect = via_cable_detect,
30700 .set_piomode = via_set_piomode,
30701@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
30702 .port_start = via_port_start,
30703 };
30704
30705-static struct ata_port_operations via_port_ops_noirq = {
30706+static const struct ata_port_operations via_port_ops_noirq = {
30707 .inherits = &via_port_ops,
30708 .sff_data_xfer = ata_sff_data_xfer_noirq,
30709 };
30710diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
30711index 6d8619b..ad511c4 100644
30712--- a/drivers/ata/pata_winbond.c
30713+++ b/drivers/ata/pata_winbond.c
30714@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
30715 ATA_PIO_SHT(DRV_NAME),
30716 };
30717
30718-static struct ata_port_operations winbond_port_ops = {
30719+static const struct ata_port_operations winbond_port_ops = {
30720 .inherits = &ata_sff_port_ops,
30721 .sff_data_xfer = winbond_data_xfer,
30722 .cable_detect = ata_cable_40wire,
30723diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
30724index 6c65b07..f996ec7 100644
30725--- a/drivers/ata/pdc_adma.c
30726+++ b/drivers/ata/pdc_adma.c
30727@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
30728 .dma_boundary = ADMA_DMA_BOUNDARY,
30729 };
30730
30731-static struct ata_port_operations adma_ata_ops = {
30732+static const struct ata_port_operations adma_ata_ops = {
30733 .inherits = &ata_sff_port_ops,
30734
30735 .lost_interrupt = ATA_OP_NULL,
30736diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
30737index 172b57e..c49bc1e 100644
30738--- a/drivers/ata/sata_fsl.c
30739+++ b/drivers/ata/sata_fsl.c
30740@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
30741 .dma_boundary = ATA_DMA_BOUNDARY,
30742 };
30743
30744-static struct ata_port_operations sata_fsl_ops = {
30745+static const struct ata_port_operations sata_fsl_ops = {
30746 .inherits = &sata_pmp_port_ops,
30747
30748 .qc_defer = ata_std_qc_defer,
30749diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
30750index 4406902..60603ef 100644
30751--- a/drivers/ata/sata_inic162x.c
30752+++ b/drivers/ata/sata_inic162x.c
30753@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
30754 return 0;
30755 }
30756
30757-static struct ata_port_operations inic_port_ops = {
30758+static const struct ata_port_operations inic_port_ops = {
30759 .inherits = &sata_port_ops,
30760
30761 .check_atapi_dma = inic_check_atapi_dma,
30762diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
30763index cf41126..8107be6 100644
30764--- a/drivers/ata/sata_mv.c
30765+++ b/drivers/ata/sata_mv.c
30766@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
30767 .dma_boundary = MV_DMA_BOUNDARY,
30768 };
30769
30770-static struct ata_port_operations mv5_ops = {
30771+static const struct ata_port_operations mv5_ops = {
30772 .inherits = &ata_sff_port_ops,
30773
30774 .lost_interrupt = ATA_OP_NULL,
30775@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
30776 .port_stop = mv_port_stop,
30777 };
30778
30779-static struct ata_port_operations mv6_ops = {
30780+static const struct ata_port_operations mv6_ops = {
30781 .inherits = &mv5_ops,
30782 .dev_config = mv6_dev_config,
30783 .scr_read = mv_scr_read,
30784@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
30785 .bmdma_status = mv_bmdma_status,
30786 };
30787
30788-static struct ata_port_operations mv_iie_ops = {
30789+static const struct ata_port_operations mv_iie_ops = {
30790 .inherits = &mv6_ops,
30791 .dev_config = ATA_OP_NULL,
30792 .qc_prep = mv_qc_prep_iie,
30793diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
30794index ae2297c..d5c9c33 100644
30795--- a/drivers/ata/sata_nv.c
30796+++ b/drivers/ata/sata_nv.c
30797@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
30798 * cases. Define nv_hardreset() which only kicks in for post-boot
30799 * probing and use it for all variants.
30800 */
30801-static struct ata_port_operations nv_generic_ops = {
30802+static const struct ata_port_operations nv_generic_ops = {
30803 .inherits = &ata_bmdma_port_ops,
30804 .lost_interrupt = ATA_OP_NULL,
30805 .scr_read = nv_scr_read,
30806@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
30807 .hardreset = nv_hardreset,
30808 };
30809
30810-static struct ata_port_operations nv_nf2_ops = {
30811+static const struct ata_port_operations nv_nf2_ops = {
30812 .inherits = &nv_generic_ops,
30813 .freeze = nv_nf2_freeze,
30814 .thaw = nv_nf2_thaw,
30815 };
30816
30817-static struct ata_port_operations nv_ck804_ops = {
30818+static const struct ata_port_operations nv_ck804_ops = {
30819 .inherits = &nv_generic_ops,
30820 .freeze = nv_ck804_freeze,
30821 .thaw = nv_ck804_thaw,
30822 .host_stop = nv_ck804_host_stop,
30823 };
30824
30825-static struct ata_port_operations nv_adma_ops = {
30826+static const struct ata_port_operations nv_adma_ops = {
30827 .inherits = &nv_ck804_ops,
30828
30829 .check_atapi_dma = nv_adma_check_atapi_dma,
30830@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
30831 .host_stop = nv_adma_host_stop,
30832 };
30833
30834-static struct ata_port_operations nv_swncq_ops = {
30835+static const struct ata_port_operations nv_swncq_ops = {
30836 .inherits = &nv_generic_ops,
30837
30838 .qc_defer = ata_std_qc_defer,
30839diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
30840index 07d8d00..6cc70bb 100644
30841--- a/drivers/ata/sata_promise.c
30842+++ b/drivers/ata/sata_promise.c
30843@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
30844 .error_handler = pdc_error_handler,
30845 };
30846
30847-static struct ata_port_operations pdc_sata_ops = {
30848+static const struct ata_port_operations pdc_sata_ops = {
30849 .inherits = &pdc_common_ops,
30850 .cable_detect = pdc_sata_cable_detect,
30851 .freeze = pdc_sata_freeze,
30852@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
30853
30854 /* First-generation chips need a more restrictive ->check_atapi_dma op,
30855 and ->freeze/thaw that ignore the hotplug controls. */
30856-static struct ata_port_operations pdc_old_sata_ops = {
30857+static const struct ata_port_operations pdc_old_sata_ops = {
30858 .inherits = &pdc_sata_ops,
30859 .freeze = pdc_freeze,
30860 .thaw = pdc_thaw,
30861 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
30862 };
30863
30864-static struct ata_port_operations pdc_pata_ops = {
30865+static const struct ata_port_operations pdc_pata_ops = {
30866 .inherits = &pdc_common_ops,
30867 .cable_detect = pdc_pata_cable_detect,
30868 .freeze = pdc_freeze,
30869diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
30870index 326c0cf..36ecebe 100644
30871--- a/drivers/ata/sata_qstor.c
30872+++ b/drivers/ata/sata_qstor.c
30873@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
30874 .dma_boundary = QS_DMA_BOUNDARY,
30875 };
30876
30877-static struct ata_port_operations qs_ata_ops = {
30878+static const struct ata_port_operations qs_ata_ops = {
30879 .inherits = &ata_sff_port_ops,
30880
30881 .check_atapi_dma = qs_check_atapi_dma,
30882diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
30883index 3cb69d5..0871d3c 100644
30884--- a/drivers/ata/sata_sil.c
30885+++ b/drivers/ata/sata_sil.c
30886@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
30887 .sg_tablesize = ATA_MAX_PRD
30888 };
30889
30890-static struct ata_port_operations sil_ops = {
30891+static const struct ata_port_operations sil_ops = {
30892 .inherits = &ata_bmdma32_port_ops,
30893 .dev_config = sil_dev_config,
30894 .set_mode = sil_set_mode,
30895diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
30896index e6946fc..eddb794 100644
30897--- a/drivers/ata/sata_sil24.c
30898+++ b/drivers/ata/sata_sil24.c
30899@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
30900 .dma_boundary = ATA_DMA_BOUNDARY,
30901 };
30902
30903-static struct ata_port_operations sil24_ops = {
30904+static const struct ata_port_operations sil24_ops = {
30905 .inherits = &sata_pmp_port_ops,
30906
30907 .qc_defer = sil24_qc_defer,
30908diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
30909index f8a91bf..9cb06b6 100644
30910--- a/drivers/ata/sata_sis.c
30911+++ b/drivers/ata/sata_sis.c
30912@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
30913 ATA_BMDMA_SHT(DRV_NAME),
30914 };
30915
30916-static struct ata_port_operations sis_ops = {
30917+static const struct ata_port_operations sis_ops = {
30918 .inherits = &ata_bmdma_port_ops,
30919 .scr_read = sis_scr_read,
30920 .scr_write = sis_scr_write,
30921diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
30922index 7257f2d..d04c6f5 100644
30923--- a/drivers/ata/sata_svw.c
30924+++ b/drivers/ata/sata_svw.c
30925@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
30926 };
30927
30928
30929-static struct ata_port_operations k2_sata_ops = {
30930+static const struct ata_port_operations k2_sata_ops = {
30931 .inherits = &ata_bmdma_port_ops,
30932 .sff_tf_load = k2_sata_tf_load,
30933 .sff_tf_read = k2_sata_tf_read,
30934diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
30935index bbcf970..cd0df0d 100644
30936--- a/drivers/ata/sata_sx4.c
30937+++ b/drivers/ata/sata_sx4.c
30938@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
30939 };
30940
30941 /* TODO: inherit from base port_ops after converting to new EH */
30942-static struct ata_port_operations pdc_20621_ops = {
30943+static const struct ata_port_operations pdc_20621_ops = {
30944 .inherits = &ata_sff_port_ops,
30945
30946 .check_atapi_dma = pdc_check_atapi_dma,
30947diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
30948index e5bff47..089d859 100644
30949--- a/drivers/ata/sata_uli.c
30950+++ b/drivers/ata/sata_uli.c
30951@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
30952 ATA_BMDMA_SHT(DRV_NAME),
30953 };
30954
30955-static struct ata_port_operations uli_ops = {
30956+static const struct ata_port_operations uli_ops = {
30957 .inherits = &ata_bmdma_port_ops,
30958 .scr_read = uli_scr_read,
30959 .scr_write = uli_scr_write,
30960diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
30961index f5dcca7..77b94eb 100644
30962--- a/drivers/ata/sata_via.c
30963+++ b/drivers/ata/sata_via.c
30964@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
30965 ATA_BMDMA_SHT(DRV_NAME),
30966 };
30967
30968-static struct ata_port_operations svia_base_ops = {
30969+static const struct ata_port_operations svia_base_ops = {
30970 .inherits = &ata_bmdma_port_ops,
30971 .sff_tf_load = svia_tf_load,
30972 };
30973
30974-static struct ata_port_operations vt6420_sata_ops = {
30975+static const struct ata_port_operations vt6420_sata_ops = {
30976 .inherits = &svia_base_ops,
30977 .freeze = svia_noop_freeze,
30978 .prereset = vt6420_prereset,
30979 .bmdma_start = vt6420_bmdma_start,
30980 };
30981
30982-static struct ata_port_operations vt6421_pata_ops = {
30983+static const struct ata_port_operations vt6421_pata_ops = {
30984 .inherits = &svia_base_ops,
30985 .cable_detect = vt6421_pata_cable_detect,
30986 .set_piomode = vt6421_set_pio_mode,
30987 .set_dmamode = vt6421_set_dma_mode,
30988 };
30989
30990-static struct ata_port_operations vt6421_sata_ops = {
30991+static const struct ata_port_operations vt6421_sata_ops = {
30992 .inherits = &svia_base_ops,
30993 .scr_read = svia_scr_read,
30994 .scr_write = svia_scr_write,
30995 };
30996
30997-static struct ata_port_operations vt8251_ops = {
30998+static const struct ata_port_operations vt8251_ops = {
30999 .inherits = &svia_base_ops,
31000 .hardreset = sata_std_hardreset,
31001 .scr_read = vt8251_scr_read,
31002diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
31003index 8b2a278..51e65d3 100644
31004--- a/drivers/ata/sata_vsc.c
31005+++ b/drivers/ata/sata_vsc.c
31006@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
31007 };
31008
31009
31010-static struct ata_port_operations vsc_sata_ops = {
31011+static const struct ata_port_operations vsc_sata_ops = {
31012 .inherits = &ata_bmdma_port_ops,
31013 /* The IRQ handling is not quite standard SFF behaviour so we
31014 cannot use the default lost interrupt handler */
31015diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31016index 5effec6..7e4019a 100644
31017--- a/drivers/atm/adummy.c
31018+++ b/drivers/atm/adummy.c
31019@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31020 vcc->pop(vcc, skb);
31021 else
31022 dev_kfree_skb_any(skb);
31023- atomic_inc(&vcc->stats->tx);
31024+ atomic_inc_unchecked(&vcc->stats->tx);
31025
31026 return 0;
31027 }
31028diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31029index 66e1813..26a27c6 100644
31030--- a/drivers/atm/ambassador.c
31031+++ b/drivers/atm/ambassador.c
31032@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31033 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31034
31035 // VC layer stats
31036- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31037+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31038
31039 // free the descriptor
31040 kfree (tx_descr);
31041@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31042 dump_skb ("<<<", vc, skb);
31043
31044 // VC layer stats
31045- atomic_inc(&atm_vcc->stats->rx);
31046+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31047 __net_timestamp(skb);
31048 // end of our responsability
31049 atm_vcc->push (atm_vcc, skb);
31050@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31051 } else {
31052 PRINTK (KERN_INFO, "dropped over-size frame");
31053 // should we count this?
31054- atomic_inc(&atm_vcc->stats->rx_drop);
31055+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31056 }
31057
31058 } else {
31059@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31060 }
31061
31062 if (check_area (skb->data, skb->len)) {
31063- atomic_inc(&atm_vcc->stats->tx_err);
31064+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31065 return -ENOMEM; // ?
31066 }
31067
31068diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31069index 02ad83d..6daffeb 100644
31070--- a/drivers/atm/atmtcp.c
31071+++ b/drivers/atm/atmtcp.c
31072@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31073 if (vcc->pop) vcc->pop(vcc,skb);
31074 else dev_kfree_skb(skb);
31075 if (dev_data) return 0;
31076- atomic_inc(&vcc->stats->tx_err);
31077+ atomic_inc_unchecked(&vcc->stats->tx_err);
31078 return -ENOLINK;
31079 }
31080 size = skb->len+sizeof(struct atmtcp_hdr);
31081@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31082 if (!new_skb) {
31083 if (vcc->pop) vcc->pop(vcc,skb);
31084 else dev_kfree_skb(skb);
31085- atomic_inc(&vcc->stats->tx_err);
31086+ atomic_inc_unchecked(&vcc->stats->tx_err);
31087 return -ENOBUFS;
31088 }
31089 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31090@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31091 if (vcc->pop) vcc->pop(vcc,skb);
31092 else dev_kfree_skb(skb);
31093 out_vcc->push(out_vcc,new_skb);
31094- atomic_inc(&vcc->stats->tx);
31095- atomic_inc(&out_vcc->stats->rx);
31096+ atomic_inc_unchecked(&vcc->stats->tx);
31097+ atomic_inc_unchecked(&out_vcc->stats->rx);
31098 return 0;
31099 }
31100
31101@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31102 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31103 read_unlock(&vcc_sklist_lock);
31104 if (!out_vcc) {
31105- atomic_inc(&vcc->stats->tx_err);
31106+ atomic_inc_unchecked(&vcc->stats->tx_err);
31107 goto done;
31108 }
31109 skb_pull(skb,sizeof(struct atmtcp_hdr));
31110@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31111 __net_timestamp(new_skb);
31112 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31113 out_vcc->push(out_vcc,new_skb);
31114- atomic_inc(&vcc->stats->tx);
31115- atomic_inc(&out_vcc->stats->rx);
31116+ atomic_inc_unchecked(&vcc->stats->tx);
31117+ atomic_inc_unchecked(&out_vcc->stats->rx);
31118 done:
31119 if (vcc->pop) vcc->pop(vcc,skb);
31120 else dev_kfree_skb(skb);
31121diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31122index 0c30261..3da356e 100644
31123--- a/drivers/atm/eni.c
31124+++ b/drivers/atm/eni.c
31125@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31126 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31127 vcc->dev->number);
31128 length = 0;
31129- atomic_inc(&vcc->stats->rx_err);
31130+ atomic_inc_unchecked(&vcc->stats->rx_err);
31131 }
31132 else {
31133 length = ATM_CELL_SIZE-1; /* no HEC */
31134@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31135 size);
31136 }
31137 eff = length = 0;
31138- atomic_inc(&vcc->stats->rx_err);
31139+ atomic_inc_unchecked(&vcc->stats->rx_err);
31140 }
31141 else {
31142 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31143@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31144 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31145 vcc->dev->number,vcc->vci,length,size << 2,descr);
31146 length = eff = 0;
31147- atomic_inc(&vcc->stats->rx_err);
31148+ atomic_inc_unchecked(&vcc->stats->rx_err);
31149 }
31150 }
31151 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31152@@ -770,7 +770,7 @@ rx_dequeued++;
31153 vcc->push(vcc,skb);
31154 pushed++;
31155 }
31156- atomic_inc(&vcc->stats->rx);
31157+ atomic_inc_unchecked(&vcc->stats->rx);
31158 }
31159 wake_up(&eni_dev->rx_wait);
31160 }
31161@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31162 PCI_DMA_TODEVICE);
31163 if (vcc->pop) vcc->pop(vcc,skb);
31164 else dev_kfree_skb_irq(skb);
31165- atomic_inc(&vcc->stats->tx);
31166+ atomic_inc_unchecked(&vcc->stats->tx);
31167 wake_up(&eni_dev->tx_wait);
31168 dma_complete++;
31169 }
31170@@ -1570,7 +1570,7 @@ tx_complete++;
31171 /*--------------------------------- entries ---------------------------------*/
31172
31173
31174-static const char *media_name[] __devinitdata = {
31175+static const char *media_name[] __devinitconst = {
31176 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
31177 "UTP", "05?", "06?", "07?", /* 4- 7 */
31178 "TAXI","09?", "10?", "11?", /* 8-11 */
31179diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31180index cd5049a..a51209f 100644
31181--- a/drivers/atm/firestream.c
31182+++ b/drivers/atm/firestream.c
31183@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31184 }
31185 }
31186
31187- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31188+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31189
31190 fs_dprintk (FS_DEBUG_TXMEM, "i");
31191 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31192@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31193 #endif
31194 skb_put (skb, qe->p1 & 0xffff);
31195 ATM_SKB(skb)->vcc = atm_vcc;
31196- atomic_inc(&atm_vcc->stats->rx);
31197+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31198 __net_timestamp(skb);
31199 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31200 atm_vcc->push (atm_vcc, skb);
31201@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31202 kfree (pe);
31203 }
31204 if (atm_vcc)
31205- atomic_inc(&atm_vcc->stats->rx_drop);
31206+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31207 break;
31208 case 0x1f: /* Reassembly abort: no buffers. */
31209 /* Silently increment error counter. */
31210 if (atm_vcc)
31211- atomic_inc(&atm_vcc->stats->rx_drop);
31212+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31213 break;
31214 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31215 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31216diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31217index f766cc4..a34002e 100644
31218--- a/drivers/atm/fore200e.c
31219+++ b/drivers/atm/fore200e.c
31220@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31221 #endif
31222 /* check error condition */
31223 if (*entry->status & STATUS_ERROR)
31224- atomic_inc(&vcc->stats->tx_err);
31225+ atomic_inc_unchecked(&vcc->stats->tx_err);
31226 else
31227- atomic_inc(&vcc->stats->tx);
31228+ atomic_inc_unchecked(&vcc->stats->tx);
31229 }
31230 }
31231
31232@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31233 if (skb == NULL) {
31234 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31235
31236- atomic_inc(&vcc->stats->rx_drop);
31237+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31238 return -ENOMEM;
31239 }
31240
31241@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31242
31243 dev_kfree_skb_any(skb);
31244
31245- atomic_inc(&vcc->stats->rx_drop);
31246+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31247 return -ENOMEM;
31248 }
31249
31250 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31251
31252 vcc->push(vcc, skb);
31253- atomic_inc(&vcc->stats->rx);
31254+ atomic_inc_unchecked(&vcc->stats->rx);
31255
31256 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31257
31258@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31259 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31260 fore200e->atm_dev->number,
31261 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31262- atomic_inc(&vcc->stats->rx_err);
31263+ atomic_inc_unchecked(&vcc->stats->rx_err);
31264 }
31265 }
31266
31267@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31268 goto retry_here;
31269 }
31270
31271- atomic_inc(&vcc->stats->tx_err);
31272+ atomic_inc_unchecked(&vcc->stats->tx_err);
31273
31274 fore200e->tx_sat++;
31275 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31276diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31277index 7066703..2b130de 100644
31278--- a/drivers/atm/he.c
31279+++ b/drivers/atm/he.c
31280@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31281
31282 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31283 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31284- atomic_inc(&vcc->stats->rx_drop);
31285+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31286 goto return_host_buffers;
31287 }
31288
31289@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31290 RBRQ_LEN_ERR(he_dev->rbrq_head)
31291 ? "LEN_ERR" : "",
31292 vcc->vpi, vcc->vci);
31293- atomic_inc(&vcc->stats->rx_err);
31294+ atomic_inc_unchecked(&vcc->stats->rx_err);
31295 goto return_host_buffers;
31296 }
31297
31298@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31299 vcc->push(vcc, skb);
31300 spin_lock(&he_dev->global_lock);
31301
31302- atomic_inc(&vcc->stats->rx);
31303+ atomic_inc_unchecked(&vcc->stats->rx);
31304
31305 return_host_buffers:
31306 ++pdus_assembled;
31307@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31308 tpd->vcc->pop(tpd->vcc, tpd->skb);
31309 else
31310 dev_kfree_skb_any(tpd->skb);
31311- atomic_inc(&tpd->vcc->stats->tx_err);
31312+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31313 }
31314 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31315 return;
31316@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31317 vcc->pop(vcc, skb);
31318 else
31319 dev_kfree_skb_any(skb);
31320- atomic_inc(&vcc->stats->tx_err);
31321+ atomic_inc_unchecked(&vcc->stats->tx_err);
31322 return -EINVAL;
31323 }
31324
31325@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31326 vcc->pop(vcc, skb);
31327 else
31328 dev_kfree_skb_any(skb);
31329- atomic_inc(&vcc->stats->tx_err);
31330+ atomic_inc_unchecked(&vcc->stats->tx_err);
31331 return -EINVAL;
31332 }
31333 #endif
31334@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31335 vcc->pop(vcc, skb);
31336 else
31337 dev_kfree_skb_any(skb);
31338- atomic_inc(&vcc->stats->tx_err);
31339+ atomic_inc_unchecked(&vcc->stats->tx_err);
31340 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31341 return -ENOMEM;
31342 }
31343@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31344 vcc->pop(vcc, skb);
31345 else
31346 dev_kfree_skb_any(skb);
31347- atomic_inc(&vcc->stats->tx_err);
31348+ atomic_inc_unchecked(&vcc->stats->tx_err);
31349 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31350 return -ENOMEM;
31351 }
31352@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31353 __enqueue_tpd(he_dev, tpd, cid);
31354 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31355
31356- atomic_inc(&vcc->stats->tx);
31357+ atomic_inc_unchecked(&vcc->stats->tx);
31358
31359 return 0;
31360 }
31361diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31362index 4e49021..01b1512 100644
31363--- a/drivers/atm/horizon.c
31364+++ b/drivers/atm/horizon.c
31365@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31366 {
31367 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31368 // VC layer stats
31369- atomic_inc(&vcc->stats->rx);
31370+ atomic_inc_unchecked(&vcc->stats->rx);
31371 __net_timestamp(skb);
31372 // end of our responsability
31373 vcc->push (vcc, skb);
31374@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31375 dev->tx_iovec = NULL;
31376
31377 // VC layer stats
31378- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31379+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31380
31381 // free the skb
31382 hrz_kfree_skb (skb);
31383diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31384index e33ae00..9deb4ab 100644
31385--- a/drivers/atm/idt77252.c
31386+++ b/drivers/atm/idt77252.c
31387@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31388 else
31389 dev_kfree_skb(skb);
31390
31391- atomic_inc(&vcc->stats->tx);
31392+ atomic_inc_unchecked(&vcc->stats->tx);
31393 }
31394
31395 atomic_dec(&scq->used);
31396@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31397 if ((sb = dev_alloc_skb(64)) == NULL) {
31398 printk("%s: Can't allocate buffers for aal0.\n",
31399 card->name);
31400- atomic_add(i, &vcc->stats->rx_drop);
31401+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
31402 break;
31403 }
31404 if (!atm_charge(vcc, sb->truesize)) {
31405 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31406 card->name);
31407- atomic_add(i - 1, &vcc->stats->rx_drop);
31408+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31409 dev_kfree_skb(sb);
31410 break;
31411 }
31412@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31413 ATM_SKB(sb)->vcc = vcc;
31414 __net_timestamp(sb);
31415 vcc->push(vcc, sb);
31416- atomic_inc(&vcc->stats->rx);
31417+ atomic_inc_unchecked(&vcc->stats->rx);
31418
31419 cell += ATM_CELL_PAYLOAD;
31420 }
31421@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31422 "(CDC: %08x)\n",
31423 card->name, len, rpp->len, readl(SAR_REG_CDC));
31424 recycle_rx_pool_skb(card, rpp);
31425- atomic_inc(&vcc->stats->rx_err);
31426+ atomic_inc_unchecked(&vcc->stats->rx_err);
31427 return;
31428 }
31429 if (stat & SAR_RSQE_CRC) {
31430 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31431 recycle_rx_pool_skb(card, rpp);
31432- atomic_inc(&vcc->stats->rx_err);
31433+ atomic_inc_unchecked(&vcc->stats->rx_err);
31434 return;
31435 }
31436 if (skb_queue_len(&rpp->queue) > 1) {
31437@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31438 RXPRINTK("%s: Can't alloc RX skb.\n",
31439 card->name);
31440 recycle_rx_pool_skb(card, rpp);
31441- atomic_inc(&vcc->stats->rx_err);
31442+ atomic_inc_unchecked(&vcc->stats->rx_err);
31443 return;
31444 }
31445 if (!atm_charge(vcc, skb->truesize)) {
31446@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31447 __net_timestamp(skb);
31448
31449 vcc->push(vcc, skb);
31450- atomic_inc(&vcc->stats->rx);
31451+ atomic_inc_unchecked(&vcc->stats->rx);
31452
31453 return;
31454 }
31455@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31456 __net_timestamp(skb);
31457
31458 vcc->push(vcc, skb);
31459- atomic_inc(&vcc->stats->rx);
31460+ atomic_inc_unchecked(&vcc->stats->rx);
31461
31462 if (skb->truesize > SAR_FB_SIZE_3)
31463 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31464@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31465 if (vcc->qos.aal != ATM_AAL0) {
31466 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
31467 card->name, vpi, vci);
31468- atomic_inc(&vcc->stats->rx_drop);
31469+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31470 goto drop;
31471 }
31472
31473 if ((sb = dev_alloc_skb(64)) == NULL) {
31474 printk("%s: Can't allocate buffers for AAL0.\n",
31475 card->name);
31476- atomic_inc(&vcc->stats->rx_err);
31477+ atomic_inc_unchecked(&vcc->stats->rx_err);
31478 goto drop;
31479 }
31480
31481@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
31482 ATM_SKB(sb)->vcc = vcc;
31483 __net_timestamp(sb);
31484 vcc->push(vcc, sb);
31485- atomic_inc(&vcc->stats->rx);
31486+ atomic_inc_unchecked(&vcc->stats->rx);
31487
31488 drop:
31489 skb_pull(queue, 64);
31490@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31491
31492 if (vc == NULL) {
31493 printk("%s: NULL connection in send().\n", card->name);
31494- atomic_inc(&vcc->stats->tx_err);
31495+ atomic_inc_unchecked(&vcc->stats->tx_err);
31496 dev_kfree_skb(skb);
31497 return -EINVAL;
31498 }
31499 if (!test_bit(VCF_TX, &vc->flags)) {
31500 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
31501- atomic_inc(&vcc->stats->tx_err);
31502+ atomic_inc_unchecked(&vcc->stats->tx_err);
31503 dev_kfree_skb(skb);
31504 return -EINVAL;
31505 }
31506@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31507 break;
31508 default:
31509 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
31510- atomic_inc(&vcc->stats->tx_err);
31511+ atomic_inc_unchecked(&vcc->stats->tx_err);
31512 dev_kfree_skb(skb);
31513 return -EINVAL;
31514 }
31515
31516 if (skb_shinfo(skb)->nr_frags != 0) {
31517 printk("%s: No scatter-gather yet.\n", card->name);
31518- atomic_inc(&vcc->stats->tx_err);
31519+ atomic_inc_unchecked(&vcc->stats->tx_err);
31520 dev_kfree_skb(skb);
31521 return -EINVAL;
31522 }
31523@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31524
31525 err = queue_skb(card, vc, skb, oam);
31526 if (err) {
31527- atomic_inc(&vcc->stats->tx_err);
31528+ atomic_inc_unchecked(&vcc->stats->tx_err);
31529 dev_kfree_skb(skb);
31530 return err;
31531 }
31532@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
31533 skb = dev_alloc_skb(64);
31534 if (!skb) {
31535 printk("%s: Out of memory in send_oam().\n", card->name);
31536- atomic_inc(&vcc->stats->tx_err);
31537+ atomic_inc_unchecked(&vcc->stats->tx_err);
31538 return -ENOMEM;
31539 }
31540 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
31541diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
31542index b2c1b37..faa672b 100644
31543--- a/drivers/atm/iphase.c
31544+++ b/drivers/atm/iphase.c
31545@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
31546 status = (u_short) (buf_desc_ptr->desc_mode);
31547 if (status & (RX_CER | RX_PTE | RX_OFL))
31548 {
31549- atomic_inc(&vcc->stats->rx_err);
31550+ atomic_inc_unchecked(&vcc->stats->rx_err);
31551 IF_ERR(printk("IA: bad packet, dropping it");)
31552 if (status & RX_CER) {
31553 IF_ERR(printk(" cause: packet CRC error\n");)
31554@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
31555 len = dma_addr - buf_addr;
31556 if (len > iadev->rx_buf_sz) {
31557 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
31558- atomic_inc(&vcc->stats->rx_err);
31559+ atomic_inc_unchecked(&vcc->stats->rx_err);
31560 goto out_free_desc;
31561 }
31562
31563@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31564 ia_vcc = INPH_IA_VCC(vcc);
31565 if (ia_vcc == NULL)
31566 {
31567- atomic_inc(&vcc->stats->rx_err);
31568+ atomic_inc_unchecked(&vcc->stats->rx_err);
31569 dev_kfree_skb_any(skb);
31570 atm_return(vcc, atm_guess_pdu2truesize(len));
31571 goto INCR_DLE;
31572@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31573 if ((length > iadev->rx_buf_sz) || (length >
31574 (skb->len - sizeof(struct cpcs_trailer))))
31575 {
31576- atomic_inc(&vcc->stats->rx_err);
31577+ atomic_inc_unchecked(&vcc->stats->rx_err);
31578 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
31579 length, skb->len);)
31580 dev_kfree_skb_any(skb);
31581@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31582
31583 IF_RX(printk("rx_dle_intr: skb push");)
31584 vcc->push(vcc,skb);
31585- atomic_inc(&vcc->stats->rx);
31586+ atomic_inc_unchecked(&vcc->stats->rx);
31587 iadev->rx_pkt_cnt++;
31588 }
31589 INCR_DLE:
31590@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
31591 {
31592 struct k_sonet_stats *stats;
31593 stats = &PRIV(_ia_dev[board])->sonet_stats;
31594- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
31595- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
31596- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
31597- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
31598- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
31599- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
31600- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
31601- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
31602- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
31603+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
31604+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
31605+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
31606+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
31607+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
31608+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
31609+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
31610+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
31611+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
31612 }
31613 ia_cmds.status = 0;
31614 break;
31615@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31616 if ((desc == 0) || (desc > iadev->num_tx_desc))
31617 {
31618 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
31619- atomic_inc(&vcc->stats->tx);
31620+ atomic_inc_unchecked(&vcc->stats->tx);
31621 if (vcc->pop)
31622 vcc->pop(vcc, skb);
31623 else
31624@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31625 ATM_DESC(skb) = vcc->vci;
31626 skb_queue_tail(&iadev->tx_dma_q, skb);
31627
31628- atomic_inc(&vcc->stats->tx);
31629+ atomic_inc_unchecked(&vcc->stats->tx);
31630 iadev->tx_pkt_cnt++;
31631 /* Increment transaction counter */
31632 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
31633
31634 #if 0
31635 /* add flow control logic */
31636- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
31637+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
31638 if (iavcc->vc_desc_cnt > 10) {
31639 vcc->tx_quota = vcc->tx_quota * 3 / 4;
31640 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
31641diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
31642index cf97c34..8d30655 100644
31643--- a/drivers/atm/lanai.c
31644+++ b/drivers/atm/lanai.c
31645@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
31646 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
31647 lanai_endtx(lanai, lvcc);
31648 lanai_free_skb(lvcc->tx.atmvcc, skb);
31649- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
31650+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
31651 }
31652
31653 /* Try to fill the buffer - don't call unless there is backlog */
31654@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
31655 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
31656 __net_timestamp(skb);
31657 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
31658- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
31659+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
31660 out:
31661 lvcc->rx.buf.ptr = end;
31662 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
31663@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31664 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
31665 "vcc %d\n", lanai->number, (unsigned int) s, vci);
31666 lanai->stats.service_rxnotaal5++;
31667- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31668+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31669 return 0;
31670 }
31671 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
31672@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31673 int bytes;
31674 read_unlock(&vcc_sklist_lock);
31675 DPRINTK("got trashed rx pdu on vci %d\n", vci);
31676- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31677+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31678 lvcc->stats.x.aal5.service_trash++;
31679 bytes = (SERVICE_GET_END(s) * 16) -
31680 (((unsigned long) lvcc->rx.buf.ptr) -
31681@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31682 }
31683 if (s & SERVICE_STREAM) {
31684 read_unlock(&vcc_sklist_lock);
31685- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31686+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31687 lvcc->stats.x.aal5.service_stream++;
31688 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
31689 "PDU on VCI %d!\n", lanai->number, vci);
31690@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31691 return 0;
31692 }
31693 DPRINTK("got rx crc error on vci %d\n", vci);
31694- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31695+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31696 lvcc->stats.x.aal5.service_rxcrc++;
31697 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
31698 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
31699diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
31700index 3da804b..d3b0eed 100644
31701--- a/drivers/atm/nicstar.c
31702+++ b/drivers/atm/nicstar.c
31703@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31704 if ((vc = (vc_map *) vcc->dev_data) == NULL)
31705 {
31706 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
31707- atomic_inc(&vcc->stats->tx_err);
31708+ atomic_inc_unchecked(&vcc->stats->tx_err);
31709 dev_kfree_skb_any(skb);
31710 return -EINVAL;
31711 }
31712@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31713 if (!vc->tx)
31714 {
31715 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
31716- atomic_inc(&vcc->stats->tx_err);
31717+ atomic_inc_unchecked(&vcc->stats->tx_err);
31718 dev_kfree_skb_any(skb);
31719 return -EINVAL;
31720 }
31721@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31722 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
31723 {
31724 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
31725- atomic_inc(&vcc->stats->tx_err);
31726+ atomic_inc_unchecked(&vcc->stats->tx_err);
31727 dev_kfree_skb_any(skb);
31728 return -EINVAL;
31729 }
31730@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31731 if (skb_shinfo(skb)->nr_frags != 0)
31732 {
31733 printk("nicstar%d: No scatter-gather yet.\n", card->index);
31734- atomic_inc(&vcc->stats->tx_err);
31735+ atomic_inc_unchecked(&vcc->stats->tx_err);
31736 dev_kfree_skb_any(skb);
31737 return -EINVAL;
31738 }
31739@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31740
31741 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
31742 {
31743- atomic_inc(&vcc->stats->tx_err);
31744+ atomic_inc_unchecked(&vcc->stats->tx_err);
31745 dev_kfree_skb_any(skb);
31746 return -EIO;
31747 }
31748- atomic_inc(&vcc->stats->tx);
31749+ atomic_inc_unchecked(&vcc->stats->tx);
31750
31751 return 0;
31752 }
31753@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31754 {
31755 printk("nicstar%d: Can't allocate buffers for aal0.\n",
31756 card->index);
31757- atomic_add(i,&vcc->stats->rx_drop);
31758+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
31759 break;
31760 }
31761 if (!atm_charge(vcc, sb->truesize))
31762 {
31763 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
31764 card->index);
31765- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31766+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31767 dev_kfree_skb_any(sb);
31768 break;
31769 }
31770@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31771 ATM_SKB(sb)->vcc = vcc;
31772 __net_timestamp(sb);
31773 vcc->push(vcc, sb);
31774- atomic_inc(&vcc->stats->rx);
31775+ atomic_inc_unchecked(&vcc->stats->rx);
31776 cell += ATM_CELL_PAYLOAD;
31777 }
31778
31779@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31780 if (iovb == NULL)
31781 {
31782 printk("nicstar%d: Out of iovec buffers.\n", card->index);
31783- atomic_inc(&vcc->stats->rx_drop);
31784+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31785 recycle_rx_buf(card, skb);
31786 return;
31787 }
31788@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31789 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
31790 {
31791 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
31792- atomic_inc(&vcc->stats->rx_err);
31793+ atomic_inc_unchecked(&vcc->stats->rx_err);
31794 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
31795 NS_SKB(iovb)->iovcnt = 0;
31796 iovb->len = 0;
31797@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31798 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
31799 card->index);
31800 which_list(card, skb);
31801- atomic_inc(&vcc->stats->rx_err);
31802+ atomic_inc_unchecked(&vcc->stats->rx_err);
31803 recycle_rx_buf(card, skb);
31804 vc->rx_iov = NULL;
31805 recycle_iov_buf(card, iovb);
31806@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31807 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
31808 card->index);
31809 which_list(card, skb);
31810- atomic_inc(&vcc->stats->rx_err);
31811+ atomic_inc_unchecked(&vcc->stats->rx_err);
31812 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31813 NS_SKB(iovb)->iovcnt);
31814 vc->rx_iov = NULL;
31815@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31816 printk(" - PDU size mismatch.\n");
31817 else
31818 printk(".\n");
31819- atomic_inc(&vcc->stats->rx_err);
31820+ atomic_inc_unchecked(&vcc->stats->rx_err);
31821 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31822 NS_SKB(iovb)->iovcnt);
31823 vc->rx_iov = NULL;
31824@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31825 if (!atm_charge(vcc, skb->truesize))
31826 {
31827 push_rxbufs(card, skb);
31828- atomic_inc(&vcc->stats->rx_drop);
31829+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31830 }
31831 else
31832 {
31833@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31834 ATM_SKB(skb)->vcc = vcc;
31835 __net_timestamp(skb);
31836 vcc->push(vcc, skb);
31837- atomic_inc(&vcc->stats->rx);
31838+ atomic_inc_unchecked(&vcc->stats->rx);
31839 }
31840 }
31841 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
31842@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31843 if (!atm_charge(vcc, sb->truesize))
31844 {
31845 push_rxbufs(card, sb);
31846- atomic_inc(&vcc->stats->rx_drop);
31847+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31848 }
31849 else
31850 {
31851@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31852 ATM_SKB(sb)->vcc = vcc;
31853 __net_timestamp(sb);
31854 vcc->push(vcc, sb);
31855- atomic_inc(&vcc->stats->rx);
31856+ atomic_inc_unchecked(&vcc->stats->rx);
31857 }
31858
31859 push_rxbufs(card, skb);
31860@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31861 if (!atm_charge(vcc, skb->truesize))
31862 {
31863 push_rxbufs(card, skb);
31864- atomic_inc(&vcc->stats->rx_drop);
31865+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31866 }
31867 else
31868 {
31869@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31870 ATM_SKB(skb)->vcc = vcc;
31871 __net_timestamp(skb);
31872 vcc->push(vcc, skb);
31873- atomic_inc(&vcc->stats->rx);
31874+ atomic_inc_unchecked(&vcc->stats->rx);
31875 }
31876
31877 push_rxbufs(card, sb);
31878@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31879 if (hb == NULL)
31880 {
31881 printk("nicstar%d: Out of huge buffers.\n", card->index);
31882- atomic_inc(&vcc->stats->rx_drop);
31883+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31884 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31885 NS_SKB(iovb)->iovcnt);
31886 vc->rx_iov = NULL;
31887@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31888 }
31889 else
31890 dev_kfree_skb_any(hb);
31891- atomic_inc(&vcc->stats->rx_drop);
31892+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31893 }
31894 else
31895 {
31896@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31897 #endif /* NS_USE_DESTRUCTORS */
31898 __net_timestamp(hb);
31899 vcc->push(vcc, hb);
31900- atomic_inc(&vcc->stats->rx);
31901+ atomic_inc_unchecked(&vcc->stats->rx);
31902 }
31903 }
31904
31905diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
31906index 84c93ff..e6ed269 100644
31907--- a/drivers/atm/solos-pci.c
31908+++ b/drivers/atm/solos-pci.c
31909@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
31910 }
31911 atm_charge(vcc, skb->truesize);
31912 vcc->push(vcc, skb);
31913- atomic_inc(&vcc->stats->rx);
31914+ atomic_inc_unchecked(&vcc->stats->rx);
31915 break;
31916
31917 case PKT_STATUS:
31918@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
31919 char msg[500];
31920 char item[10];
31921
31922+ pax_track_stack();
31923+
31924 len = buf->len;
31925 for (i = 0; i < len; i++){
31926 if(i % 8 == 0)
31927@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
31928 vcc = SKB_CB(oldskb)->vcc;
31929
31930 if (vcc) {
31931- atomic_inc(&vcc->stats->tx);
31932+ atomic_inc_unchecked(&vcc->stats->tx);
31933 solos_pop(vcc, oldskb);
31934 } else
31935 dev_kfree_skb_irq(oldskb);
31936diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
31937index 6dd3f59..ee377f3 100644
31938--- a/drivers/atm/suni.c
31939+++ b/drivers/atm/suni.c
31940@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
31941
31942
31943 #define ADD_LIMITED(s,v) \
31944- atomic_add((v),&stats->s); \
31945- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
31946+ atomic_add_unchecked((v),&stats->s); \
31947+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
31948
31949
31950 static void suni_hz(unsigned long from_timer)
31951diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
31952index fc8cb07..4a80e53 100644
31953--- a/drivers/atm/uPD98402.c
31954+++ b/drivers/atm/uPD98402.c
31955@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31956 struct sonet_stats tmp;
31957 int error = 0;
31958
31959- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31960+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31961 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31962 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31963 if (zero && !error) {
31964@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31965
31966
31967 #define ADD_LIMITED(s,v) \
31968- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31969- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31970- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31971+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31972+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31973+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31974
31975
31976 static void stat_event(struct atm_dev *dev)
31977@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
31978 if (reason & uPD98402_INT_PFM) stat_event(dev);
31979 if (reason & uPD98402_INT_PCO) {
31980 (void) GET(PCOCR); /* clear interrupt cause */
31981- atomic_add(GET(HECCT),
31982+ atomic_add_unchecked(GET(HECCT),
31983 &PRIV(dev)->sonet_stats.uncorr_hcs);
31984 }
31985 if ((reason & uPD98402_INT_RFO) &&
31986@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
31987 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
31988 uPD98402_INT_LOS),PIMR); /* enable them */
31989 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
31990- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31991- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
31992- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
31993+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31994+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
31995+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
31996 return 0;
31997 }
31998
31999diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32000index 2e9635b..32927b4 100644
32001--- a/drivers/atm/zatm.c
32002+++ b/drivers/atm/zatm.c
32003@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32004 }
32005 if (!size) {
32006 dev_kfree_skb_irq(skb);
32007- if (vcc) atomic_inc(&vcc->stats->rx_err);
32008+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32009 continue;
32010 }
32011 if (!atm_charge(vcc,skb->truesize)) {
32012@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32013 skb->len = size;
32014 ATM_SKB(skb)->vcc = vcc;
32015 vcc->push(vcc,skb);
32016- atomic_inc(&vcc->stats->rx);
32017+ atomic_inc_unchecked(&vcc->stats->rx);
32018 }
32019 zout(pos & 0xffff,MTA(mbx));
32020 #if 0 /* probably a stupid idea */
32021@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32022 skb_queue_head(&zatm_vcc->backlog,skb);
32023 break;
32024 }
32025- atomic_inc(&vcc->stats->tx);
32026+ atomic_inc_unchecked(&vcc->stats->tx);
32027 wake_up(&zatm_vcc->tx_wait);
32028 }
32029
32030diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32031index 63c143e..fece183 100644
32032--- a/drivers/base/bus.c
32033+++ b/drivers/base/bus.c
32034@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
32035 return ret;
32036 }
32037
32038-static struct sysfs_ops driver_sysfs_ops = {
32039+static const struct sysfs_ops driver_sysfs_ops = {
32040 .show = drv_attr_show,
32041 .store = drv_attr_store,
32042 };
32043@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
32044 return ret;
32045 }
32046
32047-static struct sysfs_ops bus_sysfs_ops = {
32048+static const struct sysfs_ops bus_sysfs_ops = {
32049 .show = bus_attr_show,
32050 .store = bus_attr_store,
32051 };
32052@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
32053 return 0;
32054 }
32055
32056-static struct kset_uevent_ops bus_uevent_ops = {
32057+static const struct kset_uevent_ops bus_uevent_ops = {
32058 .filter = bus_uevent_filter,
32059 };
32060
32061diff --git a/drivers/base/class.c b/drivers/base/class.c
32062index 6e2c3b0..cb61871 100644
32063--- a/drivers/base/class.c
32064+++ b/drivers/base/class.c
32065@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
32066 kfree(cp);
32067 }
32068
32069-static struct sysfs_ops class_sysfs_ops = {
32070+static const struct sysfs_ops class_sysfs_ops = {
32071 .show = class_attr_show,
32072 .store = class_attr_store,
32073 };
32074diff --git a/drivers/base/core.c b/drivers/base/core.c
32075index f33d768..a9358d0 100644
32076--- a/drivers/base/core.c
32077+++ b/drivers/base/core.c
32078@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
32079 return ret;
32080 }
32081
32082-static struct sysfs_ops dev_sysfs_ops = {
32083+static const struct sysfs_ops dev_sysfs_ops = {
32084 .show = dev_attr_show,
32085 .store = dev_attr_store,
32086 };
32087@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
32088 return retval;
32089 }
32090
32091-static struct kset_uevent_ops device_uevent_ops = {
32092+static const struct kset_uevent_ops device_uevent_ops = {
32093 .filter = dev_uevent_filter,
32094 .name = dev_uevent_name,
32095 .uevent = dev_uevent,
32096diff --git a/drivers/base/memory.c b/drivers/base/memory.c
32097index 989429c..2272b00 100644
32098--- a/drivers/base/memory.c
32099+++ b/drivers/base/memory.c
32100@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
32101 return retval;
32102 }
32103
32104-static struct kset_uevent_ops memory_uevent_ops = {
32105+static const struct kset_uevent_ops memory_uevent_ops = {
32106 .name = memory_uevent_name,
32107 .uevent = memory_uevent,
32108 };
32109diff --git a/drivers/base/sys.c b/drivers/base/sys.c
32110index 3f202f7..61c4a6f 100644
32111--- a/drivers/base/sys.c
32112+++ b/drivers/base/sys.c
32113@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
32114 return -EIO;
32115 }
32116
32117-static struct sysfs_ops sysfs_ops = {
32118+static const struct sysfs_ops sysfs_ops = {
32119 .show = sysdev_show,
32120 .store = sysdev_store,
32121 };
32122@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
32123 return -EIO;
32124 }
32125
32126-static struct sysfs_ops sysfs_class_ops = {
32127+static const struct sysfs_ops sysfs_class_ops = {
32128 .show = sysdev_class_show,
32129 .store = sysdev_class_store,
32130 };
32131diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
32132index eb4fa19..1954777 100644
32133--- a/drivers/block/DAC960.c
32134+++ b/drivers/block/DAC960.c
32135@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
32136 unsigned long flags;
32137 int Channel, TargetID;
32138
32139+ pax_track_stack();
32140+
32141 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
32142 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
32143 sizeof(DAC960_SCSI_Inquiry_T) +
32144diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32145index 68b90d9..7e2e3f3 100644
32146--- a/drivers/block/cciss.c
32147+++ b/drivers/block/cciss.c
32148@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32149 int err;
32150 u32 cp;
32151
32152+ memset(&arg64, 0, sizeof(arg64));
32153+
32154 err = 0;
32155 err |=
32156 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32157@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
32158 /* Wait (up to 20 seconds) for a command to complete */
32159
32160 for (i = 20 * HZ; i > 0; i--) {
32161- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32162+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32163 if (done == FIFO_EMPTY)
32164 schedule_timeout_uninterruptible(1);
32165 else
32166@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
32167 resend_cmd1:
32168
32169 /* Disable interrupt on the board. */
32170- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32171+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32172
32173 /* Make sure there is room in the command FIFO */
32174 /* Actually it should be completely empty at this time */
32175@@ -2884,13 +2886,13 @@ resend_cmd1:
32176 /* tape side of the driver. */
32177 for (i = 200000; i > 0; i--) {
32178 /* if fifo isn't full go */
32179- if (!(h->access.fifo_full(h)))
32180+ if (!(h->access->fifo_full(h)))
32181 break;
32182 udelay(10);
32183 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
32184 " waiting!\n", h->ctlr);
32185 }
32186- h->access.submit_command(h, c); /* Send the cmd */
32187+ h->access->submit_command(h, c); /* Send the cmd */
32188 do {
32189 complete = pollcomplete(h->ctlr);
32190
32191@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
32192 while (!hlist_empty(&h->reqQ)) {
32193 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
32194 /* can't do anything if fifo is full */
32195- if ((h->access.fifo_full(h))) {
32196+ if ((h->access->fifo_full(h))) {
32197 printk(KERN_WARNING "cciss: fifo full\n");
32198 break;
32199 }
32200@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
32201 h->Qdepth--;
32202
32203 /* Tell the controller execute command */
32204- h->access.submit_command(h, c);
32205+ h->access->submit_command(h, c);
32206
32207 /* Put job onto the completed Q */
32208 addQ(&h->cmpQ, c);
32209@@ -3393,17 +3395,17 @@ startio:
32210
32211 static inline unsigned long get_next_completion(ctlr_info_t *h)
32212 {
32213- return h->access.command_completed(h);
32214+ return h->access->command_completed(h);
32215 }
32216
32217 static inline int interrupt_pending(ctlr_info_t *h)
32218 {
32219- return h->access.intr_pending(h);
32220+ return h->access->intr_pending(h);
32221 }
32222
32223 static inline long interrupt_not_for_us(ctlr_info_t *h)
32224 {
32225- return (((h->access.intr_pending(h) == 0) ||
32226+ return (((h->access->intr_pending(h) == 0) ||
32227 (h->interrupts_enabled == 0)));
32228 }
32229
32230@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
32231 */
32232 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
32233 c->product_name = products[prod_index].product_name;
32234- c->access = *(products[prod_index].access);
32235+ c->access = products[prod_index].access;
32236 c->nr_cmds = c->max_commands - 4;
32237 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
32238 (readb(&c->cfgtable->Signature[1]) != 'I') ||
32239@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32240 }
32241
32242 /* make sure the board interrupts are off */
32243- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
32244+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
32245 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
32246 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
32247 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
32248@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32249 cciss_scsi_setup(i);
32250
32251 /* Turn the interrupts on so we can service requests */
32252- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
32253+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
32254
32255 /* Get the firmware version */
32256 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32257diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32258index 04d6bf8..36e712d 100644
32259--- a/drivers/block/cciss.h
32260+++ b/drivers/block/cciss.h
32261@@ -90,7 +90,7 @@ struct ctlr_info
32262 // information about each logical volume
32263 drive_info_struct *drv[CISS_MAX_LUN];
32264
32265- struct access_method access;
32266+ struct access_method *access;
32267
32268 /* queue and queue Info */
32269 struct hlist_head reqQ;
32270diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32271index 6422651..bb1bdef 100644
32272--- a/drivers/block/cpqarray.c
32273+++ b/drivers/block/cpqarray.c
32274@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32275 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32276 goto Enomem4;
32277 }
32278- hba[i]->access.set_intr_mask(hba[i], 0);
32279+ hba[i]->access->set_intr_mask(hba[i], 0);
32280 if (request_irq(hba[i]->intr, do_ida_intr,
32281 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32282 {
32283@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32284 add_timer(&hba[i]->timer);
32285
32286 /* Enable IRQ now that spinlock and rate limit timer are set up */
32287- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32288+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32289
32290 for(j=0; j<NWD; j++) {
32291 struct gendisk *disk = ida_gendisk[i][j];
32292@@ -695,7 +695,7 @@ DBGINFO(
32293 for(i=0; i<NR_PRODUCTS; i++) {
32294 if (board_id == products[i].board_id) {
32295 c->product_name = products[i].product_name;
32296- c->access = *(products[i].access);
32297+ c->access = products[i].access;
32298 break;
32299 }
32300 }
32301@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
32302 hba[ctlr]->intr = intr;
32303 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32304 hba[ctlr]->product_name = products[j].product_name;
32305- hba[ctlr]->access = *(products[j].access);
32306+ hba[ctlr]->access = products[j].access;
32307 hba[ctlr]->ctlr = ctlr;
32308 hba[ctlr]->board_id = board_id;
32309 hba[ctlr]->pci_dev = NULL; /* not PCI */
32310@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
32311 struct scatterlist tmp_sg[SG_MAX];
32312 int i, dir, seg;
32313
32314+ pax_track_stack();
32315+
32316 if (blk_queue_plugged(q))
32317 goto startio;
32318
32319@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
32320
32321 while((c = h->reqQ) != NULL) {
32322 /* Can't do anything if we're busy */
32323- if (h->access.fifo_full(h) == 0)
32324+ if (h->access->fifo_full(h) == 0)
32325 return;
32326
32327 /* Get the first entry from the request Q */
32328@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
32329 h->Qdepth--;
32330
32331 /* Tell the controller to do our bidding */
32332- h->access.submit_command(h, c);
32333+ h->access->submit_command(h, c);
32334
32335 /* Get onto the completion Q */
32336 addQ(&h->cmpQ, c);
32337@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32338 unsigned long flags;
32339 __u32 a,a1;
32340
32341- istat = h->access.intr_pending(h);
32342+ istat = h->access->intr_pending(h);
32343 /* Is this interrupt for us? */
32344 if (istat == 0)
32345 return IRQ_NONE;
32346@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32347 */
32348 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32349 if (istat & FIFO_NOT_EMPTY) {
32350- while((a = h->access.command_completed(h))) {
32351+ while((a = h->access->command_completed(h))) {
32352 a1 = a; a &= ~3;
32353 if ((c = h->cmpQ) == NULL)
32354 {
32355@@ -1434,11 +1436,11 @@ static int sendcmd(
32356 /*
32357 * Disable interrupt
32358 */
32359- info_p->access.set_intr_mask(info_p, 0);
32360+ info_p->access->set_intr_mask(info_p, 0);
32361 /* Make sure there is room in the command FIFO */
32362 /* Actually it should be completely empty at this time. */
32363 for (i = 200000; i > 0; i--) {
32364- temp = info_p->access.fifo_full(info_p);
32365+ temp = info_p->access->fifo_full(info_p);
32366 if (temp != 0) {
32367 break;
32368 }
32369@@ -1451,7 +1453,7 @@ DBG(
32370 /*
32371 * Send the cmd
32372 */
32373- info_p->access.submit_command(info_p, c);
32374+ info_p->access->submit_command(info_p, c);
32375 complete = pollcomplete(ctlr);
32376
32377 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32378@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32379 * we check the new geometry. Then turn interrupts back on when
32380 * we're done.
32381 */
32382- host->access.set_intr_mask(host, 0);
32383+ host->access->set_intr_mask(host, 0);
32384 getgeometry(ctlr);
32385- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32386+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32387
32388 for(i=0; i<NWD; i++) {
32389 struct gendisk *disk = ida_gendisk[ctlr][i];
32390@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
32391 /* Wait (up to 2 seconds) for a command to complete */
32392
32393 for (i = 200000; i > 0; i--) {
32394- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32395+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32396 if (done == 0) {
32397 udelay(10); /* a short fixed delay */
32398 } else
32399diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32400index be73e9d..7fbf140 100644
32401--- a/drivers/block/cpqarray.h
32402+++ b/drivers/block/cpqarray.h
32403@@ -99,7 +99,7 @@ struct ctlr_info {
32404 drv_info_t drv[NWD];
32405 struct proc_dir_entry *proc;
32406
32407- struct access_method access;
32408+ struct access_method *access;
32409
32410 cmdlist_t *reqQ;
32411 cmdlist_t *cmpQ;
32412diff --git a/drivers/block/loop.c b/drivers/block/loop.c
32413index 8ec2d70..2804b30 100644
32414--- a/drivers/block/loop.c
32415+++ b/drivers/block/loop.c
32416@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
32417 mm_segment_t old_fs = get_fs();
32418
32419 set_fs(get_ds());
32420- bw = file->f_op->write(file, buf, len, &pos);
32421+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
32422 set_fs(old_fs);
32423 if (likely(bw == len))
32424 return 0;
32425diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
32426index 26ada47..083c480 100644
32427--- a/drivers/block/nbd.c
32428+++ b/drivers/block/nbd.c
32429@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
32430 struct kvec iov;
32431 sigset_t blocked, oldset;
32432
32433+ pax_track_stack();
32434+
32435 if (unlikely(!sock)) {
32436 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
32437 lo->disk->disk_name, (send ? "send" : "recv"));
32438@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
32439 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
32440 unsigned int cmd, unsigned long arg)
32441 {
32442+ pax_track_stack();
32443+
32444 switch (cmd) {
32445 case NBD_DISCONNECT: {
32446 struct request sreq;
32447diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
32448index a5d585d..d087be3 100644
32449--- a/drivers/block/pktcdvd.c
32450+++ b/drivers/block/pktcdvd.c
32451@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
32452 return len;
32453 }
32454
32455-static struct sysfs_ops kobj_pkt_ops = {
32456+static const struct sysfs_ops kobj_pkt_ops = {
32457 .show = kobj_pkt_show,
32458 .store = kobj_pkt_store
32459 };
32460diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
32461index 6aad99e..89cd142 100644
32462--- a/drivers/char/Kconfig
32463+++ b/drivers/char/Kconfig
32464@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
32465
32466 config DEVKMEM
32467 bool "/dev/kmem virtual device support"
32468- default y
32469+ default n
32470+ depends on !GRKERNSEC_KMEM
32471 help
32472 Say Y here if you want to support the /dev/kmem device. The
32473 /dev/kmem device is rarely used, but can be used for certain
32474@@ -1114,6 +1115,7 @@ config DEVPORT
32475 bool
32476 depends on !M68K
32477 depends on ISA || PCI
32478+ depends on !GRKERNSEC_KMEM
32479 default y
32480
32481 source "drivers/s390/char/Kconfig"
32482diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
32483index a96f319..a778a5b 100644
32484--- a/drivers/char/agp/frontend.c
32485+++ b/drivers/char/agp/frontend.c
32486@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
32487 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
32488 return -EFAULT;
32489
32490- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
32491+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
32492 return -EFAULT;
32493
32494 client = agp_find_client_by_pid(reserve.pid);
32495diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
32496index d8cff90..9628e70 100644
32497--- a/drivers/char/briq_panel.c
32498+++ b/drivers/char/briq_panel.c
32499@@ -10,6 +10,7 @@
32500 #include <linux/types.h>
32501 #include <linux/errno.h>
32502 #include <linux/tty.h>
32503+#include <linux/mutex.h>
32504 #include <linux/timer.h>
32505 #include <linux/kernel.h>
32506 #include <linux/wait.h>
32507@@ -36,6 +37,7 @@ static int vfd_is_open;
32508 static unsigned char vfd[40];
32509 static int vfd_cursor;
32510 static unsigned char ledpb, led;
32511+static DEFINE_MUTEX(vfd_mutex);
32512
32513 static void update_vfd(void)
32514 {
32515@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32516 if (!vfd_is_open)
32517 return -EBUSY;
32518
32519+ mutex_lock(&vfd_mutex);
32520 for (;;) {
32521 char c;
32522 if (!indx)
32523 break;
32524- if (get_user(c, buf))
32525+ if (get_user(c, buf)) {
32526+ mutex_unlock(&vfd_mutex);
32527 return -EFAULT;
32528+ }
32529 if (esc) {
32530 set_led(c);
32531 esc = 0;
32532@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32533 buf++;
32534 }
32535 update_vfd();
32536+ mutex_unlock(&vfd_mutex);
32537
32538 return len;
32539 }
32540diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
32541index 31e7c91..161afc0 100644
32542--- a/drivers/char/genrtc.c
32543+++ b/drivers/char/genrtc.c
32544@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
32545 switch (cmd) {
32546
32547 case RTC_PLL_GET:
32548+ memset(&pll, 0, sizeof(pll));
32549 if (get_rtc_pll(&pll))
32550 return -EINVAL;
32551 else
32552diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
32553index 006466d..a2bb21c 100644
32554--- a/drivers/char/hpet.c
32555+++ b/drivers/char/hpet.c
32556@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
32557 return 0;
32558 }
32559
32560-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
32561+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
32562
32563 static int
32564 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
32565@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
32566 }
32567
32568 static int
32569-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32570+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
32571 {
32572 struct hpet_timer __iomem *timer;
32573 struct hpet __iomem *hpet;
32574@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32575 {
32576 struct hpet_info info;
32577
32578+ memset(&info, 0, sizeof(info));
32579+
32580 if (devp->hd_ireqfreq)
32581 info.hi_ireqfreq =
32582 hpet_time_div(hpetp, devp->hd_ireqfreq);
32583- else
32584- info.hi_ireqfreq = 0;
32585 info.hi_flags =
32586 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
32587 info.hi_hpet = hpetp->hp_which;
32588diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
32589index 0afc8b8..6913fc3 100644
32590--- a/drivers/char/hvc_beat.c
32591+++ b/drivers/char/hvc_beat.c
32592@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
32593 return cnt;
32594 }
32595
32596-static struct hv_ops hvc_beat_get_put_ops = {
32597+static const struct hv_ops hvc_beat_get_put_ops = {
32598 .get_chars = hvc_beat_get_chars,
32599 .put_chars = hvc_beat_put_chars,
32600 };
32601diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
32602index 98097f2..407dddc 100644
32603--- a/drivers/char/hvc_console.c
32604+++ b/drivers/char/hvc_console.c
32605@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
32606 * console interfaces but can still be used as a tty device. This has to be
32607 * static because kmalloc will not work during early console init.
32608 */
32609-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32610+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32611 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
32612 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
32613
32614@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
32615 * vty adapters do NOT get an hvc_instantiate() callback since they
32616 * appear after early console init.
32617 */
32618-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
32619+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
32620 {
32621 struct hvc_struct *hp;
32622
32623@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
32624 };
32625
32626 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
32627- struct hv_ops *ops, int outbuf_size)
32628+ const struct hv_ops *ops, int outbuf_size)
32629 {
32630 struct hvc_struct *hp;
32631 int i;
32632diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
32633index 10950ca..ed176c3 100644
32634--- a/drivers/char/hvc_console.h
32635+++ b/drivers/char/hvc_console.h
32636@@ -55,7 +55,7 @@ struct hvc_struct {
32637 int outbuf_size;
32638 int n_outbuf;
32639 uint32_t vtermno;
32640- struct hv_ops *ops;
32641+ const struct hv_ops *ops;
32642 int irq_requested;
32643 int data;
32644 struct winsize ws;
32645@@ -76,11 +76,11 @@ struct hv_ops {
32646 };
32647
32648 /* Register a vterm and a slot index for use as a console (console_init) */
32649-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
32650+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
32651
32652 /* register a vterm for hvc tty operation (module_init or hotplug add) */
32653 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
32654- struct hv_ops *ops, int outbuf_size);
32655+ const struct hv_ops *ops, int outbuf_size);
32656 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
32657 extern int hvc_remove(struct hvc_struct *hp);
32658
32659diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
32660index 936d05b..fd02426 100644
32661--- a/drivers/char/hvc_iseries.c
32662+++ b/drivers/char/hvc_iseries.c
32663@@ -197,7 +197,7 @@ done:
32664 return sent;
32665 }
32666
32667-static struct hv_ops hvc_get_put_ops = {
32668+static const struct hv_ops hvc_get_put_ops = {
32669 .get_chars = get_chars,
32670 .put_chars = put_chars,
32671 .notifier_add = notifier_add_irq,
32672diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
32673index b0e168f..69cda2a 100644
32674--- a/drivers/char/hvc_iucv.c
32675+++ b/drivers/char/hvc_iucv.c
32676@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
32677
32678
32679 /* HVC operations */
32680-static struct hv_ops hvc_iucv_ops = {
32681+static const struct hv_ops hvc_iucv_ops = {
32682 .get_chars = hvc_iucv_get_chars,
32683 .put_chars = hvc_iucv_put_chars,
32684 .notifier_add = hvc_iucv_notifier_add,
32685diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
32686index 88590d0..61c4a61 100644
32687--- a/drivers/char/hvc_rtas.c
32688+++ b/drivers/char/hvc_rtas.c
32689@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
32690 return i;
32691 }
32692
32693-static struct hv_ops hvc_rtas_get_put_ops = {
32694+static const struct hv_ops hvc_rtas_get_put_ops = {
32695 .get_chars = hvc_rtas_read_console,
32696 .put_chars = hvc_rtas_write_console,
32697 };
32698diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
32699index bd63ba8..b0957e6 100644
32700--- a/drivers/char/hvc_udbg.c
32701+++ b/drivers/char/hvc_udbg.c
32702@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
32703 return i;
32704 }
32705
32706-static struct hv_ops hvc_udbg_ops = {
32707+static const struct hv_ops hvc_udbg_ops = {
32708 .get_chars = hvc_udbg_get,
32709 .put_chars = hvc_udbg_put,
32710 };
32711diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
32712index 10be343..27370e9 100644
32713--- a/drivers/char/hvc_vio.c
32714+++ b/drivers/char/hvc_vio.c
32715@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
32716 return got;
32717 }
32718
32719-static struct hv_ops hvc_get_put_ops = {
32720+static const struct hv_ops hvc_get_put_ops = {
32721 .get_chars = filtered_get_chars,
32722 .put_chars = hvc_put_chars,
32723 .notifier_add = notifier_add_irq,
32724diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
32725index a6ee32b..94f8c26 100644
32726--- a/drivers/char/hvc_xen.c
32727+++ b/drivers/char/hvc_xen.c
32728@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
32729 return recv;
32730 }
32731
32732-static struct hv_ops hvc_ops = {
32733+static const struct hv_ops hvc_ops = {
32734 .get_chars = read_console,
32735 .put_chars = write_console,
32736 .notifier_add = notifier_add_irq,
32737diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
32738index 266b858..f3ee0bb 100644
32739--- a/drivers/char/hvcs.c
32740+++ b/drivers/char/hvcs.c
32741@@ -82,6 +82,7 @@
32742 #include <asm/hvcserver.h>
32743 #include <asm/uaccess.h>
32744 #include <asm/vio.h>
32745+#include <asm/local.h>
32746
32747 /*
32748 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32749@@ -269,7 +270,7 @@ struct hvcs_struct {
32750 unsigned int index;
32751
32752 struct tty_struct *tty;
32753- int open_count;
32754+ local_t open_count;
32755
32756 /*
32757 * Used to tell the driver kernel_thread what operations need to take
32758@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
32759
32760 spin_lock_irqsave(&hvcsd->lock, flags);
32761
32762- if (hvcsd->open_count > 0) {
32763+ if (local_read(&hvcsd->open_count) > 0) {
32764 spin_unlock_irqrestore(&hvcsd->lock, flags);
32765 printk(KERN_INFO "HVCS: vterm state unchanged. "
32766 "The hvcs device node is still in use.\n");
32767@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
32768 if ((retval = hvcs_partner_connect(hvcsd)))
32769 goto error_release;
32770
32771- hvcsd->open_count = 1;
32772+ local_set(&hvcsd->open_count, 1);
32773 hvcsd->tty = tty;
32774 tty->driver_data = hvcsd;
32775
32776@@ -1169,7 +1170,7 @@ fast_open:
32777
32778 spin_lock_irqsave(&hvcsd->lock, flags);
32779 kref_get(&hvcsd->kref);
32780- hvcsd->open_count++;
32781+ local_inc(&hvcsd->open_count);
32782 hvcsd->todo_mask |= HVCS_SCHED_READ;
32783 spin_unlock_irqrestore(&hvcsd->lock, flags);
32784
32785@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32786 hvcsd = tty->driver_data;
32787
32788 spin_lock_irqsave(&hvcsd->lock, flags);
32789- if (--hvcsd->open_count == 0) {
32790+ if (local_dec_and_test(&hvcsd->open_count)) {
32791
32792 vio_disable_interrupts(hvcsd->vdev);
32793
32794@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32795 free_irq(irq, hvcsd);
32796 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32797 return;
32798- } else if (hvcsd->open_count < 0) {
32799+ } else if (local_read(&hvcsd->open_count) < 0) {
32800 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32801 " is missmanaged.\n",
32802- hvcsd->vdev->unit_address, hvcsd->open_count);
32803+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32804 }
32805
32806 spin_unlock_irqrestore(&hvcsd->lock, flags);
32807@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32808
32809 spin_lock_irqsave(&hvcsd->lock, flags);
32810 /* Preserve this so that we know how many kref refs to put */
32811- temp_open_count = hvcsd->open_count;
32812+ temp_open_count = local_read(&hvcsd->open_count);
32813
32814 /*
32815 * Don't kref put inside the spinlock because the destruction
32816@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32817 hvcsd->tty->driver_data = NULL;
32818 hvcsd->tty = NULL;
32819
32820- hvcsd->open_count = 0;
32821+ local_set(&hvcsd->open_count, 0);
32822
32823 /* This will drop any buffered data on the floor which is OK in a hangup
32824 * scenario. */
32825@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
32826 * the middle of a write operation? This is a crummy place to do this
32827 * but we want to keep it all in the spinlock.
32828 */
32829- if (hvcsd->open_count <= 0) {
32830+ if (local_read(&hvcsd->open_count) <= 0) {
32831 spin_unlock_irqrestore(&hvcsd->lock, flags);
32832 return -ENODEV;
32833 }
32834@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
32835 {
32836 struct hvcs_struct *hvcsd = tty->driver_data;
32837
32838- if (!hvcsd || hvcsd->open_count <= 0)
32839+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32840 return 0;
32841
32842 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32843diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
32844index ec5e3f8..02455ba 100644
32845--- a/drivers/char/ipmi/ipmi_msghandler.c
32846+++ b/drivers/char/ipmi/ipmi_msghandler.c
32847@@ -414,7 +414,7 @@ struct ipmi_smi {
32848 struct proc_dir_entry *proc_dir;
32849 char proc_dir_name[10];
32850
32851- atomic_t stats[IPMI_NUM_STATS];
32852+ atomic_unchecked_t stats[IPMI_NUM_STATS];
32853
32854 /*
32855 * run_to_completion duplicate of smb_info, smi_info
32856@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
32857
32858
32859 #define ipmi_inc_stat(intf, stat) \
32860- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
32861+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
32862 #define ipmi_get_stat(intf, stat) \
32863- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
32864+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
32865
32866 static int is_lan_addr(struct ipmi_addr *addr)
32867 {
32868@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
32869 INIT_LIST_HEAD(&intf->cmd_rcvrs);
32870 init_waitqueue_head(&intf->waitq);
32871 for (i = 0; i < IPMI_NUM_STATS; i++)
32872- atomic_set(&intf->stats[i], 0);
32873+ atomic_set_unchecked(&intf->stats[i], 0);
32874
32875 intf->proc_dir = NULL;
32876
32877@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
32878 struct ipmi_smi_msg smi_msg;
32879 struct ipmi_recv_msg recv_msg;
32880
32881+ pax_track_stack();
32882+
32883 si = (struct ipmi_system_interface_addr *) &addr;
32884 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
32885 si->channel = IPMI_BMC_CHANNEL;
32886diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
32887index abae8c9..8021979 100644
32888--- a/drivers/char/ipmi/ipmi_si_intf.c
32889+++ b/drivers/char/ipmi/ipmi_si_intf.c
32890@@ -277,7 +277,7 @@ struct smi_info {
32891 unsigned char slave_addr;
32892
32893 /* Counters and things for the proc filesystem. */
32894- atomic_t stats[SI_NUM_STATS];
32895+ atomic_unchecked_t stats[SI_NUM_STATS];
32896
32897 struct task_struct *thread;
32898
32899@@ -285,9 +285,9 @@ struct smi_info {
32900 };
32901
32902 #define smi_inc_stat(smi, stat) \
32903- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
32904+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
32905 #define smi_get_stat(smi, stat) \
32906- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
32907+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
32908
32909 #define SI_MAX_PARMS 4
32910
32911@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
32912 atomic_set(&new_smi->req_events, 0);
32913 new_smi->run_to_completion = 0;
32914 for (i = 0; i < SI_NUM_STATS; i++)
32915- atomic_set(&new_smi->stats[i], 0);
32916+ atomic_set_unchecked(&new_smi->stats[i], 0);
32917
32918 new_smi->interrupt_disabled = 0;
32919 atomic_set(&new_smi->stop_operation, 0);
32920diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
32921index 402838f..55e2200 100644
32922--- a/drivers/char/istallion.c
32923+++ b/drivers/char/istallion.c
32924@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
32925 * re-used for each stats call.
32926 */
32927 static comstats_t stli_comstats;
32928-static combrd_t stli_brdstats;
32929 static struct asystats stli_cdkstats;
32930
32931 /*****************************************************************************/
32932@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
32933 {
32934 struct stlibrd *brdp;
32935 unsigned int i;
32936+ combrd_t stli_brdstats;
32937
32938 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
32939 return -EFAULT;
32940@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
32941 struct stliport stli_dummyport;
32942 struct stliport *portp;
32943
32944+ pax_track_stack();
32945+
32946 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32947 return -EFAULT;
32948 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32949@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
32950 struct stlibrd stli_dummybrd;
32951 struct stlibrd *brdp;
32952
32953+ pax_track_stack();
32954+
32955 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32956 return -EFAULT;
32957 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32958diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
32959index 950837c..e55a288 100644
32960--- a/drivers/char/keyboard.c
32961+++ b/drivers/char/keyboard.c
32962@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
32963 kbd->kbdmode == VC_MEDIUMRAW) &&
32964 value != KVAL(K_SAK))
32965 return; /* SAK is allowed even in raw mode */
32966+
32967+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32968+ {
32969+ void *func = fn_handler[value];
32970+ if (func == fn_show_state || func == fn_show_ptregs ||
32971+ func == fn_show_mem)
32972+ return;
32973+ }
32974+#endif
32975+
32976 fn_handler[value](vc);
32977 }
32978
32979@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
32980 .evbit = { BIT_MASK(EV_SND) },
32981 },
32982
32983- { }, /* Terminating entry */
32984+ { 0 }, /* Terminating entry */
32985 };
32986
32987 MODULE_DEVICE_TABLE(input, kbd_ids);
32988diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
32989index 87c67b4..230527a 100644
32990--- a/drivers/char/mbcs.c
32991+++ b/drivers/char/mbcs.c
32992@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
32993 return 0;
32994 }
32995
32996-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
32997+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
32998 {
32999 .part_num = MBCS_PART_NUM,
33000 .mfg_num = MBCS_MFG_NUM,
33001diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33002index 1270f64..8495f49 100644
33003--- a/drivers/char/mem.c
33004+++ b/drivers/char/mem.c
33005@@ -18,6 +18,7 @@
33006 #include <linux/raw.h>
33007 #include <linux/tty.h>
33008 #include <linux/capability.h>
33009+#include <linux/security.h>
33010 #include <linux/ptrace.h>
33011 #include <linux/device.h>
33012 #include <linux/highmem.h>
33013@@ -35,6 +36,10 @@
33014 # include <linux/efi.h>
33015 #endif
33016
33017+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33018+extern struct file_operations grsec_fops;
33019+#endif
33020+
33021 static inline unsigned long size_inside_page(unsigned long start,
33022 unsigned long size)
33023 {
33024@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33025
33026 while (cursor < to) {
33027 if (!devmem_is_allowed(pfn)) {
33028+#ifdef CONFIG_GRKERNSEC_KMEM
33029+ gr_handle_mem_readwrite(from, to);
33030+#else
33031 printk(KERN_INFO
33032 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33033 current->comm, from, to);
33034+#endif
33035 return 0;
33036 }
33037 cursor += PAGE_SIZE;
33038@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33039 }
33040 return 1;
33041 }
33042+#elif defined(CONFIG_GRKERNSEC_KMEM)
33043+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33044+{
33045+ return 0;
33046+}
33047 #else
33048 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33049 {
33050@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
33051 #endif
33052
33053 while (count > 0) {
33054+ char *temp;
33055+
33056 /*
33057 * Handle first page in case it's not aligned
33058 */
33059@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
33060 if (!ptr)
33061 return -EFAULT;
33062
33063- if (copy_to_user(buf, ptr, sz)) {
33064+#ifdef CONFIG_PAX_USERCOPY
33065+ temp = kmalloc(sz, GFP_KERNEL);
33066+ if (!temp) {
33067+ unxlate_dev_mem_ptr(p, ptr);
33068+ return -ENOMEM;
33069+ }
33070+ memcpy(temp, ptr, sz);
33071+#else
33072+ temp = ptr;
33073+#endif
33074+
33075+ if (copy_to_user(buf, temp, sz)) {
33076+
33077+#ifdef CONFIG_PAX_USERCOPY
33078+ kfree(temp);
33079+#endif
33080+
33081 unxlate_dev_mem_ptr(p, ptr);
33082 return -EFAULT;
33083 }
33084
33085+#ifdef CONFIG_PAX_USERCOPY
33086+ kfree(temp);
33087+#endif
33088+
33089 unxlate_dev_mem_ptr(p, ptr);
33090
33091 buf += sz;
33092@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33093 size_t count, loff_t *ppos)
33094 {
33095 unsigned long p = *ppos;
33096- ssize_t low_count, read, sz;
33097+ ssize_t low_count, read, sz, err = 0;
33098 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33099- int err = 0;
33100
33101 read = 0;
33102 if (p < (unsigned long) high_memory) {
33103@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33104 }
33105 #endif
33106 while (low_count > 0) {
33107+ char *temp;
33108+
33109 sz = size_inside_page(p, low_count);
33110
33111 /*
33112@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33113 */
33114 kbuf = xlate_dev_kmem_ptr((char *)p);
33115
33116- if (copy_to_user(buf, kbuf, sz))
33117+#ifdef CONFIG_PAX_USERCOPY
33118+ temp = kmalloc(sz, GFP_KERNEL);
33119+ if (!temp)
33120+ return -ENOMEM;
33121+ memcpy(temp, kbuf, sz);
33122+#else
33123+ temp = kbuf;
33124+#endif
33125+
33126+ err = copy_to_user(buf, temp, sz);
33127+
33128+#ifdef CONFIG_PAX_USERCOPY
33129+ kfree(temp);
33130+#endif
33131+
33132+ if (err)
33133 return -EFAULT;
33134 buf += sz;
33135 p += sz;
33136@@ -889,6 +941,9 @@ static const struct memdev {
33137 #ifdef CONFIG_CRASH_DUMP
33138 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33139 #endif
33140+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33141+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33142+#endif
33143 };
33144
33145 static int memory_open(struct inode *inode, struct file *filp)
33146diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
33147index 918711a..4ffaf5e 100644
33148--- a/drivers/char/mmtimer.c
33149+++ b/drivers/char/mmtimer.c
33150@@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
33151 return err;
33152 }
33153
33154-static struct k_clock sgi_clock = {
33155+static k_clock_no_const sgi_clock = {
33156 .res = 0,
33157 .clock_set = sgi_clock_set,
33158 .clock_get = sgi_clock_get,
33159diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
33160index 674b3ab..a8d1970 100644
33161--- a/drivers/char/pcmcia/ipwireless/tty.c
33162+++ b/drivers/char/pcmcia/ipwireless/tty.c
33163@@ -29,6 +29,7 @@
33164 #include <linux/tty_driver.h>
33165 #include <linux/tty_flip.h>
33166 #include <linux/uaccess.h>
33167+#include <asm/local.h>
33168
33169 #include "tty.h"
33170 #include "network.h"
33171@@ -51,7 +52,7 @@ struct ipw_tty {
33172 int tty_type;
33173 struct ipw_network *network;
33174 struct tty_struct *linux_tty;
33175- int open_count;
33176+ local_t open_count;
33177 unsigned int control_lines;
33178 struct mutex ipw_tty_mutex;
33179 int tx_bytes_queued;
33180@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33181 mutex_unlock(&tty->ipw_tty_mutex);
33182 return -ENODEV;
33183 }
33184- if (tty->open_count == 0)
33185+ if (local_read(&tty->open_count) == 0)
33186 tty->tx_bytes_queued = 0;
33187
33188- tty->open_count++;
33189+ local_inc(&tty->open_count);
33190
33191 tty->linux_tty = linux_tty;
33192 linux_tty->driver_data = tty;
33193@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33194
33195 static void do_ipw_close(struct ipw_tty *tty)
33196 {
33197- tty->open_count--;
33198-
33199- if (tty->open_count == 0) {
33200+ if (local_dec_return(&tty->open_count) == 0) {
33201 struct tty_struct *linux_tty = tty->linux_tty;
33202
33203 if (linux_tty != NULL) {
33204@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
33205 return;
33206
33207 mutex_lock(&tty->ipw_tty_mutex);
33208- if (tty->open_count == 0) {
33209+ if (local_read(&tty->open_count) == 0) {
33210 mutex_unlock(&tty->ipw_tty_mutex);
33211 return;
33212 }
33213@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
33214 return;
33215 }
33216
33217- if (!tty->open_count) {
33218+ if (!local_read(&tty->open_count)) {
33219 mutex_unlock(&tty->ipw_tty_mutex);
33220 return;
33221 }
33222@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
33223 return -ENODEV;
33224
33225 mutex_lock(&tty->ipw_tty_mutex);
33226- if (!tty->open_count) {
33227+ if (!local_read(&tty->open_count)) {
33228 mutex_unlock(&tty->ipw_tty_mutex);
33229 return -EINVAL;
33230 }
33231@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
33232 if (!tty)
33233 return -ENODEV;
33234
33235- if (!tty->open_count)
33236+ if (!local_read(&tty->open_count))
33237 return -EINVAL;
33238
33239 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33240@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
33241 if (!tty)
33242 return 0;
33243
33244- if (!tty->open_count)
33245+ if (!local_read(&tty->open_count))
33246 return 0;
33247
33248 return tty->tx_bytes_queued;
33249@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
33250 if (!tty)
33251 return -ENODEV;
33252
33253- if (!tty->open_count)
33254+ if (!local_read(&tty->open_count))
33255 return -EINVAL;
33256
33257 return get_control_lines(tty);
33258@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
33259 if (!tty)
33260 return -ENODEV;
33261
33262- if (!tty->open_count)
33263+ if (!local_read(&tty->open_count))
33264 return -EINVAL;
33265
33266 return set_control_lines(tty, set, clear);
33267@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
33268 if (!tty)
33269 return -ENODEV;
33270
33271- if (!tty->open_count)
33272+ if (!local_read(&tty->open_count))
33273 return -EINVAL;
33274
33275 /* FIXME: Exactly how is the tty object locked here .. */
33276@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
33277 against a parallel ioctl etc */
33278 mutex_lock(&ttyj->ipw_tty_mutex);
33279 }
33280- while (ttyj->open_count)
33281+ while (local_read(&ttyj->open_count))
33282 do_ipw_close(ttyj);
33283 ipwireless_disassociate_network_ttys(network,
33284 ttyj->channel_idx);
33285diff --git a/drivers/char/pty.c b/drivers/char/pty.c
33286index 62f282e..e45c45c 100644
33287--- a/drivers/char/pty.c
33288+++ b/drivers/char/pty.c
33289@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
33290 register_sysctl_table(pty_root_table);
33291
33292 /* Now create the /dev/ptmx special device */
33293+ pax_open_kernel();
33294 tty_default_fops(&ptmx_fops);
33295- ptmx_fops.open = ptmx_open;
33296+ *(void **)&ptmx_fops.open = ptmx_open;
33297+ pax_close_kernel();
33298
33299 cdev_init(&ptmx_cdev, &ptmx_fops);
33300 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33301diff --git a/drivers/char/random.c b/drivers/char/random.c
33302index 3a19e2d..6ed09d3 100644
33303--- a/drivers/char/random.c
33304+++ b/drivers/char/random.c
33305@@ -254,8 +254,13 @@
33306 /*
33307 * Configuration information
33308 */
33309+#ifdef CONFIG_GRKERNSEC_RANDNET
33310+#define INPUT_POOL_WORDS 512
33311+#define OUTPUT_POOL_WORDS 128
33312+#else
33313 #define INPUT_POOL_WORDS 128
33314 #define OUTPUT_POOL_WORDS 32
33315+#endif
33316 #define SEC_XFER_SIZE 512
33317
33318 /*
33319@@ -292,10 +297,17 @@ static struct poolinfo {
33320 int poolwords;
33321 int tap1, tap2, tap3, tap4, tap5;
33322 } poolinfo_table[] = {
33323+#ifdef CONFIG_GRKERNSEC_RANDNET
33324+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33325+ { 512, 411, 308, 208, 104, 1 },
33326+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33327+ { 128, 103, 76, 51, 25, 1 },
33328+#else
33329 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33330 { 128, 103, 76, 51, 25, 1 },
33331 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33332 { 32, 26, 20, 14, 7, 1 },
33333+#endif
33334 #if 0
33335 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33336 { 2048, 1638, 1231, 819, 411, 1 },
33337@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33338 #include <linux/sysctl.h>
33339
33340 static int min_read_thresh = 8, min_write_thresh;
33341-static int max_read_thresh = INPUT_POOL_WORDS * 32;
33342+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33343 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33344 static char sysctl_bootid[16];
33345
33346diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
33347index 0e29a23..0efc2c2 100644
33348--- a/drivers/char/rocket.c
33349+++ b/drivers/char/rocket.c
33350@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
33351 struct rocket_ports tmp;
33352 int board;
33353
33354+ pax_track_stack();
33355+
33356 if (!retports)
33357 return -EFAULT;
33358 memset(&tmp, 0, sizeof (tmp));
33359diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33360index 8c262aa..4d3b058 100644
33361--- a/drivers/char/sonypi.c
33362+++ b/drivers/char/sonypi.c
33363@@ -55,6 +55,7 @@
33364 #include <asm/uaccess.h>
33365 #include <asm/io.h>
33366 #include <asm/system.h>
33367+#include <asm/local.h>
33368
33369 #include <linux/sonypi.h>
33370
33371@@ -491,7 +492,7 @@ static struct sonypi_device {
33372 spinlock_t fifo_lock;
33373 wait_queue_head_t fifo_proc_list;
33374 struct fasync_struct *fifo_async;
33375- int open_count;
33376+ local_t open_count;
33377 int model;
33378 struct input_dev *input_jog_dev;
33379 struct input_dev *input_key_dev;
33380@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33381 static int sonypi_misc_release(struct inode *inode, struct file *file)
33382 {
33383 mutex_lock(&sonypi_device.lock);
33384- sonypi_device.open_count--;
33385+ local_dec(&sonypi_device.open_count);
33386 mutex_unlock(&sonypi_device.lock);
33387 return 0;
33388 }
33389@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33390 lock_kernel();
33391 mutex_lock(&sonypi_device.lock);
33392 /* Flush input queue on first open */
33393- if (!sonypi_device.open_count)
33394+ if (!local_read(&sonypi_device.open_count))
33395 kfifo_reset(sonypi_device.fifo);
33396- sonypi_device.open_count++;
33397+ local_inc(&sonypi_device.open_count);
33398 mutex_unlock(&sonypi_device.lock);
33399 unlock_kernel();
33400 return 0;
33401diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
33402index db6dcfa..13834cb 100644
33403--- a/drivers/char/stallion.c
33404+++ b/drivers/char/stallion.c
33405@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
33406 struct stlport stl_dummyport;
33407 struct stlport *portp;
33408
33409+ pax_track_stack();
33410+
33411 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
33412 return -EFAULT;
33413 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
33414diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33415index a0789f6..cea3902 100644
33416--- a/drivers/char/tpm/tpm.c
33417+++ b/drivers/char/tpm/tpm.c
33418@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33419 chip->vendor.req_complete_val)
33420 goto out_recv;
33421
33422- if ((status == chip->vendor.req_canceled)) {
33423+ if (status == chip->vendor.req_canceled) {
33424 dev_err(chip->dev, "Operation Canceled\n");
33425 rc = -ECANCELED;
33426 goto out;
33427@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
33428
33429 struct tpm_chip *chip = dev_get_drvdata(dev);
33430
33431+ pax_track_stack();
33432+
33433 tpm_cmd.header.in = tpm_readpubek_header;
33434 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
33435 "attempting to read the PUBEK");
33436diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
33437index bf2170f..ce8cab9 100644
33438--- a/drivers/char/tpm/tpm_bios.c
33439+++ b/drivers/char/tpm/tpm_bios.c
33440@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33441 event = addr;
33442
33443 if ((event->event_type == 0 && event->event_size == 0) ||
33444- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33445+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33446 return NULL;
33447
33448 return addr;
33449@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33450 return NULL;
33451
33452 if ((event->event_type == 0 && event->event_size == 0) ||
33453- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33454+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33455 return NULL;
33456
33457 (*pos)++;
33458@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33459 int i;
33460
33461 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33462- seq_putc(m, data[i]);
33463+ if (!seq_putc(m, data[i]))
33464+ return -EFAULT;
33465
33466 return 0;
33467 }
33468@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
33469 log->bios_event_log_end = log->bios_event_log + len;
33470
33471 virt = acpi_os_map_memory(start, len);
33472+ if (!virt) {
33473+ kfree(log->bios_event_log);
33474+ log->bios_event_log = NULL;
33475+ return -EFAULT;
33476+ }
33477
33478- memcpy(log->bios_event_log, virt, len);
33479+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
33480
33481 acpi_os_unmap_memory(virt, len);
33482 return 0;
33483diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
33484index 123cedf..6664cb4 100644
33485--- a/drivers/char/tty_io.c
33486+++ b/drivers/char/tty_io.c
33487@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
33488 static int tty_release(struct inode *, struct file *);
33489 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
33490 #ifdef CONFIG_COMPAT
33491-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33492+long tty_compat_ioctl(struct file *file, unsigned int cmd,
33493 unsigned long arg);
33494 #else
33495 #define tty_compat_ioctl NULL
33496@@ -1774,6 +1774,7 @@ got_driver:
33497
33498 if (IS_ERR(tty)) {
33499 mutex_unlock(&tty_mutex);
33500+ tty_driver_kref_put(driver);
33501 return PTR_ERR(tty);
33502 }
33503 }
33504@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33505 return retval;
33506 }
33507
33508+EXPORT_SYMBOL(tty_ioctl);
33509+
33510 #ifdef CONFIG_COMPAT
33511-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33512+long tty_compat_ioctl(struct file *file, unsigned int cmd,
33513 unsigned long arg)
33514 {
33515 struct inode *inode = file->f_dentry->d_inode;
33516@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33517
33518 return retval;
33519 }
33520+
33521+EXPORT_SYMBOL(tty_compat_ioctl);
33522 #endif
33523
33524 /*
33525@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33526
33527 void tty_default_fops(struct file_operations *fops)
33528 {
33529- *fops = tty_fops;
33530+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33531 }
33532
33533 /*
33534diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
33535index d814a3d..b55b9c9 100644
33536--- a/drivers/char/tty_ldisc.c
33537+++ b/drivers/char/tty_ldisc.c
33538@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
33539 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33540 struct tty_ldisc_ops *ldo = ld->ops;
33541
33542- ldo->refcount--;
33543+ atomic_dec(&ldo->refcount);
33544 module_put(ldo->owner);
33545 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33546
33547@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
33548 spin_lock_irqsave(&tty_ldisc_lock, flags);
33549 tty_ldiscs[disc] = new_ldisc;
33550 new_ldisc->num = disc;
33551- new_ldisc->refcount = 0;
33552+ atomic_set(&new_ldisc->refcount, 0);
33553 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33554
33555 return ret;
33556@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33557 return -EINVAL;
33558
33559 spin_lock_irqsave(&tty_ldisc_lock, flags);
33560- if (tty_ldiscs[disc]->refcount)
33561+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33562 ret = -EBUSY;
33563 else
33564 tty_ldiscs[disc] = NULL;
33565@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
33566 if (ldops) {
33567 ret = ERR_PTR(-EAGAIN);
33568 if (try_module_get(ldops->owner)) {
33569- ldops->refcount++;
33570+ atomic_inc(&ldops->refcount);
33571 ret = ldops;
33572 }
33573 }
33574@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
33575 unsigned long flags;
33576
33577 spin_lock_irqsave(&tty_ldisc_lock, flags);
33578- ldops->refcount--;
33579+ atomic_dec(&ldops->refcount);
33580 module_put(ldops->owner);
33581 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33582 }
33583diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33584index a035ae3..c27fe2c 100644
33585--- a/drivers/char/virtio_console.c
33586+++ b/drivers/char/virtio_console.c
33587@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
33588 * virtqueue, so we let the drivers do some boutique early-output thing. */
33589 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
33590 {
33591- virtio_cons.put_chars = put_chars;
33592+ pax_open_kernel();
33593+ *(void **)&virtio_cons.put_chars = put_chars;
33594+ pax_close_kernel();
33595 return hvc_instantiate(0, 0, &virtio_cons);
33596 }
33597
33598@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
33599 out_vq = vqs[1];
33600
33601 /* Start using the new console output. */
33602- virtio_cons.get_chars = get_chars;
33603- virtio_cons.put_chars = put_chars;
33604- virtio_cons.notifier_add = notifier_add_vio;
33605- virtio_cons.notifier_del = notifier_del_vio;
33606- virtio_cons.notifier_hangup = notifier_del_vio;
33607+ pax_open_kernel();
33608+ *(void **)&virtio_cons.get_chars = get_chars;
33609+ *(void **)&virtio_cons.put_chars = put_chars;
33610+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
33611+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
33612+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
33613+ pax_close_kernel();
33614
33615 /* The first argument of hvc_alloc() is the virtual console number, so
33616 * we use zero. The second argument is the parameter for the
33617diff --git a/drivers/char/vt.c b/drivers/char/vt.c
33618index 0c80c68..53d59c1 100644
33619--- a/drivers/char/vt.c
33620+++ b/drivers/char/vt.c
33621@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
33622
33623 static void notify_write(struct vc_data *vc, unsigned int unicode)
33624 {
33625- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33626+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33627 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33628 }
33629
33630diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
33631index 6351a26..999af95 100644
33632--- a/drivers/char/vt_ioctl.c
33633+++ b/drivers/char/vt_ioctl.c
33634@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33635 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33636 return -EFAULT;
33637
33638- if (!capable(CAP_SYS_TTY_CONFIG))
33639- perm = 0;
33640-
33641 switch (cmd) {
33642 case KDGKBENT:
33643 key_map = key_maps[s];
33644@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33645 val = (i ? K_HOLE : K_NOSUCHMAP);
33646 return put_user(val, &user_kbe->kb_value);
33647 case KDSKBENT:
33648+ if (!capable(CAP_SYS_TTY_CONFIG))
33649+ perm = 0;
33650+
33651 if (!perm)
33652 return -EPERM;
33653+
33654 if (!i && v == K_NOSUCHMAP) {
33655 /* deallocate map */
33656 key_map = key_maps[s];
33657@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33658 int i, j, k;
33659 int ret;
33660
33661- if (!capable(CAP_SYS_TTY_CONFIG))
33662- perm = 0;
33663-
33664 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33665 if (!kbs) {
33666 ret = -ENOMEM;
33667@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33668 kfree(kbs);
33669 return ((p && *p) ? -EOVERFLOW : 0);
33670 case KDSKBSENT:
33671+ if (!capable(CAP_SYS_TTY_CONFIG))
33672+ perm = 0;
33673+
33674 if (!perm) {
33675 ret = -EPERM;
33676 goto reterr;
33677diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33678index c7ae026..1769c1d 100644
33679--- a/drivers/cpufreq/cpufreq.c
33680+++ b/drivers/cpufreq/cpufreq.c
33681@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
33682 complete(&policy->kobj_unregister);
33683 }
33684
33685-static struct sysfs_ops sysfs_ops = {
33686+static const struct sysfs_ops sysfs_ops = {
33687 .show = show,
33688 .store = store,
33689 };
33690diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
33691index 97b0038..2056670 100644
33692--- a/drivers/cpuidle/sysfs.c
33693+++ b/drivers/cpuidle/sysfs.c
33694@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
33695 return ret;
33696 }
33697
33698-static struct sysfs_ops cpuidle_sysfs_ops = {
33699+static const struct sysfs_ops cpuidle_sysfs_ops = {
33700 .show = cpuidle_show,
33701 .store = cpuidle_store,
33702 };
33703@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
33704 return ret;
33705 }
33706
33707-static struct sysfs_ops cpuidle_state_sysfs_ops = {
33708+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
33709 .show = cpuidle_state_show,
33710 };
33711
33712@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
33713 .release = cpuidle_state_sysfs_release,
33714 };
33715
33716-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33717+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33718 {
33719 kobject_put(&device->kobjs[i]->kobj);
33720 wait_for_completion(&device->kobjs[i]->kobj_unregister);
33721diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
33722index 5f753fc..0377ae9 100644
33723--- a/drivers/crypto/hifn_795x.c
33724+++ b/drivers/crypto/hifn_795x.c
33725@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
33726 0xCA, 0x34, 0x2B, 0x2E};
33727 struct scatterlist sg;
33728
33729+ pax_track_stack();
33730+
33731 memset(src, 0, sizeof(src));
33732 memset(ctx.key, 0, sizeof(ctx.key));
33733
33734diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
33735index 71e6482..de8d96c 100644
33736--- a/drivers/crypto/padlock-aes.c
33737+++ b/drivers/crypto/padlock-aes.c
33738@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
33739 struct crypto_aes_ctx gen_aes;
33740 int cpu;
33741
33742+ pax_track_stack();
33743+
33744 if (key_len % 8) {
33745 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
33746 return -EINVAL;
33747diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
33748index dcc4ab7..cc834bb 100644
33749--- a/drivers/dma/ioat/dma.c
33750+++ b/drivers/dma/ioat/dma.c
33751@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
33752 return entry->show(&chan->common, page);
33753 }
33754
33755-struct sysfs_ops ioat_sysfs_ops = {
33756+const struct sysfs_ops ioat_sysfs_ops = {
33757 .show = ioat_attr_show,
33758 };
33759
33760diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
33761index bbc3e78..f2db62c 100644
33762--- a/drivers/dma/ioat/dma.h
33763+++ b/drivers/dma/ioat/dma.h
33764@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
33765 unsigned long *phys_complete);
33766 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
33767 void ioat_kobject_del(struct ioatdma_device *device);
33768-extern struct sysfs_ops ioat_sysfs_ops;
33769+extern const struct sysfs_ops ioat_sysfs_ops;
33770 extern struct ioat_sysfs_entry ioat_version_attr;
33771 extern struct ioat_sysfs_entry ioat_cap_attr;
33772 #endif /* IOATDMA_H */
33773diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
33774index 9908c9e..3ceb0e5 100644
33775--- a/drivers/dma/ioat/dma_v3.c
33776+++ b/drivers/dma/ioat/dma_v3.c
33777@@ -71,10 +71,10 @@
33778 /* provide a lookup table for setting the source address in the base or
33779 * extended descriptor of an xor or pq descriptor
33780 */
33781-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
33782-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
33783-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
33784-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
33785+static const u8 xor_idx_to_desc = 0xd0;
33786+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
33787+static const u8 pq_idx_to_desc = 0xf8;
33788+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
33789
33790 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
33791 {
33792diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
33793index 85c464a..afd1e73 100644
33794--- a/drivers/edac/amd64_edac.c
33795+++ b/drivers/edac/amd64_edac.c
33796@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
33797 * PCI core identifies what devices are on a system during boot, and then
33798 * inquiry this table to see if this driver is for a given device found.
33799 */
33800-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
33801+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
33802 {
33803 .vendor = PCI_VENDOR_ID_AMD,
33804 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
33805diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
33806index 2b95f1a..4f52793 100644
33807--- a/drivers/edac/amd76x_edac.c
33808+++ b/drivers/edac/amd76x_edac.c
33809@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
33810 edac_mc_free(mci);
33811 }
33812
33813-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
33814+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
33815 {
33816 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33817 AMD762},
33818diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
33819index d205d49..74c9672 100644
33820--- a/drivers/edac/e752x_edac.c
33821+++ b/drivers/edac/e752x_edac.c
33822@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
33823 edac_mc_free(mci);
33824 }
33825
33826-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
33827+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
33828 {
33829 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33830 E7520},
33831diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
33832index c7d11cc..c59c1ca 100644
33833--- a/drivers/edac/e7xxx_edac.c
33834+++ b/drivers/edac/e7xxx_edac.c
33835@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
33836 edac_mc_free(mci);
33837 }
33838
33839-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
33840+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
33841 {
33842 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33843 E7205},
33844diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
33845index 5376457..5fdedbc 100644
33846--- a/drivers/edac/edac_device_sysfs.c
33847+++ b/drivers/edac/edac_device_sysfs.c
33848@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
33849 }
33850
33851 /* edac_dev file operations for an 'ctl_info' */
33852-static struct sysfs_ops device_ctl_info_ops = {
33853+static const struct sysfs_ops device_ctl_info_ops = {
33854 .show = edac_dev_ctl_info_show,
33855 .store = edac_dev_ctl_info_store
33856 };
33857@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
33858 }
33859
33860 /* edac_dev file operations for an 'instance' */
33861-static struct sysfs_ops device_instance_ops = {
33862+static const struct sysfs_ops device_instance_ops = {
33863 .show = edac_dev_instance_show,
33864 .store = edac_dev_instance_store
33865 };
33866@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
33867 }
33868
33869 /* edac_dev file operations for a 'block' */
33870-static struct sysfs_ops device_block_ops = {
33871+static const struct sysfs_ops device_block_ops = {
33872 .show = edac_dev_block_show,
33873 .store = edac_dev_block_store
33874 };
33875diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
33876index e1d4ce0..88840e9 100644
33877--- a/drivers/edac/edac_mc_sysfs.c
33878+++ b/drivers/edac/edac_mc_sysfs.c
33879@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
33880 return -EIO;
33881 }
33882
33883-static struct sysfs_ops csrowfs_ops = {
33884+static const struct sysfs_ops csrowfs_ops = {
33885 .show = csrowdev_show,
33886 .store = csrowdev_store
33887 };
33888@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
33889 }
33890
33891 /* Intermediate show/store table */
33892-static struct sysfs_ops mci_ops = {
33893+static const struct sysfs_ops mci_ops = {
33894 .show = mcidev_show,
33895 .store = mcidev_store
33896 };
33897diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
33898index 422728c..d8d9c88 100644
33899--- a/drivers/edac/edac_pci_sysfs.c
33900+++ b/drivers/edac/edac_pci_sysfs.c
33901@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
33902 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
33903 static int edac_pci_poll_msec = 1000; /* one second workq period */
33904
33905-static atomic_t pci_parity_count = ATOMIC_INIT(0);
33906-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
33907+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
33908+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
33909
33910 static struct kobject *edac_pci_top_main_kobj;
33911 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33912@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
33913 }
33914
33915 /* fs_ops table */
33916-static struct sysfs_ops pci_instance_ops = {
33917+static const struct sysfs_ops pci_instance_ops = {
33918 .show = edac_pci_instance_show,
33919 .store = edac_pci_instance_store
33920 };
33921@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
33922 return -EIO;
33923 }
33924
33925-static struct sysfs_ops edac_pci_sysfs_ops = {
33926+static const struct sysfs_ops edac_pci_sysfs_ops = {
33927 .show = edac_pci_dev_show,
33928 .store = edac_pci_dev_store
33929 };
33930@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33931 edac_printk(KERN_CRIT, EDAC_PCI,
33932 "Signaled System Error on %s\n",
33933 pci_name(dev));
33934- atomic_inc(&pci_nonparity_count);
33935+ atomic_inc_unchecked(&pci_nonparity_count);
33936 }
33937
33938 if (status & (PCI_STATUS_PARITY)) {
33939@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33940 "Master Data Parity Error on %s\n",
33941 pci_name(dev));
33942
33943- atomic_inc(&pci_parity_count);
33944+ atomic_inc_unchecked(&pci_parity_count);
33945 }
33946
33947 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33948@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33949 "Detected Parity Error on %s\n",
33950 pci_name(dev));
33951
33952- atomic_inc(&pci_parity_count);
33953+ atomic_inc_unchecked(&pci_parity_count);
33954 }
33955 }
33956
33957@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33958 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33959 "Signaled System Error on %s\n",
33960 pci_name(dev));
33961- atomic_inc(&pci_nonparity_count);
33962+ atomic_inc_unchecked(&pci_nonparity_count);
33963 }
33964
33965 if (status & (PCI_STATUS_PARITY)) {
33966@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33967 "Master Data Parity Error on "
33968 "%s\n", pci_name(dev));
33969
33970- atomic_inc(&pci_parity_count);
33971+ atomic_inc_unchecked(&pci_parity_count);
33972 }
33973
33974 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33975@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33976 "Detected Parity Error on %s\n",
33977 pci_name(dev));
33978
33979- atomic_inc(&pci_parity_count);
33980+ atomic_inc_unchecked(&pci_parity_count);
33981 }
33982 }
33983 }
33984@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
33985 if (!check_pci_errors)
33986 return;
33987
33988- before_count = atomic_read(&pci_parity_count);
33989+ before_count = atomic_read_unchecked(&pci_parity_count);
33990
33991 /* scan all PCI devices looking for a Parity Error on devices and
33992 * bridges.
33993@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
33994 /* Only if operator has selected panic on PCI Error */
33995 if (edac_pci_get_panic_on_pe()) {
33996 /* If the count is different 'after' from 'before' */
33997- if (before_count != atomic_read(&pci_parity_count))
33998+ if (before_count != atomic_read_unchecked(&pci_parity_count))
33999 panic("EDAC: PCI Parity Error");
34000 }
34001 }
34002diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
34003index 6c9a0f2..9c1cf7e 100644
34004--- a/drivers/edac/i3000_edac.c
34005+++ b/drivers/edac/i3000_edac.c
34006@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
34007 edac_mc_free(mci);
34008 }
34009
34010-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
34011+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
34012 {
34013 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34014 I3000},
34015diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
34016index fde4db9..fe108f9 100644
34017--- a/drivers/edac/i3200_edac.c
34018+++ b/drivers/edac/i3200_edac.c
34019@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
34020 edac_mc_free(mci);
34021 }
34022
34023-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
34024+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
34025 {
34026 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34027 I3200},
34028diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
34029index adc10a2..57d4ccf 100644
34030--- a/drivers/edac/i5000_edac.c
34031+++ b/drivers/edac/i5000_edac.c
34032@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
34033 *
34034 * The "E500P" device is the first device supported.
34035 */
34036-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
34037+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
34038 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
34039 .driver_data = I5000P},
34040
34041diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
34042index 22db05a..b2b5503 100644
34043--- a/drivers/edac/i5100_edac.c
34044+++ b/drivers/edac/i5100_edac.c
34045@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
34046 edac_mc_free(mci);
34047 }
34048
34049-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
34050+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
34051 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
34052 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
34053 { 0, }
34054diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
34055index f99d106..f050710 100644
34056--- a/drivers/edac/i5400_edac.c
34057+++ b/drivers/edac/i5400_edac.c
34058@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
34059 *
34060 * The "E500P" device is the first device supported.
34061 */
34062-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
34063+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
34064 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
34065 {0,} /* 0 terminated list. */
34066 };
34067diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
34068index 577760a..9ce16ce 100644
34069--- a/drivers/edac/i82443bxgx_edac.c
34070+++ b/drivers/edac/i82443bxgx_edac.c
34071@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
34072
34073 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
34074
34075-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
34076+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
34077 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
34078 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
34079 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
34080diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
34081index c0088ba..64a7b98 100644
34082--- a/drivers/edac/i82860_edac.c
34083+++ b/drivers/edac/i82860_edac.c
34084@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
34085 edac_mc_free(mci);
34086 }
34087
34088-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
34089+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
34090 {
34091 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34092 I82860},
34093diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
34094index b2d83b9..a34357b 100644
34095--- a/drivers/edac/i82875p_edac.c
34096+++ b/drivers/edac/i82875p_edac.c
34097@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
34098 edac_mc_free(mci);
34099 }
34100
34101-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
34102+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
34103 {
34104 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34105 I82875P},
34106diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
34107index 2eed3ea..87bbbd1 100644
34108--- a/drivers/edac/i82975x_edac.c
34109+++ b/drivers/edac/i82975x_edac.c
34110@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
34111 edac_mc_free(mci);
34112 }
34113
34114-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
34115+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
34116 {
34117 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34118 I82975X
34119diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
34120index 9900675..78ac2b6 100644
34121--- a/drivers/edac/r82600_edac.c
34122+++ b/drivers/edac/r82600_edac.c
34123@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
34124 edac_mc_free(mci);
34125 }
34126
34127-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
34128+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
34129 {
34130 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
34131 },
34132diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
34133index d4ec605..4cfec4e 100644
34134--- a/drivers/edac/x38_edac.c
34135+++ b/drivers/edac/x38_edac.c
34136@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
34137 edac_mc_free(mci);
34138 }
34139
34140-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
34141+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
34142 {
34143 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34144 X38},
34145diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34146index 3fc2ceb..daf098f 100644
34147--- a/drivers/firewire/core-card.c
34148+++ b/drivers/firewire/core-card.c
34149@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
34150
34151 void fw_core_remove_card(struct fw_card *card)
34152 {
34153- struct fw_card_driver dummy_driver = dummy_driver_template;
34154+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34155
34156 card->driver->update_phy_reg(card, 4,
34157 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34158diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34159index 4560d8f..36db24a 100644
34160--- a/drivers/firewire/core-cdev.c
34161+++ b/drivers/firewire/core-cdev.c
34162@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
34163 int ret;
34164
34165 if ((request->channels == 0 && request->bandwidth == 0) ||
34166- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34167- request->bandwidth < 0)
34168+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34169 return -EINVAL;
34170
34171 r = kmalloc(sizeof(*r), GFP_KERNEL);
34172diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34173index da628c7..cf54a2c 100644
34174--- a/drivers/firewire/core-transaction.c
34175+++ b/drivers/firewire/core-transaction.c
34176@@ -36,6 +36,7 @@
34177 #include <linux/string.h>
34178 #include <linux/timer.h>
34179 #include <linux/types.h>
34180+#include <linux/sched.h>
34181
34182 #include <asm/byteorder.h>
34183
34184@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
34185 struct transaction_callback_data d;
34186 struct fw_transaction t;
34187
34188+ pax_track_stack();
34189+
34190 init_completion(&d.done);
34191 d.payload = payload;
34192 fw_send_request(card, &t, tcode, destination_id, generation, speed,
34193diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34194index 7ff6e75..a2965d9 100644
34195--- a/drivers/firewire/core.h
34196+++ b/drivers/firewire/core.h
34197@@ -86,6 +86,7 @@ struct fw_card_driver {
34198
34199 int (*stop_iso)(struct fw_iso_context *ctx);
34200 };
34201+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34202
34203 void fw_card_initialize(struct fw_card *card,
34204 const struct fw_card_driver *driver, struct device *device);
34205diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34206index 3a2ccb0..82fd7c4 100644
34207--- a/drivers/firmware/dmi_scan.c
34208+++ b/drivers/firmware/dmi_scan.c
34209@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
34210 }
34211 }
34212 else {
34213- /*
34214- * no iounmap() for that ioremap(); it would be a no-op, but
34215- * it's so early in setup that sucker gets confused into doing
34216- * what it shouldn't if we actually call it.
34217- */
34218 p = dmi_ioremap(0xF0000, 0x10000);
34219 if (p == NULL)
34220 goto error;
34221@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34222 if (buf == NULL)
34223 return -1;
34224
34225- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34226+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34227
34228 iounmap(buf);
34229 return 0;
34230diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
34231index 9e4f59d..110e24e 100644
34232--- a/drivers/firmware/edd.c
34233+++ b/drivers/firmware/edd.c
34234@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
34235 return ret;
34236 }
34237
34238-static struct sysfs_ops edd_attr_ops = {
34239+static const struct sysfs_ops edd_attr_ops = {
34240 .show = edd_attr_show,
34241 };
34242
34243diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34244index f4f709d..082f06e 100644
34245--- a/drivers/firmware/efivars.c
34246+++ b/drivers/firmware/efivars.c
34247@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
34248 return ret;
34249 }
34250
34251-static struct sysfs_ops efivar_attr_ops = {
34252+static const struct sysfs_ops efivar_attr_ops = {
34253 .show = efivar_attr_show,
34254 .store = efivar_attr_store,
34255 };
34256diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
34257index 051d1eb..0a5d4e7 100644
34258--- a/drivers/firmware/iscsi_ibft.c
34259+++ b/drivers/firmware/iscsi_ibft.c
34260@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
34261 return ret;
34262 }
34263
34264-static struct sysfs_ops ibft_attr_ops = {
34265+static const struct sysfs_ops ibft_attr_ops = {
34266 .show = ibft_show_attribute,
34267 };
34268
34269diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
34270index 56f9234..8c58c7b 100644
34271--- a/drivers/firmware/memmap.c
34272+++ b/drivers/firmware/memmap.c
34273@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
34274 NULL
34275 };
34276
34277-static struct sysfs_ops memmap_attr_ops = {
34278+static const struct sysfs_ops memmap_attr_ops = {
34279 .show = memmap_attr_show,
34280 };
34281
34282diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
34283index b16c9a8..2af7d3f 100644
34284--- a/drivers/gpio/vr41xx_giu.c
34285+++ b/drivers/gpio/vr41xx_giu.c
34286@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34287 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34288 maskl, pendl, maskh, pendh);
34289
34290- atomic_inc(&irq_err_count);
34291+ atomic_inc_unchecked(&irq_err_count);
34292
34293 return -EINVAL;
34294 }
34295diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
34296index bea6efc..3dc0f42 100644
34297--- a/drivers/gpu/drm/drm_crtc.c
34298+++ b/drivers/gpu/drm/drm_crtc.c
34299@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34300 */
34301 if ((out_resp->count_modes >= mode_count) && mode_count) {
34302 copied = 0;
34303- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
34304+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
34305 list_for_each_entry(mode, &connector->modes, head) {
34306 drm_crtc_convert_to_umode(&u_mode, mode);
34307 if (copy_to_user(mode_ptr + copied,
34308@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34309
34310 if ((out_resp->count_props >= props_count) && props_count) {
34311 copied = 0;
34312- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
34313- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
34314+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
34315+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
34316 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
34317 if (connector->property_ids[i] != 0) {
34318 if (put_user(connector->property_ids[i],
34319@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34320
34321 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
34322 copied = 0;
34323- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
34324+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
34325 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
34326 if (connector->encoder_ids[i] != 0) {
34327 if (put_user(connector->encoder_ids[i],
34328@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
34329 }
34330
34331 for (i = 0; i < crtc_req->count_connectors; i++) {
34332- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
34333+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
34334 if (get_user(out_id, &set_connectors_ptr[i])) {
34335 ret = -EFAULT;
34336 goto out;
34337@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34338 out_resp->flags = property->flags;
34339
34340 if ((out_resp->count_values >= value_count) && value_count) {
34341- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
34342+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
34343 for (i = 0; i < value_count; i++) {
34344 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
34345 ret = -EFAULT;
34346@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34347 if (property->flags & DRM_MODE_PROP_ENUM) {
34348 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
34349 copied = 0;
34350- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
34351+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
34352 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
34353
34354 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
34355@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34356 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
34357 copied = 0;
34358 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
34359- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
34360+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
34361
34362 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
34363 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
34364@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
34365 blob = obj_to_blob(obj);
34366
34367 if (out_resp->length == blob->length) {
34368- blob_ptr = (void *)(unsigned long)out_resp->data;
34369+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
34370 if (copy_to_user(blob_ptr, blob->data, blob->length)){
34371 ret = -EFAULT;
34372 goto done;
34373diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34374index 1b8745d..92fdbf6 100644
34375--- a/drivers/gpu/drm/drm_crtc_helper.c
34376+++ b/drivers/gpu/drm/drm_crtc_helper.c
34377@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34378 struct drm_crtc *tmp;
34379 int crtc_mask = 1;
34380
34381- WARN(!crtc, "checking null crtc?");
34382+ BUG_ON(!crtc);
34383
34384 dev = crtc->dev;
34385
34386@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
34387
34388 adjusted_mode = drm_mode_duplicate(dev, mode);
34389
34390+ pax_track_stack();
34391+
34392 crtc->enabled = drm_helper_crtc_in_use(crtc);
34393
34394 if (!crtc->enabled)
34395diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34396index 0e27d98..dec8768 100644
34397--- a/drivers/gpu/drm/drm_drv.c
34398+++ b/drivers/gpu/drm/drm_drv.c
34399@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
34400 char *kdata = NULL;
34401
34402 atomic_inc(&dev->ioctl_count);
34403- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34404+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34405 ++file_priv->ioctl_count;
34406
34407 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34408diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34409index 519161e..98c840c 100644
34410--- a/drivers/gpu/drm/drm_fops.c
34411+++ b/drivers/gpu/drm/drm_fops.c
34412@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
34413 }
34414
34415 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34416- atomic_set(&dev->counts[i], 0);
34417+ atomic_set_unchecked(&dev->counts[i], 0);
34418
34419 dev->sigdata.lock = NULL;
34420
34421@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
34422
34423 retcode = drm_open_helper(inode, filp, dev);
34424 if (!retcode) {
34425- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34426+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34427 spin_lock(&dev->count_lock);
34428- if (!dev->open_count++) {
34429+ if (local_inc_return(&dev->open_count) == 1) {
34430 spin_unlock(&dev->count_lock);
34431 retcode = drm_setup(dev);
34432 goto out;
34433@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
34434
34435 lock_kernel();
34436
34437- DRM_DEBUG("open_count = %d\n", dev->open_count);
34438+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
34439
34440 if (dev->driver->preclose)
34441 dev->driver->preclose(dev, file_priv);
34442@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
34443 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34444 task_pid_nr(current),
34445 (long)old_encode_dev(file_priv->minor->device),
34446- dev->open_count);
34447+ local_read(&dev->open_count));
34448
34449 /* Release any auth tokens that might point to this file_priv,
34450 (do that under the drm_global_mutex) */
34451@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
34452 * End inline drm_release
34453 */
34454
34455- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34456+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34457 spin_lock(&dev->count_lock);
34458- if (!--dev->open_count) {
34459+ if (local_dec_and_test(&dev->open_count)) {
34460 if (atomic_read(&dev->ioctl_count)) {
34461 DRM_ERROR("Device busy: %d\n",
34462 atomic_read(&dev->ioctl_count));
34463diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
34464index 8bf3770..79422805 100644
34465--- a/drivers/gpu/drm/drm_gem.c
34466+++ b/drivers/gpu/drm/drm_gem.c
34467@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
34468 spin_lock_init(&dev->object_name_lock);
34469 idr_init(&dev->object_name_idr);
34470 atomic_set(&dev->object_count, 0);
34471- atomic_set(&dev->object_memory, 0);
34472+ atomic_set_unchecked(&dev->object_memory, 0);
34473 atomic_set(&dev->pin_count, 0);
34474- atomic_set(&dev->pin_memory, 0);
34475+ atomic_set_unchecked(&dev->pin_memory, 0);
34476 atomic_set(&dev->gtt_count, 0);
34477- atomic_set(&dev->gtt_memory, 0);
34478+ atomic_set_unchecked(&dev->gtt_memory, 0);
34479
34480 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
34481 if (!mm) {
34482@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
34483 goto fput;
34484 }
34485 atomic_inc(&dev->object_count);
34486- atomic_add(obj->size, &dev->object_memory);
34487+ atomic_add_unchecked(obj->size, &dev->object_memory);
34488 return obj;
34489 fput:
34490 fput(obj->filp);
34491@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
34492
34493 fput(obj->filp);
34494 atomic_dec(&dev->object_count);
34495- atomic_sub(obj->size, &dev->object_memory);
34496+ atomic_sub_unchecked(obj->size, &dev->object_memory);
34497 kfree(obj);
34498 }
34499 EXPORT_SYMBOL(drm_gem_object_free);
34500diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34501index f0f6c6b..34af322 100644
34502--- a/drivers/gpu/drm/drm_info.c
34503+++ b/drivers/gpu/drm/drm_info.c
34504@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34505 struct drm_local_map *map;
34506 struct drm_map_list *r_list;
34507
34508- /* Hardcoded from _DRM_FRAME_BUFFER,
34509- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34510- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34511- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34512+ static const char * const types[] = {
34513+ [_DRM_FRAME_BUFFER] = "FB",
34514+ [_DRM_REGISTERS] = "REG",
34515+ [_DRM_SHM] = "SHM",
34516+ [_DRM_AGP] = "AGP",
34517+ [_DRM_SCATTER_GATHER] = "SG",
34518+ [_DRM_CONSISTENT] = "PCI",
34519+ [_DRM_GEM] = "GEM" };
34520 const char *type;
34521 int i;
34522
34523@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34524 map = r_list->map;
34525 if (!map)
34526 continue;
34527- if (map->type < 0 || map->type > 5)
34528+ if (map->type >= ARRAY_SIZE(types))
34529 type = "??";
34530 else
34531 type = types[map->type];
34532@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
34533 struct drm_device *dev = node->minor->dev;
34534
34535 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
34536- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
34537+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
34538 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
34539- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
34540- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
34541+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
34542+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
34543 seq_printf(m, "%d gtt total\n", dev->gtt_total);
34544 return 0;
34545 }
34546@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34547 mutex_lock(&dev->struct_mutex);
34548 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
34549 atomic_read(&dev->vma_count),
34550+#ifdef CONFIG_GRKERNSEC_HIDESYM
34551+ NULL, 0);
34552+#else
34553 high_memory, (u64)virt_to_phys(high_memory));
34554+#endif
34555
34556 list_for_each_entry(pt, &dev->vmalist, head) {
34557 vma = pt->vma;
34558@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
34559 continue;
34560 seq_printf(m,
34561 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
34562- pt->pid, vma->vm_start, vma->vm_end,
34563+ pt->pid,
34564+#ifdef CONFIG_GRKERNSEC_HIDESYM
34565+ 0, 0,
34566+#else
34567+ vma->vm_start, vma->vm_end,
34568+#endif
34569 vma->vm_flags & VM_READ ? 'r' : '-',
34570 vma->vm_flags & VM_WRITE ? 'w' : '-',
34571 vma->vm_flags & VM_EXEC ? 'x' : '-',
34572 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34573 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34574 vma->vm_flags & VM_IO ? 'i' : '-',
34575+#ifdef CONFIG_GRKERNSEC_HIDESYM
34576+ 0);
34577+#else
34578 vma->vm_pgoff);
34579+#endif
34580
34581 #if defined(__i386__)
34582 pgprot = pgprot_val(vma->vm_page_prot);
34583diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34584index 282d9fd..71e5f11 100644
34585--- a/drivers/gpu/drm/drm_ioc32.c
34586+++ b/drivers/gpu/drm/drm_ioc32.c
34587@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34588 request = compat_alloc_user_space(nbytes);
34589 if (!access_ok(VERIFY_WRITE, request, nbytes))
34590 return -EFAULT;
34591- list = (struct drm_buf_desc *) (request + 1);
34592+ list = (struct drm_buf_desc __user *) (request + 1);
34593
34594 if (__put_user(count, &request->count)
34595 || __put_user(list, &request->list))
34596@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34597 request = compat_alloc_user_space(nbytes);
34598 if (!access_ok(VERIFY_WRITE, request, nbytes))
34599 return -EFAULT;
34600- list = (struct drm_buf_pub *) (request + 1);
34601+ list = (struct drm_buf_pub __user *) (request + 1);
34602
34603 if (__put_user(count, &request->count)
34604 || __put_user(list, &request->list))
34605diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34606index 9b9ff46..4ea724c 100644
34607--- a/drivers/gpu/drm/drm_ioctl.c
34608+++ b/drivers/gpu/drm/drm_ioctl.c
34609@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34610 stats->data[i].value =
34611 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34612 else
34613- stats->data[i].value = atomic_read(&dev->counts[i]);
34614+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34615 stats->data[i].type = dev->types[i];
34616 }
34617
34618diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34619index e2f70a5..c703e86 100644
34620--- a/drivers/gpu/drm/drm_lock.c
34621+++ b/drivers/gpu/drm/drm_lock.c
34622@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34623 if (drm_lock_take(&master->lock, lock->context)) {
34624 master->lock.file_priv = file_priv;
34625 master->lock.lock_time = jiffies;
34626- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34627+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34628 break; /* Got lock */
34629 }
34630
34631@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34632 return -EINVAL;
34633 }
34634
34635- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34636+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34637
34638 /* kernel_context_switch isn't used by any of the x86 drm
34639 * modules but is required by the Sparc driver.
34640diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34641index 7d1d88c..b9131b2 100644
34642--- a/drivers/gpu/drm/i810/i810_dma.c
34643+++ b/drivers/gpu/drm/i810/i810_dma.c
34644@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34645 dma->buflist[vertex->idx],
34646 vertex->discard, vertex->used);
34647
34648- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34649- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34650+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34651+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34652 sarea_priv->last_enqueue = dev_priv->counter - 1;
34653 sarea_priv->last_dispatch = (int)hw_status[5];
34654
34655@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34656 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34657 mc->last_render);
34658
34659- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34660- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34661+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34662+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34663 sarea_priv->last_enqueue = dev_priv->counter - 1;
34664 sarea_priv->last_dispatch = (int)hw_status[5];
34665
34666diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34667index 21e2691..7321edd 100644
34668--- a/drivers/gpu/drm/i810/i810_drv.h
34669+++ b/drivers/gpu/drm/i810/i810_drv.h
34670@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34671 int page_flipping;
34672
34673 wait_queue_head_t irq_queue;
34674- atomic_t irq_received;
34675- atomic_t irq_emitted;
34676+ atomic_unchecked_t irq_received;
34677+ atomic_unchecked_t irq_emitted;
34678
34679 int front_offset;
34680 } drm_i810_private_t;
34681diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
34682index da82afe..48a45de 100644
34683--- a/drivers/gpu/drm/i830/i830_drv.h
34684+++ b/drivers/gpu/drm/i830/i830_drv.h
34685@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
34686 int page_flipping;
34687
34688 wait_queue_head_t irq_queue;
34689- atomic_t irq_received;
34690- atomic_t irq_emitted;
34691+ atomic_unchecked_t irq_received;
34692+ atomic_unchecked_t irq_emitted;
34693
34694 int use_mi_batchbuffer_start;
34695
34696diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
34697index 91ec2bb..6f21fab 100644
34698--- a/drivers/gpu/drm/i830/i830_irq.c
34699+++ b/drivers/gpu/drm/i830/i830_irq.c
34700@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
34701
34702 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
34703
34704- atomic_inc(&dev_priv->irq_received);
34705+ atomic_inc_unchecked(&dev_priv->irq_received);
34706 wake_up_interruptible(&dev_priv->irq_queue);
34707
34708 return IRQ_HANDLED;
34709@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
34710
34711 DRM_DEBUG("%s\n", __func__);
34712
34713- atomic_inc(&dev_priv->irq_emitted);
34714+ atomic_inc_unchecked(&dev_priv->irq_emitted);
34715
34716 BEGIN_LP_RING(2);
34717 OUT_RING(0);
34718 OUT_RING(GFX_OP_USER_INTERRUPT);
34719 ADVANCE_LP_RING();
34720
34721- return atomic_read(&dev_priv->irq_emitted);
34722+ return atomic_read_unchecked(&dev_priv->irq_emitted);
34723 }
34724
34725 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34726@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34727
34728 DRM_DEBUG("%s\n", __func__);
34729
34730- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34731+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34732 return 0;
34733
34734 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
34735@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34736
34737 for (;;) {
34738 __set_current_state(TASK_INTERRUPTIBLE);
34739- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34740+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34741 break;
34742 if ((signed)(end - jiffies) <= 0) {
34743 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
34744@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
34745 I830_WRITE16(I830REG_HWSTAM, 0xffff);
34746 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
34747 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
34748- atomic_set(&dev_priv->irq_received, 0);
34749- atomic_set(&dev_priv->irq_emitted, 0);
34750+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34751+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
34752 init_waitqueue_head(&dev_priv->irq_queue);
34753 }
34754
34755diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
34756index 288fc50..c6092055 100644
34757--- a/drivers/gpu/drm/i915/dvo.h
34758+++ b/drivers/gpu/drm/i915/dvo.h
34759@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
34760 *
34761 * \return singly-linked list of modes or NULL if no modes found.
34762 */
34763- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
34764+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
34765
34766 /**
34767 * Clean up driver-specific bits of the output
34768 */
34769- void (*destroy) (struct intel_dvo_device *dvo);
34770+ void (* const destroy) (struct intel_dvo_device *dvo);
34771
34772 /**
34773 * Debugging hook to dump device registers to log file
34774 */
34775- void (*dump_regs)(struct intel_dvo_device *dvo);
34776+ void (* const dump_regs)(struct intel_dvo_device *dvo);
34777 };
34778
34779-extern struct intel_dvo_dev_ops sil164_ops;
34780-extern struct intel_dvo_dev_ops ch7xxx_ops;
34781-extern struct intel_dvo_dev_ops ivch_ops;
34782-extern struct intel_dvo_dev_ops tfp410_ops;
34783-extern struct intel_dvo_dev_ops ch7017_ops;
34784+extern const struct intel_dvo_dev_ops sil164_ops;
34785+extern const struct intel_dvo_dev_ops ch7xxx_ops;
34786+extern const struct intel_dvo_dev_ops ivch_ops;
34787+extern const struct intel_dvo_dev_ops tfp410_ops;
34788+extern const struct intel_dvo_dev_ops ch7017_ops;
34789
34790 #endif /* _INTEL_DVO_H */
34791diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
34792index 621815b..499d82e 100644
34793--- a/drivers/gpu/drm/i915/dvo_ch7017.c
34794+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
34795@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
34796 }
34797 }
34798
34799-struct intel_dvo_dev_ops ch7017_ops = {
34800+const struct intel_dvo_dev_ops ch7017_ops = {
34801 .init = ch7017_init,
34802 .detect = ch7017_detect,
34803 .mode_valid = ch7017_mode_valid,
34804diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34805index a9b8962..ac769ba 100644
34806--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
34807+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34808@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
34809 }
34810 }
34811
34812-struct intel_dvo_dev_ops ch7xxx_ops = {
34813+const struct intel_dvo_dev_ops ch7xxx_ops = {
34814 .init = ch7xxx_init,
34815 .detect = ch7xxx_detect,
34816 .mode_valid = ch7xxx_mode_valid,
34817diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
34818index aa176f9..ed2930c 100644
34819--- a/drivers/gpu/drm/i915/dvo_ivch.c
34820+++ b/drivers/gpu/drm/i915/dvo_ivch.c
34821@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
34822 }
34823 }
34824
34825-struct intel_dvo_dev_ops ivch_ops= {
34826+const struct intel_dvo_dev_ops ivch_ops= {
34827 .init = ivch_init,
34828 .dpms = ivch_dpms,
34829 .save = ivch_save,
34830diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
34831index e1c1f73..7dbebcf 100644
34832--- a/drivers/gpu/drm/i915/dvo_sil164.c
34833+++ b/drivers/gpu/drm/i915/dvo_sil164.c
34834@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
34835 }
34836 }
34837
34838-struct intel_dvo_dev_ops sil164_ops = {
34839+const struct intel_dvo_dev_ops sil164_ops = {
34840 .init = sil164_init,
34841 .detect = sil164_detect,
34842 .mode_valid = sil164_mode_valid,
34843diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
34844index 16dce84..7e1b6f8 100644
34845--- a/drivers/gpu/drm/i915/dvo_tfp410.c
34846+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
34847@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
34848 }
34849 }
34850
34851-struct intel_dvo_dev_ops tfp410_ops = {
34852+const struct intel_dvo_dev_ops tfp410_ops = {
34853 .init = tfp410_init,
34854 .detect = tfp410_detect,
34855 .mode_valid = tfp410_mode_valid,
34856diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34857index 7e859d6..7d1cf2b 100644
34858--- a/drivers/gpu/drm/i915/i915_debugfs.c
34859+++ b/drivers/gpu/drm/i915/i915_debugfs.c
34860@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34861 I915_READ(GTIMR));
34862 }
34863 seq_printf(m, "Interrupts received: %d\n",
34864- atomic_read(&dev_priv->irq_received));
34865+ atomic_read_unchecked(&dev_priv->irq_received));
34866 if (dev_priv->hw_status_page != NULL) {
34867 seq_printf(m, "Current sequence: %d\n",
34868 i915_get_gem_seqno(dev));
34869diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
34870index 5449239..7e4f68d 100644
34871--- a/drivers/gpu/drm/i915/i915_drv.c
34872+++ b/drivers/gpu/drm/i915/i915_drv.c
34873@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
34874 return i915_resume(dev);
34875 }
34876
34877-static struct vm_operations_struct i915_gem_vm_ops = {
34878+static const struct vm_operations_struct i915_gem_vm_ops = {
34879 .fault = i915_gem_fault,
34880 .open = drm_gem_vm_open,
34881 .close = drm_gem_vm_close,
34882diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34883index 97163f7..c24c7c7 100644
34884--- a/drivers/gpu/drm/i915/i915_drv.h
34885+++ b/drivers/gpu/drm/i915/i915_drv.h
34886@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
34887 /* display clock increase/decrease */
34888 /* pll clock increase/decrease */
34889 /* clock gating init */
34890-};
34891+} __no_const;
34892
34893 typedef struct drm_i915_private {
34894 struct drm_device *dev;
34895@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
34896 int page_flipping;
34897
34898 wait_queue_head_t irq_queue;
34899- atomic_t irq_received;
34900+ atomic_unchecked_t irq_received;
34901 /** Protects user_irq_refcount and irq_mask_reg */
34902 spinlock_t user_irq_lock;
34903 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
34904diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
34905index 27a3074..eb3f959 100644
34906--- a/drivers/gpu/drm/i915/i915_gem.c
34907+++ b/drivers/gpu/drm/i915/i915_gem.c
34908@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
34909
34910 args->aper_size = dev->gtt_total;
34911 args->aper_available_size = (args->aper_size -
34912- atomic_read(&dev->pin_memory));
34913+ atomic_read_unchecked(&dev->pin_memory));
34914
34915 return 0;
34916 }
34917@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
34918
34919 if (obj_priv->gtt_space) {
34920 atomic_dec(&dev->gtt_count);
34921- atomic_sub(obj->size, &dev->gtt_memory);
34922+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
34923
34924 drm_mm_put_block(obj_priv->gtt_space);
34925 obj_priv->gtt_space = NULL;
34926@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
34927 goto search_free;
34928 }
34929 atomic_inc(&dev->gtt_count);
34930- atomic_add(obj->size, &dev->gtt_memory);
34931+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
34932
34933 /* Assert that the object is not currently in any GPU domain. As it
34934 * wasn't in the GTT, there shouldn't be any way it could have been in
34935@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
34936 "%d/%d gtt bytes\n",
34937 atomic_read(&dev->object_count),
34938 atomic_read(&dev->pin_count),
34939- atomic_read(&dev->object_memory),
34940- atomic_read(&dev->pin_memory),
34941- atomic_read(&dev->gtt_memory),
34942+ atomic_read_unchecked(&dev->object_memory),
34943+ atomic_read_unchecked(&dev->pin_memory),
34944+ atomic_read_unchecked(&dev->gtt_memory),
34945 dev->gtt_total);
34946 }
34947 goto err;
34948@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
34949 */
34950 if (obj_priv->pin_count == 1) {
34951 atomic_inc(&dev->pin_count);
34952- atomic_add(obj->size, &dev->pin_memory);
34953+ atomic_add_unchecked(obj->size, &dev->pin_memory);
34954 if (!obj_priv->active &&
34955 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
34956 !list_empty(&obj_priv->list))
34957@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
34958 list_move_tail(&obj_priv->list,
34959 &dev_priv->mm.inactive_list);
34960 atomic_dec(&dev->pin_count);
34961- atomic_sub(obj->size, &dev->pin_memory);
34962+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
34963 }
34964 i915_verify_inactive(dev, __FILE__, __LINE__);
34965 }
34966diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34967index 63f28ad..f5469da 100644
34968--- a/drivers/gpu/drm/i915/i915_irq.c
34969+++ b/drivers/gpu/drm/i915/i915_irq.c
34970@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
34971 int irq_received;
34972 int ret = IRQ_NONE;
34973
34974- atomic_inc(&dev_priv->irq_received);
34975+ atomic_inc_unchecked(&dev_priv->irq_received);
34976
34977 if (IS_IGDNG(dev))
34978 return igdng_irq_handler(dev);
34979@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
34980 {
34981 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34982
34983- atomic_set(&dev_priv->irq_received, 0);
34984+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34985
34986 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
34987 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
34988diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
34989index 5d9c6a7..d1b0e29 100644
34990--- a/drivers/gpu/drm/i915/intel_sdvo.c
34991+++ b/drivers/gpu/drm/i915/intel_sdvo.c
34992@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
34993 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
34994
34995 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
34996- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34997+ pax_open_kernel();
34998+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34999+ pax_close_kernel();
35000
35001 /* Read the regs to test if we can talk to the device */
35002 for (i = 0; i < 0x40; i++) {
35003diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35004index be6c6b9..8615d9c 100644
35005--- a/drivers/gpu/drm/mga/mga_drv.h
35006+++ b/drivers/gpu/drm/mga/mga_drv.h
35007@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35008 u32 clear_cmd;
35009 u32 maccess;
35010
35011- atomic_t vbl_received; /**< Number of vblanks received. */
35012+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35013 wait_queue_head_t fence_queue;
35014- atomic_t last_fence_retired;
35015+ atomic_unchecked_t last_fence_retired;
35016 u32 next_fence_to_post;
35017
35018 unsigned int fb_cpp;
35019diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35020index daa6041..a28a5da 100644
35021--- a/drivers/gpu/drm/mga/mga_irq.c
35022+++ b/drivers/gpu/drm/mga/mga_irq.c
35023@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35024 if (crtc != 0)
35025 return 0;
35026
35027- return atomic_read(&dev_priv->vbl_received);
35028+ return atomic_read_unchecked(&dev_priv->vbl_received);
35029 }
35030
35031
35032@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35033 /* VBLANK interrupt */
35034 if (status & MGA_VLINEPEN) {
35035 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35036- atomic_inc(&dev_priv->vbl_received);
35037+ atomic_inc_unchecked(&dev_priv->vbl_received);
35038 drm_handle_vblank(dev, 0);
35039 handled = 1;
35040 }
35041@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35042 MGA_WRITE(MGA_PRIMEND, prim_end);
35043 }
35044
35045- atomic_inc(&dev_priv->last_fence_retired);
35046+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35047 DRM_WAKEUP(&dev_priv->fence_queue);
35048 handled = 1;
35049 }
35050@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
35051 * using fences.
35052 */
35053 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35054- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35055+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35056 - *sequence) <= (1 << 23)));
35057
35058 *sequence = cur_fence;
35059diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35060index 4c39a40..b22a9ea 100644
35061--- a/drivers/gpu/drm/r128/r128_cce.c
35062+++ b/drivers/gpu/drm/r128/r128_cce.c
35063@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
35064
35065 /* GH: Simple idle check.
35066 */
35067- atomic_set(&dev_priv->idle_count, 0);
35068+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35069
35070 /* We don't support anything other than bus-mastering ring mode,
35071 * but the ring can be in either AGP or PCI space for the ring
35072diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35073index 3c60829..4faf484 100644
35074--- a/drivers/gpu/drm/r128/r128_drv.h
35075+++ b/drivers/gpu/drm/r128/r128_drv.h
35076@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35077 int is_pci;
35078 unsigned long cce_buffers_offset;
35079
35080- atomic_t idle_count;
35081+ atomic_unchecked_t idle_count;
35082
35083 int page_flipping;
35084 int current_page;
35085 u32 crtc_offset;
35086 u32 crtc_offset_cntl;
35087
35088- atomic_t vbl_received;
35089+ atomic_unchecked_t vbl_received;
35090
35091 u32 color_fmt;
35092 unsigned int front_offset;
35093diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35094index 69810fb..97bf17a 100644
35095--- a/drivers/gpu/drm/r128/r128_irq.c
35096+++ b/drivers/gpu/drm/r128/r128_irq.c
35097@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35098 if (crtc != 0)
35099 return 0;
35100
35101- return atomic_read(&dev_priv->vbl_received);
35102+ return atomic_read_unchecked(&dev_priv->vbl_received);
35103 }
35104
35105 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35106@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35107 /* VBLANK interrupt */
35108 if (status & R128_CRTC_VBLANK_INT) {
35109 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35110- atomic_inc(&dev_priv->vbl_received);
35111+ atomic_inc_unchecked(&dev_priv->vbl_received);
35112 drm_handle_vblank(dev, 0);
35113 return IRQ_HANDLED;
35114 }
35115diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35116index af2665c..51922d2 100644
35117--- a/drivers/gpu/drm/r128/r128_state.c
35118+++ b/drivers/gpu/drm/r128/r128_state.c
35119@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
35120
35121 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
35122 {
35123- if (atomic_read(&dev_priv->idle_count) == 0) {
35124+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
35125 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35126 } else {
35127- atomic_set(&dev_priv->idle_count, 0);
35128+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35129 }
35130 }
35131
35132diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
35133index dd72b91..8644b3c 100644
35134--- a/drivers/gpu/drm/radeon/atom.c
35135+++ b/drivers/gpu/drm/radeon/atom.c
35136@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
35137 char name[512];
35138 int i;
35139
35140+ pax_track_stack();
35141+
35142 ctx->card = card;
35143 ctx->bios = bios;
35144
35145diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35146index 0d79577..efaa7a5 100644
35147--- a/drivers/gpu/drm/radeon/mkregtable.c
35148+++ b/drivers/gpu/drm/radeon/mkregtable.c
35149@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35150 regex_t mask_rex;
35151 regmatch_t match[4];
35152 char buf[1024];
35153- size_t end;
35154+ long end;
35155 int len;
35156 int done = 0;
35157 int r;
35158 unsigned o;
35159 struct offset *offset;
35160 char last_reg_s[10];
35161- int last_reg;
35162+ unsigned long last_reg;
35163
35164 if (regcomp
35165 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35166diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
35167index 6735213..38c2c67 100644
35168--- a/drivers/gpu/drm/radeon/radeon.h
35169+++ b/drivers/gpu/drm/radeon/radeon.h
35170@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
35171 */
35172 struct radeon_fence_driver {
35173 uint32_t scratch_reg;
35174- atomic_t seq;
35175+ atomic_unchecked_t seq;
35176 uint32_t last_seq;
35177 unsigned long count_timeout;
35178 wait_queue_head_t queue;
35179@@ -640,7 +640,7 @@ struct radeon_asic {
35180 uint32_t offset, uint32_t obj_size);
35181 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
35182 void (*bandwidth_update)(struct radeon_device *rdev);
35183-};
35184+} __no_const;
35185
35186 /*
35187 * Asic structures
35188diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
35189index 4e928b9..d8b6008 100644
35190--- a/drivers/gpu/drm/radeon/radeon_atombios.c
35191+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
35192@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
35193 bool linkb;
35194 struct radeon_i2c_bus_rec ddc_bus;
35195
35196+ pax_track_stack();
35197+
35198 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35199
35200 if (data_offset == 0)
35201@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
35202 }
35203 }
35204
35205-struct bios_connector {
35206+static struct bios_connector {
35207 bool valid;
35208 uint16_t line_mux;
35209 uint16_t devices;
35210 int connector_type;
35211 struct radeon_i2c_bus_rec ddc_bus;
35212-};
35213+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35214
35215 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35216 drm_device
35217@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35218 uint8_t dac;
35219 union atom_supported_devices *supported_devices;
35220 int i, j;
35221- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35222
35223 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35224
35225diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
35226index 083a181..ccccae0 100644
35227--- a/drivers/gpu/drm/radeon/radeon_display.c
35228+++ b/drivers/gpu/drm/radeon/radeon_display.c
35229@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
35230
35231 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
35232 error = freq - current_freq;
35233- error = error < 0 ? 0xffffffff : error;
35234+ error = (int32_t)error < 0 ? 0xffffffff : error;
35235 } else
35236 error = abs(current_freq - freq);
35237 vco_diff = abs(vco - best_vco);
35238diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35239index 76e4070..193fa7f 100644
35240--- a/drivers/gpu/drm/radeon/radeon_drv.h
35241+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35242@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
35243
35244 /* SW interrupt */
35245 wait_queue_head_t swi_queue;
35246- atomic_t swi_emitted;
35247+ atomic_unchecked_t swi_emitted;
35248 int vblank_crtc;
35249 uint32_t irq_enable_reg;
35250 uint32_t r500_disp_irq_reg;
35251diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
35252index 3beb26d..6ce9c4a 100644
35253--- a/drivers/gpu/drm/radeon/radeon_fence.c
35254+++ b/drivers/gpu/drm/radeon/radeon_fence.c
35255@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
35256 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
35257 return 0;
35258 }
35259- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
35260+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
35261 if (!rdev->cp.ready) {
35262 /* FIXME: cp is not running assume everythings is done right
35263 * away
35264@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
35265 return r;
35266 }
35267 WREG32(rdev->fence_drv.scratch_reg, 0);
35268- atomic_set(&rdev->fence_drv.seq, 0);
35269+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
35270 INIT_LIST_HEAD(&rdev->fence_drv.created);
35271 INIT_LIST_HEAD(&rdev->fence_drv.emited);
35272 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
35273diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35274index a1bf11d..4a123c0 100644
35275--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35276+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35277@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35278 request = compat_alloc_user_space(sizeof(*request));
35279 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35280 || __put_user(req32.param, &request->param)
35281- || __put_user((void __user *)(unsigned long)req32.value,
35282+ || __put_user((unsigned long)req32.value,
35283 &request->value))
35284 return -EFAULT;
35285
35286diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35287index b79ecc4..8dab92d 100644
35288--- a/drivers/gpu/drm/radeon/radeon_irq.c
35289+++ b/drivers/gpu/drm/radeon/radeon_irq.c
35290@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35291 unsigned int ret;
35292 RING_LOCALS;
35293
35294- atomic_inc(&dev_priv->swi_emitted);
35295- ret = atomic_read(&dev_priv->swi_emitted);
35296+ atomic_inc_unchecked(&dev_priv->swi_emitted);
35297+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35298
35299 BEGIN_RING(4);
35300 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35301@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35302 drm_radeon_private_t *dev_priv =
35303 (drm_radeon_private_t *) dev->dev_private;
35304
35305- atomic_set(&dev_priv->swi_emitted, 0);
35306+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35307 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35308
35309 dev->max_vblank_count = 0x001fffff;
35310diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35311index 4747910..48ca4b3 100644
35312--- a/drivers/gpu/drm/radeon/radeon_state.c
35313+++ b/drivers/gpu/drm/radeon/radeon_state.c
35314@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35315 {
35316 drm_radeon_private_t *dev_priv = dev->dev_private;
35317 drm_radeon_getparam_t *param = data;
35318- int value;
35319+ int value = 0;
35320
35321 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35322
35323diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35324index 1381e06..0e53b17 100644
35325--- a/drivers/gpu/drm/radeon/radeon_ttm.c
35326+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35327@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
35328 DRM_INFO("radeon: ttm finalized\n");
35329 }
35330
35331-static struct vm_operations_struct radeon_ttm_vm_ops;
35332-static const struct vm_operations_struct *ttm_vm_ops = NULL;
35333-
35334-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35335-{
35336- struct ttm_buffer_object *bo;
35337- int r;
35338-
35339- bo = (struct ttm_buffer_object *)vma->vm_private_data;
35340- if (bo == NULL) {
35341- return VM_FAULT_NOPAGE;
35342- }
35343- r = ttm_vm_ops->fault(vma, vmf);
35344- return r;
35345-}
35346-
35347 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35348 {
35349 struct drm_file *file_priv;
35350 struct radeon_device *rdev;
35351- int r;
35352
35353 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
35354 return drm_mmap(filp, vma);
35355@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35356
35357 file_priv = (struct drm_file *)filp->private_data;
35358 rdev = file_priv->minor->dev->dev_private;
35359- if (rdev == NULL) {
35360+ if (!rdev)
35361 return -EINVAL;
35362- }
35363- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35364- if (unlikely(r != 0)) {
35365- return r;
35366- }
35367- if (unlikely(ttm_vm_ops == NULL)) {
35368- ttm_vm_ops = vma->vm_ops;
35369- radeon_ttm_vm_ops = *ttm_vm_ops;
35370- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35371- }
35372- vma->vm_ops = &radeon_ttm_vm_ops;
35373- return 0;
35374+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35375 }
35376
35377
35378diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35379index b12ff76..0bd0c6e 100644
35380--- a/drivers/gpu/drm/radeon/rs690.c
35381+++ b/drivers/gpu/drm/radeon/rs690.c
35382@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35383 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35384 rdev->pm.sideport_bandwidth.full)
35385 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35386- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
35387+ read_delay_latency.full = rfixed_const(800 * 1000);
35388 read_delay_latency.full = rfixed_div(read_delay_latency,
35389 rdev->pm.igp_sideport_mclk);
35390+ a.full = rfixed_const(370);
35391+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
35392 } else {
35393 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35394 rdev->pm.k8_bandwidth.full)
35395diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
35396index 0ed436e..e6e7ce3 100644
35397--- a/drivers/gpu/drm/ttm/ttm_bo.c
35398+++ b/drivers/gpu/drm/ttm/ttm_bo.c
35399@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
35400 NULL
35401 };
35402
35403-static struct sysfs_ops ttm_bo_global_ops = {
35404+static const struct sysfs_ops ttm_bo_global_ops = {
35405 .show = &ttm_bo_global_show
35406 };
35407
35408diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35409index 1c040d0..f9e4af8 100644
35410--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
35411+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35412@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35413 {
35414 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
35415 vma->vm_private_data;
35416- struct ttm_bo_device *bdev = bo->bdev;
35417+ struct ttm_bo_device *bdev;
35418 unsigned long bus_base;
35419 unsigned long bus_offset;
35420 unsigned long bus_size;
35421@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35422 unsigned long address = (unsigned long)vmf->virtual_address;
35423 int retval = VM_FAULT_NOPAGE;
35424
35425+ if (!bo)
35426+ return VM_FAULT_NOPAGE;
35427+ bdev = bo->bdev;
35428+
35429 /*
35430 * Work around locking order reversal in fault / nopfn
35431 * between mmap_sem and bo_reserve: Perform a trylock operation
35432diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
35433index b170071..28ae90e 100644
35434--- a/drivers/gpu/drm/ttm/ttm_global.c
35435+++ b/drivers/gpu/drm/ttm/ttm_global.c
35436@@ -36,7 +36,7 @@
35437 struct ttm_global_item {
35438 struct mutex mutex;
35439 void *object;
35440- int refcount;
35441+ atomic_t refcount;
35442 };
35443
35444 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
35445@@ -49,7 +49,7 @@ void ttm_global_init(void)
35446 struct ttm_global_item *item = &glob[i];
35447 mutex_init(&item->mutex);
35448 item->object = NULL;
35449- item->refcount = 0;
35450+ atomic_set(&item->refcount, 0);
35451 }
35452 }
35453
35454@@ -59,7 +59,7 @@ void ttm_global_release(void)
35455 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
35456 struct ttm_global_item *item = &glob[i];
35457 BUG_ON(item->object != NULL);
35458- BUG_ON(item->refcount != 0);
35459+ BUG_ON(atomic_read(&item->refcount) != 0);
35460 }
35461 }
35462
35463@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35464 void *object;
35465
35466 mutex_lock(&item->mutex);
35467- if (item->refcount == 0) {
35468+ if (atomic_read(&item->refcount) == 0) {
35469 item->object = kzalloc(ref->size, GFP_KERNEL);
35470 if (unlikely(item->object == NULL)) {
35471 ret = -ENOMEM;
35472@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35473 goto out_err;
35474
35475 }
35476- ++item->refcount;
35477+ atomic_inc(&item->refcount);
35478 ref->object = item->object;
35479 object = item->object;
35480 mutex_unlock(&item->mutex);
35481@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
35482 struct ttm_global_item *item = &glob[ref->global_type];
35483
35484 mutex_lock(&item->mutex);
35485- BUG_ON(item->refcount == 0);
35486+ BUG_ON(atomic_read(&item->refcount) == 0);
35487 BUG_ON(ref->object != item->object);
35488- if (--item->refcount == 0) {
35489+ if (atomic_dec_and_test(&item->refcount)) {
35490 ref->release(ref);
35491 item->object = NULL;
35492 }
35493diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
35494index 072c281..d8ef483 100644
35495--- a/drivers/gpu/drm/ttm/ttm_memory.c
35496+++ b/drivers/gpu/drm/ttm/ttm_memory.c
35497@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
35498 NULL
35499 };
35500
35501-static struct sysfs_ops ttm_mem_zone_ops = {
35502+static const struct sysfs_ops ttm_mem_zone_ops = {
35503 .show = &ttm_mem_zone_show,
35504 .store = &ttm_mem_zone_store
35505 };
35506diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35507index cafcb84..b8e66cc 100644
35508--- a/drivers/gpu/drm/via/via_drv.h
35509+++ b/drivers/gpu/drm/via/via_drv.h
35510@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35511 typedef uint32_t maskarray_t[5];
35512
35513 typedef struct drm_via_irq {
35514- atomic_t irq_received;
35515+ atomic_unchecked_t irq_received;
35516 uint32_t pending_mask;
35517 uint32_t enable_mask;
35518 wait_queue_head_t irq_queue;
35519@@ -75,7 +75,7 @@ typedef struct drm_via_private {
35520 struct timeval last_vblank;
35521 int last_vblank_valid;
35522 unsigned usec_per_vblank;
35523- atomic_t vbl_received;
35524+ atomic_unchecked_t vbl_received;
35525 drm_via_state_t hc_state;
35526 char pci_buf[VIA_PCI_BUF_SIZE];
35527 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35528diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35529index 5935b88..127a8a6 100644
35530--- a/drivers/gpu/drm/via/via_irq.c
35531+++ b/drivers/gpu/drm/via/via_irq.c
35532@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35533 if (crtc != 0)
35534 return 0;
35535
35536- return atomic_read(&dev_priv->vbl_received);
35537+ return atomic_read_unchecked(&dev_priv->vbl_received);
35538 }
35539
35540 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35541@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35542
35543 status = VIA_READ(VIA_REG_INTERRUPT);
35544 if (status & VIA_IRQ_VBLANK_PENDING) {
35545- atomic_inc(&dev_priv->vbl_received);
35546- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35547+ atomic_inc_unchecked(&dev_priv->vbl_received);
35548+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35549 do_gettimeofday(&cur_vblank);
35550 if (dev_priv->last_vblank_valid) {
35551 dev_priv->usec_per_vblank =
35552@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35553 dev_priv->last_vblank = cur_vblank;
35554 dev_priv->last_vblank_valid = 1;
35555 }
35556- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35557+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35558 DRM_DEBUG("US per vblank is: %u\n",
35559 dev_priv->usec_per_vblank);
35560 }
35561@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35562
35563 for (i = 0; i < dev_priv->num_irqs; ++i) {
35564 if (status & cur_irq->pending_mask) {
35565- atomic_inc(&cur_irq->irq_received);
35566+ atomic_inc_unchecked(&cur_irq->irq_received);
35567 DRM_WAKEUP(&cur_irq->irq_queue);
35568 handled = 1;
35569 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
35570@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
35571 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35572 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35573 masks[irq][4]));
35574- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35575+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35576 } else {
35577 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35578 (((cur_irq_sequence =
35579- atomic_read(&cur_irq->irq_received)) -
35580+ atomic_read_unchecked(&cur_irq->irq_received)) -
35581 *sequence) <= (1 << 23)));
35582 }
35583 *sequence = cur_irq_sequence;
35584@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
35585 }
35586
35587 for (i = 0; i < dev_priv->num_irqs; ++i) {
35588- atomic_set(&cur_irq->irq_received, 0);
35589+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35590 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35591 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35592 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35593@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35594 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35595 case VIA_IRQ_RELATIVE:
35596 irqwait->request.sequence +=
35597- atomic_read(&cur_irq->irq_received);
35598+ atomic_read_unchecked(&cur_irq->irq_received);
35599 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35600 case VIA_IRQ_ABSOLUTE:
35601 break;
35602diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
35603index aa8688d..6a0140c 100644
35604--- a/drivers/gpu/vga/vgaarb.c
35605+++ b/drivers/gpu/vga/vgaarb.c
35606@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
35607 uc = &priv->cards[i];
35608 }
35609
35610- if (!uc)
35611- return -EINVAL;
35612+ if (!uc) {
35613+ ret_val = -EINVAL;
35614+ goto done;
35615+ }
35616
35617- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
35618- return -EINVAL;
35619+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
35620+ ret_val = -EINVAL;
35621+ goto done;
35622+ }
35623
35624- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
35625- return -EINVAL;
35626+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
35627+ ret_val = -EINVAL;
35628+ goto done;
35629+ }
35630
35631 vga_put(pdev, io_state);
35632
35633diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35634index 11f8069..4783396 100644
35635--- a/drivers/hid/hid-core.c
35636+++ b/drivers/hid/hid-core.c
35637@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
35638
35639 int hid_add_device(struct hid_device *hdev)
35640 {
35641- static atomic_t id = ATOMIC_INIT(0);
35642+ static atomic_unchecked_t id = ATOMIC_INIT(0);
35643 int ret;
35644
35645 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35646@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
35647 /* XXX hack, any other cleaner solution after the driver core
35648 * is converted to allow more than 20 bytes as the device name? */
35649 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35650- hdev->vendor, hdev->product, atomic_inc_return(&id));
35651+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35652
35653 ret = device_add(&hdev->dev);
35654 if (!ret)
35655diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
35656index 8b6ee24..70f657d 100644
35657--- a/drivers/hid/usbhid/hiddev.c
35658+++ b/drivers/hid/usbhid/hiddev.c
35659@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
35660 return put_user(HID_VERSION, (int __user *)arg);
35661
35662 case HIDIOCAPPLICATION:
35663- if (arg < 0 || arg >= hid->maxapplication)
35664+ if (arg >= hid->maxapplication)
35665 return -EINVAL;
35666
35667 for (i = 0; i < hid->maxcollection; i++)
35668diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
35669index 5d5ed69..f40533e 100644
35670--- a/drivers/hwmon/lis3lv02d.c
35671+++ b/drivers/hwmon/lis3lv02d.c
35672@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
35673 * the lid is closed. This leads to interrupts as soon as a little move
35674 * is done.
35675 */
35676- atomic_inc(&lis3_dev.count);
35677+ atomic_inc_unchecked(&lis3_dev.count);
35678
35679 wake_up_interruptible(&lis3_dev.misc_wait);
35680 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
35681@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35682 if (test_and_set_bit(0, &lis3_dev.misc_opened))
35683 return -EBUSY; /* already open */
35684
35685- atomic_set(&lis3_dev.count, 0);
35686+ atomic_set_unchecked(&lis3_dev.count, 0);
35687
35688 /*
35689 * The sensor can generate interrupts for free-fall and direction
35690@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35691 add_wait_queue(&lis3_dev.misc_wait, &wait);
35692 while (true) {
35693 set_current_state(TASK_INTERRUPTIBLE);
35694- data = atomic_xchg(&lis3_dev.count, 0);
35695+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
35696 if (data)
35697 break;
35698
35699@@ -244,7 +244,7 @@ out:
35700 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35701 {
35702 poll_wait(file, &lis3_dev.misc_wait, wait);
35703- if (atomic_read(&lis3_dev.count))
35704+ if (atomic_read_unchecked(&lis3_dev.count))
35705 return POLLIN | POLLRDNORM;
35706 return 0;
35707 }
35708diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
35709index 7cdd76f..fe0efdf 100644
35710--- a/drivers/hwmon/lis3lv02d.h
35711+++ b/drivers/hwmon/lis3lv02d.h
35712@@ -201,7 +201,7 @@ struct lis3lv02d {
35713
35714 struct input_polled_dev *idev; /* input device */
35715 struct platform_device *pdev; /* platform device */
35716- atomic_t count; /* interrupt count after last read */
35717+ atomic_unchecked_t count; /* interrupt count after last read */
35718 int xcalib; /* calibrated null value for x */
35719 int ycalib; /* calibrated null value for y */
35720 int zcalib; /* calibrated null value for z */
35721diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35722index 740785e..5a5c6c6 100644
35723--- a/drivers/hwmon/sht15.c
35724+++ b/drivers/hwmon/sht15.c
35725@@ -112,7 +112,7 @@ struct sht15_data {
35726 int supply_uV;
35727 int supply_uV_valid;
35728 struct work_struct update_supply_work;
35729- atomic_t interrupt_handled;
35730+ atomic_unchecked_t interrupt_handled;
35731 };
35732
35733 /**
35734@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
35735 return ret;
35736
35737 gpio_direction_input(data->pdata->gpio_data);
35738- atomic_set(&data->interrupt_handled, 0);
35739+ atomic_set_unchecked(&data->interrupt_handled, 0);
35740
35741 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35742 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35743 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35744 /* Only relevant if the interrupt hasn't occured. */
35745- if (!atomic_read(&data->interrupt_handled))
35746+ if (!atomic_read_unchecked(&data->interrupt_handled))
35747 schedule_work(&data->read_work);
35748 }
35749 ret = wait_event_timeout(data->wait_queue,
35750@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35751 struct sht15_data *data = d;
35752 /* First disable the interrupt */
35753 disable_irq_nosync(irq);
35754- atomic_inc(&data->interrupt_handled);
35755+ atomic_inc_unchecked(&data->interrupt_handled);
35756 /* Then schedule a reading work struct */
35757 if (data->flag != SHT15_READING_NOTHING)
35758 schedule_work(&data->read_work);
35759@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35760 here as could have gone low in meantime so verify
35761 it hasn't!
35762 */
35763- atomic_set(&data->interrupt_handled, 0);
35764+ atomic_set_unchecked(&data->interrupt_handled, 0);
35765 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35766 /* If still not occured or another handler has been scheduled */
35767 if (gpio_get_value(data->pdata->gpio_data)
35768- || atomic_read(&data->interrupt_handled))
35769+ || atomic_read_unchecked(&data->interrupt_handled))
35770 return;
35771 }
35772 /* Read the data back from the device */
35773diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
35774index 97851c5..cb40626 100644
35775--- a/drivers/hwmon/w83791d.c
35776+++ b/drivers/hwmon/w83791d.c
35777@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
35778 struct i2c_board_info *info);
35779 static int w83791d_remove(struct i2c_client *client);
35780
35781-static int w83791d_read(struct i2c_client *client, u8 register);
35782-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
35783+static int w83791d_read(struct i2c_client *client, u8 reg);
35784+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
35785 static struct w83791d_data *w83791d_update_device(struct device *dev);
35786
35787 #ifdef DEBUG
35788diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35789index 378fcb5..5e91fa8 100644
35790--- a/drivers/i2c/busses/i2c-amd756-s4882.c
35791+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35792@@ -43,7 +43,7 @@
35793 extern struct i2c_adapter amd756_smbus;
35794
35795 static struct i2c_adapter *s4882_adapter;
35796-static struct i2c_algorithm *s4882_algo;
35797+static i2c_algorithm_no_const *s4882_algo;
35798
35799 /* Wrapper access functions for multiplexed SMBus */
35800 static DEFINE_MUTEX(amd756_lock);
35801diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35802index 29015eb..af2d8e9 100644
35803--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35804+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
35805@@ -41,7 +41,7 @@
35806 extern struct i2c_adapter *nforce2_smbus;
35807
35808 static struct i2c_adapter *s4985_adapter;
35809-static struct i2c_algorithm *s4985_algo;
35810+static i2c_algorithm_no_const *s4985_algo;
35811
35812 /* Wrapper access functions for multiplexed SMBus */
35813 static DEFINE_MUTEX(nforce2_lock);
35814diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
35815index 878f8ec..12376fc 100644
35816--- a/drivers/ide/aec62xx.c
35817+++ b/drivers/ide/aec62xx.c
35818@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
35819 .cable_detect = atp86x_cable_detect,
35820 };
35821
35822-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
35823+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
35824 { /* 0: AEC6210 */
35825 .name = DRV_NAME,
35826 .init_chipset = init_chipset_aec62xx,
35827diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
35828index e59b6de..4b4fc65 100644
35829--- a/drivers/ide/alim15x3.c
35830+++ b/drivers/ide/alim15x3.c
35831@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
35832 .dma_sff_read_status = ide_dma_sff_read_status,
35833 };
35834
35835-static const struct ide_port_info ali15x3_chipset __devinitdata = {
35836+static const struct ide_port_info ali15x3_chipset __devinitconst = {
35837 .name = DRV_NAME,
35838 .init_chipset = init_chipset_ali15x3,
35839 .init_hwif = init_hwif_ali15x3,
35840diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
35841index 628cd2e..087a414 100644
35842--- a/drivers/ide/amd74xx.c
35843+++ b/drivers/ide/amd74xx.c
35844@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
35845 .udma_mask = udma, \
35846 }
35847
35848-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
35849+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
35850 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
35851 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
35852 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
35853diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
35854index 837322b..837fd71 100644
35855--- a/drivers/ide/atiixp.c
35856+++ b/drivers/ide/atiixp.c
35857@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
35858 .cable_detect = atiixp_cable_detect,
35859 };
35860
35861-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
35862+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
35863 { /* 0: IXP200/300/400/700 */
35864 .name = DRV_NAME,
35865 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
35866diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
35867index ca0c46f..d55318a 100644
35868--- a/drivers/ide/cmd64x.c
35869+++ b/drivers/ide/cmd64x.c
35870@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
35871 .dma_sff_read_status = ide_dma_sff_read_status,
35872 };
35873
35874-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
35875+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
35876 { /* 0: CMD643 */
35877 .name = DRV_NAME,
35878 .init_chipset = init_chipset_cmd64x,
35879diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
35880index 09f98ed..cebc5bc 100644
35881--- a/drivers/ide/cs5520.c
35882+++ b/drivers/ide/cs5520.c
35883@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
35884 .set_dma_mode = cs5520_set_dma_mode,
35885 };
35886
35887-static const struct ide_port_info cyrix_chipset __devinitdata = {
35888+static const struct ide_port_info cyrix_chipset __devinitconst = {
35889 .name = DRV_NAME,
35890 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
35891 .port_ops = &cs5520_port_ops,
35892diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
35893index 40bf05e..7d58ca0 100644
35894--- a/drivers/ide/cs5530.c
35895+++ b/drivers/ide/cs5530.c
35896@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
35897 .udma_filter = cs5530_udma_filter,
35898 };
35899
35900-static const struct ide_port_info cs5530_chipset __devinitdata = {
35901+static const struct ide_port_info cs5530_chipset __devinitconst = {
35902 .name = DRV_NAME,
35903 .init_chipset = init_chipset_cs5530,
35904 .init_hwif = init_hwif_cs5530,
35905diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
35906index 983d957..53e6172 100644
35907--- a/drivers/ide/cs5535.c
35908+++ b/drivers/ide/cs5535.c
35909@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
35910 .cable_detect = cs5535_cable_detect,
35911 };
35912
35913-static const struct ide_port_info cs5535_chipset __devinitdata = {
35914+static const struct ide_port_info cs5535_chipset __devinitconst = {
35915 .name = DRV_NAME,
35916 .port_ops = &cs5535_port_ops,
35917 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
35918diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
35919index 74fc540..8e933d8 100644
35920--- a/drivers/ide/cy82c693.c
35921+++ b/drivers/ide/cy82c693.c
35922@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
35923 .set_dma_mode = cy82c693_set_dma_mode,
35924 };
35925
35926-static const struct ide_port_info cy82c693_chipset __devinitdata = {
35927+static const struct ide_port_info cy82c693_chipset __devinitconst = {
35928 .name = DRV_NAME,
35929 .init_iops = init_iops_cy82c693,
35930 .port_ops = &cy82c693_port_ops,
35931diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
35932index 7ce68ef..e78197d 100644
35933--- a/drivers/ide/hpt366.c
35934+++ b/drivers/ide/hpt366.c
35935@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
35936 }
35937 };
35938
35939-static const struct hpt_info hpt36x __devinitdata = {
35940+static const struct hpt_info hpt36x __devinitconst = {
35941 .chip_name = "HPT36x",
35942 .chip_type = HPT36x,
35943 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
35944@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
35945 .timings = &hpt36x_timings
35946 };
35947
35948-static const struct hpt_info hpt370 __devinitdata = {
35949+static const struct hpt_info hpt370 __devinitconst = {
35950 .chip_name = "HPT370",
35951 .chip_type = HPT370,
35952 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35953@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
35954 .timings = &hpt37x_timings
35955 };
35956
35957-static const struct hpt_info hpt370a __devinitdata = {
35958+static const struct hpt_info hpt370a __devinitconst = {
35959 .chip_name = "HPT370A",
35960 .chip_type = HPT370A,
35961 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35962@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
35963 .timings = &hpt37x_timings
35964 };
35965
35966-static const struct hpt_info hpt374 __devinitdata = {
35967+static const struct hpt_info hpt374 __devinitconst = {
35968 .chip_name = "HPT374",
35969 .chip_type = HPT374,
35970 .udma_mask = ATA_UDMA5,
35971@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
35972 .timings = &hpt37x_timings
35973 };
35974
35975-static const struct hpt_info hpt372 __devinitdata = {
35976+static const struct hpt_info hpt372 __devinitconst = {
35977 .chip_name = "HPT372",
35978 .chip_type = HPT372,
35979 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35980@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
35981 .timings = &hpt37x_timings
35982 };
35983
35984-static const struct hpt_info hpt372a __devinitdata = {
35985+static const struct hpt_info hpt372a __devinitconst = {
35986 .chip_name = "HPT372A",
35987 .chip_type = HPT372A,
35988 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35989@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
35990 .timings = &hpt37x_timings
35991 };
35992
35993-static const struct hpt_info hpt302 __devinitdata = {
35994+static const struct hpt_info hpt302 __devinitconst = {
35995 .chip_name = "HPT302",
35996 .chip_type = HPT302,
35997 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35998@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
35999 .timings = &hpt37x_timings
36000 };
36001
36002-static const struct hpt_info hpt371 __devinitdata = {
36003+static const struct hpt_info hpt371 __devinitconst = {
36004 .chip_name = "HPT371",
36005 .chip_type = HPT371,
36006 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36007@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
36008 .timings = &hpt37x_timings
36009 };
36010
36011-static const struct hpt_info hpt372n __devinitdata = {
36012+static const struct hpt_info hpt372n __devinitconst = {
36013 .chip_name = "HPT372N",
36014 .chip_type = HPT372N,
36015 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36016@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
36017 .timings = &hpt37x_timings
36018 };
36019
36020-static const struct hpt_info hpt302n __devinitdata = {
36021+static const struct hpt_info hpt302n __devinitconst = {
36022 .chip_name = "HPT302N",
36023 .chip_type = HPT302N,
36024 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36025@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
36026 .timings = &hpt37x_timings
36027 };
36028
36029-static const struct hpt_info hpt371n __devinitdata = {
36030+static const struct hpt_info hpt371n __devinitconst = {
36031 .chip_name = "HPT371N",
36032 .chip_type = HPT371N,
36033 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
36034@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
36035 .dma_sff_read_status = ide_dma_sff_read_status,
36036 };
36037
36038-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
36039+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
36040 { /* 0: HPT36x */
36041 .name = DRV_NAME,
36042 .init_chipset = init_chipset_hpt366,
36043diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36044index 2de76cc..74186a1 100644
36045--- a/drivers/ide/ide-cd.c
36046+++ b/drivers/ide/ide-cd.c
36047@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36048 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36049 if ((unsigned long)buf & alignment
36050 || blk_rq_bytes(rq) & q->dma_pad_mask
36051- || object_is_on_stack(buf))
36052+ || object_starts_on_stack(buf))
36053 drive->dma = 0;
36054 }
36055 }
36056diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
36057index fefbdfc..62ff465 100644
36058--- a/drivers/ide/ide-floppy.c
36059+++ b/drivers/ide/ide-floppy.c
36060@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
36061 u8 pc_buf[256], header_len, desc_cnt;
36062 int i, rc = 1, blocks, length;
36063
36064+ pax_track_stack();
36065+
36066 ide_debug_log(IDE_DBG_FUNC, "enter");
36067
36068 drive->bios_cyl = 0;
36069diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
36070index 39d4e01..11538ce 100644
36071--- a/drivers/ide/ide-pci-generic.c
36072+++ b/drivers/ide/ide-pci-generic.c
36073@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
36074 .udma_mask = ATA_UDMA6, \
36075 }
36076
36077-static const struct ide_port_info generic_chipsets[] __devinitdata = {
36078+static const struct ide_port_info generic_chipsets[] __devinitconst = {
36079 /* 0: Unknown */
36080 DECLARE_GENERIC_PCI_DEV(0),
36081
36082diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
36083index 0d266a5..aaca790 100644
36084--- a/drivers/ide/it8172.c
36085+++ b/drivers/ide/it8172.c
36086@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
36087 .set_dma_mode = it8172_set_dma_mode,
36088 };
36089
36090-static const struct ide_port_info it8172_port_info __devinitdata = {
36091+static const struct ide_port_info it8172_port_info __devinitconst = {
36092 .name = DRV_NAME,
36093 .port_ops = &it8172_port_ops,
36094 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
36095diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
36096index 4797616..4be488a 100644
36097--- a/drivers/ide/it8213.c
36098+++ b/drivers/ide/it8213.c
36099@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
36100 .cable_detect = it8213_cable_detect,
36101 };
36102
36103-static const struct ide_port_info it8213_chipset __devinitdata = {
36104+static const struct ide_port_info it8213_chipset __devinitconst = {
36105 .name = DRV_NAME,
36106 .enablebits = { {0x41, 0x80, 0x80} },
36107 .port_ops = &it8213_port_ops,
36108diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
36109index 51aa745..146ee60 100644
36110--- a/drivers/ide/it821x.c
36111+++ b/drivers/ide/it821x.c
36112@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
36113 .cable_detect = it821x_cable_detect,
36114 };
36115
36116-static const struct ide_port_info it821x_chipset __devinitdata = {
36117+static const struct ide_port_info it821x_chipset __devinitconst = {
36118 .name = DRV_NAME,
36119 .init_chipset = init_chipset_it821x,
36120 .init_hwif = init_hwif_it821x,
36121diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
36122index bf2be64..9270098 100644
36123--- a/drivers/ide/jmicron.c
36124+++ b/drivers/ide/jmicron.c
36125@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
36126 .cable_detect = jmicron_cable_detect,
36127 };
36128
36129-static const struct ide_port_info jmicron_chipset __devinitdata = {
36130+static const struct ide_port_info jmicron_chipset __devinitconst = {
36131 .name = DRV_NAME,
36132 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
36133 .port_ops = &jmicron_port_ops,
36134diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
36135index 95327a2..73f78d8 100644
36136--- a/drivers/ide/ns87415.c
36137+++ b/drivers/ide/ns87415.c
36138@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
36139 .dma_sff_read_status = superio_dma_sff_read_status,
36140 };
36141
36142-static const struct ide_port_info ns87415_chipset __devinitdata = {
36143+static const struct ide_port_info ns87415_chipset __devinitconst = {
36144 .name = DRV_NAME,
36145 .init_hwif = init_hwif_ns87415,
36146 .tp_ops = &ns87415_tp_ops,
36147diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
36148index f1d70d6..e1de05b 100644
36149--- a/drivers/ide/opti621.c
36150+++ b/drivers/ide/opti621.c
36151@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
36152 .set_pio_mode = opti621_set_pio_mode,
36153 };
36154
36155-static const struct ide_port_info opti621_chipset __devinitdata = {
36156+static const struct ide_port_info opti621_chipset __devinitconst = {
36157 .name = DRV_NAME,
36158 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
36159 .port_ops = &opti621_port_ops,
36160diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
36161index 65ba823..7311f4d 100644
36162--- a/drivers/ide/pdc202xx_new.c
36163+++ b/drivers/ide/pdc202xx_new.c
36164@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
36165 .udma_mask = udma, \
36166 }
36167
36168-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
36169+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
36170 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
36171 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
36172 };
36173diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
36174index cb812f3..af816ef 100644
36175--- a/drivers/ide/pdc202xx_old.c
36176+++ b/drivers/ide/pdc202xx_old.c
36177@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
36178 .max_sectors = sectors, \
36179 }
36180
36181-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
36182+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
36183 { /* 0: PDC20246 */
36184 .name = DRV_NAME,
36185 .init_chipset = init_chipset_pdc202xx,
36186diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
36187index bf14f39..15c4b98 100644
36188--- a/drivers/ide/piix.c
36189+++ b/drivers/ide/piix.c
36190@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
36191 .udma_mask = udma, \
36192 }
36193
36194-static const struct ide_port_info piix_pci_info[] __devinitdata = {
36195+static const struct ide_port_info piix_pci_info[] __devinitconst = {
36196 /* 0: MPIIX */
36197 { /*
36198 * MPIIX actually has only a single IDE channel mapped to
36199diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
36200index a6414a8..c04173e 100644
36201--- a/drivers/ide/rz1000.c
36202+++ b/drivers/ide/rz1000.c
36203@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
36204 }
36205 }
36206
36207-static const struct ide_port_info rz1000_chipset __devinitdata = {
36208+static const struct ide_port_info rz1000_chipset __devinitconst = {
36209 .name = DRV_NAME,
36210 .host_flags = IDE_HFLAG_NO_DMA,
36211 };
36212diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
36213index d467478..9203942 100644
36214--- a/drivers/ide/sc1200.c
36215+++ b/drivers/ide/sc1200.c
36216@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
36217 .dma_sff_read_status = ide_dma_sff_read_status,
36218 };
36219
36220-static const struct ide_port_info sc1200_chipset __devinitdata = {
36221+static const struct ide_port_info sc1200_chipset __devinitconst = {
36222 .name = DRV_NAME,
36223 .port_ops = &sc1200_port_ops,
36224 .dma_ops = &sc1200_dma_ops,
36225diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
36226index 1104bb3..59c5194 100644
36227--- a/drivers/ide/scc_pata.c
36228+++ b/drivers/ide/scc_pata.c
36229@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
36230 .dma_sff_read_status = scc_dma_sff_read_status,
36231 };
36232
36233-static const struct ide_port_info scc_chipset __devinitdata = {
36234+static const struct ide_port_info scc_chipset __devinitconst = {
36235 .name = "sccIDE",
36236 .init_iops = init_iops_scc,
36237 .init_dma = scc_init_dma,
36238diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
36239index b6554ef..6cc2cc3 100644
36240--- a/drivers/ide/serverworks.c
36241+++ b/drivers/ide/serverworks.c
36242@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
36243 .cable_detect = svwks_cable_detect,
36244 };
36245
36246-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
36247+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
36248 { /* 0: OSB4 */
36249 .name = DRV_NAME,
36250 .init_chipset = init_chipset_svwks,
36251diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
36252index ab3db61..afed580 100644
36253--- a/drivers/ide/setup-pci.c
36254+++ b/drivers/ide/setup-pci.c
36255@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
36256 int ret, i, n_ports = dev2 ? 4 : 2;
36257 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
36258
36259+ pax_track_stack();
36260+
36261 for (i = 0; i < n_ports / 2; i++) {
36262 ret = ide_setup_pci_controller(pdev[i], d, !i);
36263 if (ret < 0)
36264diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
36265index d95df52..0b03a39 100644
36266--- a/drivers/ide/siimage.c
36267+++ b/drivers/ide/siimage.c
36268@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
36269 .udma_mask = ATA_UDMA6, \
36270 }
36271
36272-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
36273+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
36274 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
36275 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
36276 };
36277diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
36278index 3b88eba..ca8699d 100644
36279--- a/drivers/ide/sis5513.c
36280+++ b/drivers/ide/sis5513.c
36281@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
36282 .cable_detect = sis_cable_detect,
36283 };
36284
36285-static const struct ide_port_info sis5513_chipset __devinitdata = {
36286+static const struct ide_port_info sis5513_chipset __devinitconst = {
36287 .name = DRV_NAME,
36288 .init_chipset = init_chipset_sis5513,
36289 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
36290diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
36291index d698da4..fca42a4 100644
36292--- a/drivers/ide/sl82c105.c
36293+++ b/drivers/ide/sl82c105.c
36294@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
36295 .dma_sff_read_status = ide_dma_sff_read_status,
36296 };
36297
36298-static const struct ide_port_info sl82c105_chipset __devinitdata = {
36299+static const struct ide_port_info sl82c105_chipset __devinitconst = {
36300 .name = DRV_NAME,
36301 .init_chipset = init_chipset_sl82c105,
36302 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
36303diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
36304index 1ccfb40..83d5779 100644
36305--- a/drivers/ide/slc90e66.c
36306+++ b/drivers/ide/slc90e66.c
36307@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
36308 .cable_detect = slc90e66_cable_detect,
36309 };
36310
36311-static const struct ide_port_info slc90e66_chipset __devinitdata = {
36312+static const struct ide_port_info slc90e66_chipset __devinitconst = {
36313 .name = DRV_NAME,
36314 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
36315 .port_ops = &slc90e66_port_ops,
36316diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
36317index 05a93d6..5f9e325 100644
36318--- a/drivers/ide/tc86c001.c
36319+++ b/drivers/ide/tc86c001.c
36320@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
36321 .dma_sff_read_status = ide_dma_sff_read_status,
36322 };
36323
36324-static const struct ide_port_info tc86c001_chipset __devinitdata = {
36325+static const struct ide_port_info tc86c001_chipset __devinitconst = {
36326 .name = DRV_NAME,
36327 .init_hwif = init_hwif_tc86c001,
36328 .port_ops = &tc86c001_port_ops,
36329diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
36330index 8773c3b..7907d6c 100644
36331--- a/drivers/ide/triflex.c
36332+++ b/drivers/ide/triflex.c
36333@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
36334 .set_dma_mode = triflex_set_mode,
36335 };
36336
36337-static const struct ide_port_info triflex_device __devinitdata = {
36338+static const struct ide_port_info triflex_device __devinitconst = {
36339 .name = DRV_NAME,
36340 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
36341 .port_ops = &triflex_port_ops,
36342diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
36343index 4b42ca0..e494a98 100644
36344--- a/drivers/ide/trm290.c
36345+++ b/drivers/ide/trm290.c
36346@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
36347 .dma_check = trm290_dma_check,
36348 };
36349
36350-static const struct ide_port_info trm290_chipset __devinitdata = {
36351+static const struct ide_port_info trm290_chipset __devinitconst = {
36352 .name = DRV_NAME,
36353 .init_hwif = init_hwif_trm290,
36354 .tp_ops = &trm290_tp_ops,
36355diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
36356index 028de26..520d5d5 100644
36357--- a/drivers/ide/via82cxxx.c
36358+++ b/drivers/ide/via82cxxx.c
36359@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
36360 .cable_detect = via82cxxx_cable_detect,
36361 };
36362
36363-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
36364+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
36365 .name = DRV_NAME,
36366 .init_chipset = init_chipset_via82cxxx,
36367 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
36368diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
36369index 2cd00b5..14de699 100644
36370--- a/drivers/ieee1394/dv1394.c
36371+++ b/drivers/ieee1394/dv1394.c
36372@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
36373 based upon DIF section and sequence
36374 */
36375
36376-static void inline
36377+static inline void
36378 frame_put_packet (struct frame *f, struct packet *p)
36379 {
36380 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
36381diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
36382index e947d8f..6a966b9 100644
36383--- a/drivers/ieee1394/hosts.c
36384+++ b/drivers/ieee1394/hosts.c
36385@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
36386 }
36387
36388 static struct hpsb_host_driver dummy_driver = {
36389+ .name = "dummy",
36390 .transmit_packet = dummy_transmit_packet,
36391 .devctl = dummy_devctl,
36392 .isoctl = dummy_isoctl
36393diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
36394index ddaab6e..8d37435 100644
36395--- a/drivers/ieee1394/init_ohci1394_dma.c
36396+++ b/drivers/ieee1394/init_ohci1394_dma.c
36397@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
36398 for (func = 0; func < 8; func++) {
36399 u32 class = read_pci_config(num,slot,func,
36400 PCI_CLASS_REVISION);
36401- if ((class == 0xffffffff))
36402+ if (class == 0xffffffff)
36403 continue; /* No device at this func */
36404
36405 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
36406diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
36407index 65c1429..5d8c11f 100644
36408--- a/drivers/ieee1394/ohci1394.c
36409+++ b/drivers/ieee1394/ohci1394.c
36410@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
36411 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
36412
36413 /* Module Parameters */
36414-static int phys_dma = 1;
36415+static int phys_dma;
36416 module_param(phys_dma, int, 0444);
36417-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
36418+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
36419
36420 static void dma_trm_tasklet(unsigned long data);
36421 static void dma_trm_reset(struct dma_trm_ctx *d);
36422diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
36423index f199896..78c9fc8 100644
36424--- a/drivers/ieee1394/sbp2.c
36425+++ b/drivers/ieee1394/sbp2.c
36426@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
36427 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
36428 MODULE_LICENSE("GPL");
36429
36430-static int sbp2_module_init(void)
36431+static int __init sbp2_module_init(void)
36432 {
36433 int ret;
36434
36435diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36436index a5dea6b..0cefe8f 100644
36437--- a/drivers/infiniband/core/cm.c
36438+++ b/drivers/infiniband/core/cm.c
36439@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36440
36441 struct cm_counter_group {
36442 struct kobject obj;
36443- atomic_long_t counter[CM_ATTR_COUNT];
36444+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36445 };
36446
36447 struct cm_counter_attribute {
36448@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36449 struct ib_mad_send_buf *msg = NULL;
36450 int ret;
36451
36452- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36453+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36454 counter[CM_REQ_COUNTER]);
36455
36456 /* Quick state check to discard duplicate REQs. */
36457@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36458 if (!cm_id_priv)
36459 return;
36460
36461- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36462+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36463 counter[CM_REP_COUNTER]);
36464 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36465 if (ret)
36466@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
36467 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36468 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36469 spin_unlock_irq(&cm_id_priv->lock);
36470- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36471+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36472 counter[CM_RTU_COUNTER]);
36473 goto out;
36474 }
36475@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
36476 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36477 dreq_msg->local_comm_id);
36478 if (!cm_id_priv) {
36479- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36480+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36481 counter[CM_DREQ_COUNTER]);
36482 cm_issue_drep(work->port, work->mad_recv_wc);
36483 return -EINVAL;
36484@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
36485 case IB_CM_MRA_REP_RCVD:
36486 break;
36487 case IB_CM_TIMEWAIT:
36488- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36489+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36490 counter[CM_DREQ_COUNTER]);
36491 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36492 goto unlock;
36493@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
36494 cm_free_msg(msg);
36495 goto deref;
36496 case IB_CM_DREQ_RCVD:
36497- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36498+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36499 counter[CM_DREQ_COUNTER]);
36500 goto unlock;
36501 default:
36502@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
36503 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36504 cm_id_priv->msg, timeout)) {
36505 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36506- atomic_long_inc(&work->port->
36507+ atomic_long_inc_unchecked(&work->port->
36508 counter_group[CM_RECV_DUPLICATES].
36509 counter[CM_MRA_COUNTER]);
36510 goto out;
36511@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
36512 break;
36513 case IB_CM_MRA_REQ_RCVD:
36514 case IB_CM_MRA_REP_RCVD:
36515- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36516+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36517 counter[CM_MRA_COUNTER]);
36518 /* fall through */
36519 default:
36520@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
36521 case IB_CM_LAP_IDLE:
36522 break;
36523 case IB_CM_MRA_LAP_SENT:
36524- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36525+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36526 counter[CM_LAP_COUNTER]);
36527 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36528 goto unlock;
36529@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
36530 cm_free_msg(msg);
36531 goto deref;
36532 case IB_CM_LAP_RCVD:
36533- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36534+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36535 counter[CM_LAP_COUNTER]);
36536 goto unlock;
36537 default:
36538@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36539 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36540 if (cur_cm_id_priv) {
36541 spin_unlock_irq(&cm.lock);
36542- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36543+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36544 counter[CM_SIDR_REQ_COUNTER]);
36545 goto out; /* Duplicate message. */
36546 }
36547@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36548 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36549 msg->retries = 1;
36550
36551- atomic_long_add(1 + msg->retries,
36552+ atomic_long_add_unchecked(1 + msg->retries,
36553 &port->counter_group[CM_XMIT].counter[attr_index]);
36554 if (msg->retries)
36555- atomic_long_add(msg->retries,
36556+ atomic_long_add_unchecked(msg->retries,
36557 &port->counter_group[CM_XMIT_RETRIES].
36558 counter[attr_index]);
36559
36560@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36561 }
36562
36563 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36564- atomic_long_inc(&port->counter_group[CM_RECV].
36565+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36566 counter[attr_id - CM_ATTR_ID_OFFSET]);
36567
36568 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36569@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36570 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36571
36572 return sprintf(buf, "%ld\n",
36573- atomic_long_read(&group->counter[cm_attr->index]));
36574+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36575 }
36576
36577-static struct sysfs_ops cm_counter_ops = {
36578+static const struct sysfs_ops cm_counter_ops = {
36579 .show = cm_show_counter
36580 };
36581
36582diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
36583index 8fd3a6f..61d8075 100644
36584--- a/drivers/infiniband/core/cma.c
36585+++ b/drivers/infiniband/core/cma.c
36586@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
36587
36588 req.private_data_len = sizeof(struct cma_hdr) +
36589 conn_param->private_data_len;
36590+ if (req.private_data_len < conn_param->private_data_len)
36591+ return -EINVAL;
36592+
36593 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36594 if (!req.private_data)
36595 return -ENOMEM;
36596@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
36597 memset(&req, 0, sizeof req);
36598 offset = cma_user_data_offset(id_priv->id.ps);
36599 req.private_data_len = offset + conn_param->private_data_len;
36600+ if (req.private_data_len < conn_param->private_data_len)
36601+ return -EINVAL;
36602+
36603 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36604 if (!private_data)
36605 return -ENOMEM;
36606diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36607index 4507043..14ad522 100644
36608--- a/drivers/infiniband/core/fmr_pool.c
36609+++ b/drivers/infiniband/core/fmr_pool.c
36610@@ -97,8 +97,8 @@ struct ib_fmr_pool {
36611
36612 struct task_struct *thread;
36613
36614- atomic_t req_ser;
36615- atomic_t flush_ser;
36616+ atomic_unchecked_t req_ser;
36617+ atomic_unchecked_t flush_ser;
36618
36619 wait_queue_head_t force_wait;
36620 };
36621@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36622 struct ib_fmr_pool *pool = pool_ptr;
36623
36624 do {
36625- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36626+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36627 ib_fmr_batch_release(pool);
36628
36629- atomic_inc(&pool->flush_ser);
36630+ atomic_inc_unchecked(&pool->flush_ser);
36631 wake_up_interruptible(&pool->force_wait);
36632
36633 if (pool->flush_function)
36634@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36635 }
36636
36637 set_current_state(TASK_INTERRUPTIBLE);
36638- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36639+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36640 !kthread_should_stop())
36641 schedule();
36642 __set_current_state(TASK_RUNNING);
36643@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36644 pool->dirty_watermark = params->dirty_watermark;
36645 pool->dirty_len = 0;
36646 spin_lock_init(&pool->pool_lock);
36647- atomic_set(&pool->req_ser, 0);
36648- atomic_set(&pool->flush_ser, 0);
36649+ atomic_set_unchecked(&pool->req_ser, 0);
36650+ atomic_set_unchecked(&pool->flush_ser, 0);
36651 init_waitqueue_head(&pool->force_wait);
36652
36653 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36654@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36655 }
36656 spin_unlock_irq(&pool->pool_lock);
36657
36658- serial = atomic_inc_return(&pool->req_ser);
36659+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36660 wake_up_process(pool->thread);
36661
36662 if (wait_event_interruptible(pool->force_wait,
36663- atomic_read(&pool->flush_ser) - serial >= 0))
36664+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36665 return -EINTR;
36666
36667 return 0;
36668@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36669 } else {
36670 list_add_tail(&fmr->list, &pool->dirty_list);
36671 if (++pool->dirty_len >= pool->dirty_watermark) {
36672- atomic_inc(&pool->req_ser);
36673+ atomic_inc_unchecked(&pool->req_ser);
36674 wake_up_process(pool->thread);
36675 }
36676 }
36677diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
36678index 158a214..1558bb7 100644
36679--- a/drivers/infiniband/core/sysfs.c
36680+++ b/drivers/infiniband/core/sysfs.c
36681@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
36682 return port_attr->show(p, port_attr, buf);
36683 }
36684
36685-static struct sysfs_ops port_sysfs_ops = {
36686+static const struct sysfs_ops port_sysfs_ops = {
36687 .show = port_attr_show
36688 };
36689
36690diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
36691index 5440da0..1194ecb 100644
36692--- a/drivers/infiniband/core/uverbs_marshall.c
36693+++ b/drivers/infiniband/core/uverbs_marshall.c
36694@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36695 dst->grh.sgid_index = src->grh.sgid_index;
36696 dst->grh.hop_limit = src->grh.hop_limit;
36697 dst->grh.traffic_class = src->grh.traffic_class;
36698+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
36699 dst->dlid = src->dlid;
36700 dst->sl = src->sl;
36701 dst->src_path_bits = src->src_path_bits;
36702 dst->static_rate = src->static_rate;
36703 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
36704 dst->port_num = src->port_num;
36705+ dst->reserved = 0;
36706 }
36707 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
36708
36709 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36710 struct ib_qp_attr *src)
36711 {
36712+ dst->qp_state = src->qp_state;
36713 dst->cur_qp_state = src->cur_qp_state;
36714 dst->path_mtu = src->path_mtu;
36715 dst->path_mig_state = src->path_mig_state;
36716@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36717 dst->rnr_retry = src->rnr_retry;
36718 dst->alt_port_num = src->alt_port_num;
36719 dst->alt_timeout = src->alt_timeout;
36720+ memset(dst->reserved, 0, sizeof(dst->reserved));
36721 }
36722 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
36723
36724diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
36725index 100da85..e0d6609 100644
36726--- a/drivers/infiniband/hw/ipath/ipath_fs.c
36727+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
36728@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
36729 struct infinipath_counters counters;
36730 struct ipath_devdata *dd;
36731
36732+ pax_track_stack();
36733+
36734 dd = file->f_path.dentry->d_inode->i_private;
36735 dd->ipath_f_read_counters(dd, &counters);
36736
36737@@ -122,6 +124,8 @@ static const struct file_operations atomic_counters_ops = {
36738 };
36739
36740 static ssize_t flash_read(struct file *file, char __user *buf,
36741+ size_t count, loff_t *ppos) __size_overflow(3);
36742+static ssize_t flash_read(struct file *file, char __user *buf,
36743 size_t count, loff_t *ppos)
36744 {
36745 struct ipath_devdata *dd;
36746diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36747index cbde0cf..afaf55c 100644
36748--- a/drivers/infiniband/hw/nes/nes.c
36749+++ b/drivers/infiniband/hw/nes/nes.c
36750@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36751 LIST_HEAD(nes_adapter_list);
36752 static LIST_HEAD(nes_dev_list);
36753
36754-atomic_t qps_destroyed;
36755+atomic_unchecked_t qps_destroyed;
36756
36757 static unsigned int ee_flsh_adapter;
36758 static unsigned int sysfs_nonidx_addr;
36759@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36760 struct nes_adapter *nesadapter = nesdev->nesadapter;
36761 u32 qp_id;
36762
36763- atomic_inc(&qps_destroyed);
36764+ atomic_inc_unchecked(&qps_destroyed);
36765
36766 /* Free the control structures */
36767
36768diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36769index bcc6abc..9c76b2f 100644
36770--- a/drivers/infiniband/hw/nes/nes.h
36771+++ b/drivers/infiniband/hw/nes/nes.h
36772@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
36773 extern unsigned int wqm_quanta;
36774 extern struct list_head nes_adapter_list;
36775
36776-extern atomic_t cm_connects;
36777-extern atomic_t cm_accepts;
36778-extern atomic_t cm_disconnects;
36779-extern atomic_t cm_closes;
36780-extern atomic_t cm_connecteds;
36781-extern atomic_t cm_connect_reqs;
36782-extern atomic_t cm_rejects;
36783-extern atomic_t mod_qp_timouts;
36784-extern atomic_t qps_created;
36785-extern atomic_t qps_destroyed;
36786-extern atomic_t sw_qps_destroyed;
36787+extern atomic_unchecked_t cm_connects;
36788+extern atomic_unchecked_t cm_accepts;
36789+extern atomic_unchecked_t cm_disconnects;
36790+extern atomic_unchecked_t cm_closes;
36791+extern atomic_unchecked_t cm_connecteds;
36792+extern atomic_unchecked_t cm_connect_reqs;
36793+extern atomic_unchecked_t cm_rejects;
36794+extern atomic_unchecked_t mod_qp_timouts;
36795+extern atomic_unchecked_t qps_created;
36796+extern atomic_unchecked_t qps_destroyed;
36797+extern atomic_unchecked_t sw_qps_destroyed;
36798 extern u32 mh_detected;
36799 extern u32 mh_pauses_sent;
36800 extern u32 cm_packets_sent;
36801@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
36802 extern u32 cm_listens_created;
36803 extern u32 cm_listens_destroyed;
36804 extern u32 cm_backlog_drops;
36805-extern atomic_t cm_loopbacks;
36806-extern atomic_t cm_nodes_created;
36807-extern atomic_t cm_nodes_destroyed;
36808-extern atomic_t cm_accel_dropped_pkts;
36809-extern atomic_t cm_resets_recvd;
36810+extern atomic_unchecked_t cm_loopbacks;
36811+extern atomic_unchecked_t cm_nodes_created;
36812+extern atomic_unchecked_t cm_nodes_destroyed;
36813+extern atomic_unchecked_t cm_accel_dropped_pkts;
36814+extern atomic_unchecked_t cm_resets_recvd;
36815
36816 extern u32 int_mod_timer_init;
36817 extern u32 int_mod_cq_depth_256;
36818diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36819index 73473db..5ed06e8 100644
36820--- a/drivers/infiniband/hw/nes/nes_cm.c
36821+++ b/drivers/infiniband/hw/nes/nes_cm.c
36822@@ -69,11 +69,11 @@ u32 cm_packets_received;
36823 u32 cm_listens_created;
36824 u32 cm_listens_destroyed;
36825 u32 cm_backlog_drops;
36826-atomic_t cm_loopbacks;
36827-atomic_t cm_nodes_created;
36828-atomic_t cm_nodes_destroyed;
36829-atomic_t cm_accel_dropped_pkts;
36830-atomic_t cm_resets_recvd;
36831+atomic_unchecked_t cm_loopbacks;
36832+atomic_unchecked_t cm_nodes_created;
36833+atomic_unchecked_t cm_nodes_destroyed;
36834+atomic_unchecked_t cm_accel_dropped_pkts;
36835+atomic_unchecked_t cm_resets_recvd;
36836
36837 static inline int mini_cm_accelerated(struct nes_cm_core *,
36838 struct nes_cm_node *);
36839@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
36840
36841 static struct nes_cm_core *g_cm_core;
36842
36843-atomic_t cm_connects;
36844-atomic_t cm_accepts;
36845-atomic_t cm_disconnects;
36846-atomic_t cm_closes;
36847-atomic_t cm_connecteds;
36848-atomic_t cm_connect_reqs;
36849-atomic_t cm_rejects;
36850+atomic_unchecked_t cm_connects;
36851+atomic_unchecked_t cm_accepts;
36852+atomic_unchecked_t cm_disconnects;
36853+atomic_unchecked_t cm_closes;
36854+atomic_unchecked_t cm_connecteds;
36855+atomic_unchecked_t cm_connect_reqs;
36856+atomic_unchecked_t cm_rejects;
36857
36858
36859 /**
36860@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36861 cm_node->rem_mac);
36862
36863 add_hte_node(cm_core, cm_node);
36864- atomic_inc(&cm_nodes_created);
36865+ atomic_inc_unchecked(&cm_nodes_created);
36866
36867 return cm_node;
36868 }
36869@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36870 }
36871
36872 atomic_dec(&cm_core->node_cnt);
36873- atomic_inc(&cm_nodes_destroyed);
36874+ atomic_inc_unchecked(&cm_nodes_destroyed);
36875 nesqp = cm_node->nesqp;
36876 if (nesqp) {
36877 nesqp->cm_node = NULL;
36878@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36879
36880 static void drop_packet(struct sk_buff *skb)
36881 {
36882- atomic_inc(&cm_accel_dropped_pkts);
36883+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36884 dev_kfree_skb_any(skb);
36885 }
36886
36887@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36888
36889 int reset = 0; /* whether to send reset in case of err.. */
36890 int passive_state;
36891- atomic_inc(&cm_resets_recvd);
36892+ atomic_inc_unchecked(&cm_resets_recvd);
36893 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36894 " refcnt=%d\n", cm_node, cm_node->state,
36895 atomic_read(&cm_node->ref_count));
36896@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36897 rem_ref_cm_node(cm_node->cm_core, cm_node);
36898 return NULL;
36899 }
36900- atomic_inc(&cm_loopbacks);
36901+ atomic_inc_unchecked(&cm_loopbacks);
36902 loopbackremotenode->loopbackpartner = cm_node;
36903 loopbackremotenode->tcp_cntxt.rcv_wscale =
36904 NES_CM_DEFAULT_RCV_WND_SCALE;
36905@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36906 add_ref_cm_node(cm_node);
36907 } else if (cm_node->state == NES_CM_STATE_TSA) {
36908 rem_ref_cm_node(cm_core, cm_node);
36909- atomic_inc(&cm_accel_dropped_pkts);
36910+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36911 dev_kfree_skb_any(skb);
36912 break;
36913 }
36914@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36915
36916 if ((cm_id) && (cm_id->event_handler)) {
36917 if (issue_disconn) {
36918- atomic_inc(&cm_disconnects);
36919+ atomic_inc_unchecked(&cm_disconnects);
36920 cm_event.event = IW_CM_EVENT_DISCONNECT;
36921 cm_event.status = disconn_status;
36922 cm_event.local_addr = cm_id->local_addr;
36923@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36924 }
36925
36926 if (issue_close) {
36927- atomic_inc(&cm_closes);
36928+ atomic_inc_unchecked(&cm_closes);
36929 nes_disconnect(nesqp, 1);
36930
36931 cm_id->provider_data = nesqp;
36932@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36933
36934 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36935 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36936- atomic_inc(&cm_accepts);
36937+ atomic_inc_unchecked(&cm_accepts);
36938
36939 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36940 atomic_read(&nesvnic->netdev->refcnt));
36941@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36942
36943 struct nes_cm_core *cm_core;
36944
36945- atomic_inc(&cm_rejects);
36946+ atomic_inc_unchecked(&cm_rejects);
36947 cm_node = (struct nes_cm_node *) cm_id->provider_data;
36948 loopback = cm_node->loopbackpartner;
36949 cm_core = cm_node->cm_core;
36950@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36951 ntohl(cm_id->local_addr.sin_addr.s_addr),
36952 ntohs(cm_id->local_addr.sin_port));
36953
36954- atomic_inc(&cm_connects);
36955+ atomic_inc_unchecked(&cm_connects);
36956 nesqp->active_conn = 1;
36957
36958 /* cache the cm_id in the qp */
36959@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36960 if (nesqp->destroyed) {
36961 return;
36962 }
36963- atomic_inc(&cm_connecteds);
36964+ atomic_inc_unchecked(&cm_connecteds);
36965 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36966 " local port 0x%04X. jiffies = %lu.\n",
36967 nesqp->hwqp.qp_id,
36968@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36969
36970 ret = cm_id->event_handler(cm_id, &cm_event);
36971 cm_id->add_ref(cm_id);
36972- atomic_inc(&cm_closes);
36973+ atomic_inc_unchecked(&cm_closes);
36974 cm_event.event = IW_CM_EVENT_CLOSE;
36975 cm_event.status = IW_CM_EVENT_STATUS_OK;
36976 cm_event.provider_data = cm_id->provider_data;
36977@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36978 return;
36979 cm_id = cm_node->cm_id;
36980
36981- atomic_inc(&cm_connect_reqs);
36982+ atomic_inc_unchecked(&cm_connect_reqs);
36983 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36984 cm_node, cm_id, jiffies);
36985
36986@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36987 return;
36988 cm_id = cm_node->cm_id;
36989
36990- atomic_inc(&cm_connect_reqs);
36991+ atomic_inc_unchecked(&cm_connect_reqs);
36992 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36993 cm_node, cm_id, jiffies);
36994
36995diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36996index e593af3..870694a 100644
36997--- a/drivers/infiniband/hw/nes/nes_nic.c
36998+++ b/drivers/infiniband/hw/nes/nes_nic.c
36999@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37000 target_stat_values[++index] = mh_detected;
37001 target_stat_values[++index] = mh_pauses_sent;
37002 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37003- target_stat_values[++index] = atomic_read(&cm_connects);
37004- target_stat_values[++index] = atomic_read(&cm_accepts);
37005- target_stat_values[++index] = atomic_read(&cm_disconnects);
37006- target_stat_values[++index] = atomic_read(&cm_connecteds);
37007- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37008- target_stat_values[++index] = atomic_read(&cm_rejects);
37009- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37010- target_stat_values[++index] = atomic_read(&qps_created);
37011- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37012- target_stat_values[++index] = atomic_read(&qps_destroyed);
37013- target_stat_values[++index] = atomic_read(&cm_closes);
37014+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37015+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37016+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37017+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37018+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37019+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37020+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37021+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37022+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37023+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37024+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37025 target_stat_values[++index] = cm_packets_sent;
37026 target_stat_values[++index] = cm_packets_bounced;
37027 target_stat_values[++index] = cm_packets_created;
37028@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37029 target_stat_values[++index] = cm_listens_created;
37030 target_stat_values[++index] = cm_listens_destroyed;
37031 target_stat_values[++index] = cm_backlog_drops;
37032- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37033- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37034- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37035- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37036- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37037+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37038+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37039+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37040+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37041+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37042 target_stat_values[++index] = int_mod_timer_init;
37043 target_stat_values[++index] = int_mod_cq_depth_1;
37044 target_stat_values[++index] = int_mod_cq_depth_4;
37045diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37046index a680c42..f914deb 100644
37047--- a/drivers/infiniband/hw/nes/nes_verbs.c
37048+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37049@@ -45,9 +45,9 @@
37050
37051 #include <rdma/ib_umem.h>
37052
37053-atomic_t mod_qp_timouts;
37054-atomic_t qps_created;
37055-atomic_t sw_qps_destroyed;
37056+atomic_unchecked_t mod_qp_timouts;
37057+atomic_unchecked_t qps_created;
37058+atomic_unchecked_t sw_qps_destroyed;
37059
37060 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37061
37062@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37063 if (init_attr->create_flags)
37064 return ERR_PTR(-EINVAL);
37065
37066- atomic_inc(&qps_created);
37067+ atomic_inc_unchecked(&qps_created);
37068 switch (init_attr->qp_type) {
37069 case IB_QPT_RC:
37070 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37071@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37072 struct iw_cm_event cm_event;
37073 int ret;
37074
37075- atomic_inc(&sw_qps_destroyed);
37076+ atomic_inc_unchecked(&sw_qps_destroyed);
37077 nesqp->destroyed = 1;
37078
37079 /* Blow away the connection if it exists. */
37080diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37081index ac11be0..3883c04 100644
37082--- a/drivers/input/gameport/gameport.c
37083+++ b/drivers/input/gameport/gameport.c
37084@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
37085 */
37086 static void gameport_init_port(struct gameport *gameport)
37087 {
37088- static atomic_t gameport_no = ATOMIC_INIT(0);
37089+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37090
37091 __module_get(THIS_MODULE);
37092
37093 mutex_init(&gameport->drv_mutex);
37094 device_initialize(&gameport->dev);
37095- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
37096+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37097 gameport->dev.bus = &gameport_bus;
37098 gameport->dev.release = gameport_release_port;
37099 if (gameport->parent)
37100diff --git a/drivers/input/input.c b/drivers/input/input.c
37101index c82ae82..8cfb9cb 100644
37102--- a/drivers/input/input.c
37103+++ b/drivers/input/input.c
37104@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
37105 */
37106 int input_register_device(struct input_dev *dev)
37107 {
37108- static atomic_t input_no = ATOMIC_INIT(0);
37109+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37110 struct input_handler *handler;
37111 const char *path;
37112 int error;
37113@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
37114 dev->setkeycode = input_default_setkeycode;
37115
37116 dev_set_name(&dev->dev, "input%ld",
37117- (unsigned long) atomic_inc_return(&input_no) - 1);
37118+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37119
37120 error = device_add(&dev->dev);
37121 if (error)
37122diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37123index ca13a6b..b032b0c 100644
37124--- a/drivers/input/joystick/sidewinder.c
37125+++ b/drivers/input/joystick/sidewinder.c
37126@@ -30,6 +30,7 @@
37127 #include <linux/kernel.h>
37128 #include <linux/module.h>
37129 #include <linux/slab.h>
37130+#include <linux/sched.h>
37131 #include <linux/init.h>
37132 #include <linux/input.h>
37133 #include <linux/gameport.h>
37134@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
37135 unsigned char buf[SW_LENGTH];
37136 int i;
37137
37138+ pax_track_stack();
37139+
37140 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
37141
37142 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
37143diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37144index 79e3edc..01412b9 100644
37145--- a/drivers/input/joystick/xpad.c
37146+++ b/drivers/input/joystick/xpad.c
37147@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37148
37149 static int xpad_led_probe(struct usb_xpad *xpad)
37150 {
37151- static atomic_t led_seq = ATOMIC_INIT(0);
37152+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37153 long led_no;
37154 struct xpad_led *led;
37155 struct led_classdev *led_cdev;
37156@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37157 if (!led)
37158 return -ENOMEM;
37159
37160- led_no = (long)atomic_inc_return(&led_seq) - 1;
37161+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37162
37163 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37164 led->xpad = xpad;
37165diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37166index 0236f0d..c7327f1 100644
37167--- a/drivers/input/serio/serio.c
37168+++ b/drivers/input/serio/serio.c
37169@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
37170 */
37171 static void serio_init_port(struct serio *serio)
37172 {
37173- static atomic_t serio_no = ATOMIC_INIT(0);
37174+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37175
37176 __module_get(THIS_MODULE);
37177
37178@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
37179 mutex_init(&serio->drv_mutex);
37180 device_initialize(&serio->dev);
37181 dev_set_name(&serio->dev, "serio%ld",
37182- (long)atomic_inc_return(&serio_no) - 1);
37183+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37184 serio->dev.bus = &serio_bus;
37185 serio->dev.release = serio_release_port;
37186 if (serio->parent) {
37187diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
37188index 33dcd8d..2783d25 100644
37189--- a/drivers/isdn/gigaset/common.c
37190+++ b/drivers/isdn/gigaset/common.c
37191@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
37192 cs->commands_pending = 0;
37193 cs->cur_at_seq = 0;
37194 cs->gotfwver = -1;
37195- cs->open_count = 0;
37196+ local_set(&cs->open_count, 0);
37197 cs->dev = NULL;
37198 cs->tty = NULL;
37199 cs->tty_dev = NULL;
37200diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
37201index a2f6125..6a70677 100644
37202--- a/drivers/isdn/gigaset/gigaset.h
37203+++ b/drivers/isdn/gigaset/gigaset.h
37204@@ -34,6 +34,7 @@
37205 #include <linux/tty_driver.h>
37206 #include <linux/list.h>
37207 #include <asm/atomic.h>
37208+#include <asm/local.h>
37209
37210 #define GIG_VERSION {0,5,0,0}
37211 #define GIG_COMPAT {0,4,0,0}
37212@@ -446,7 +447,7 @@ struct cardstate {
37213 spinlock_t cmdlock;
37214 unsigned curlen, cmdbytes;
37215
37216- unsigned open_count;
37217+ local_t open_count;
37218 struct tty_struct *tty;
37219 struct tasklet_struct if_wake_tasklet;
37220 unsigned control_state;
37221diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37222index b3065b8..c7e8cc9 100644
37223--- a/drivers/isdn/gigaset/interface.c
37224+++ b/drivers/isdn/gigaset/interface.c
37225@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37226 return -ERESTARTSYS; // FIXME -EINTR?
37227 tty->driver_data = cs;
37228
37229- ++cs->open_count;
37230-
37231- if (cs->open_count == 1) {
37232+ if (local_inc_return(&cs->open_count) == 1) {
37233 spin_lock_irqsave(&cs->lock, flags);
37234 cs->tty = tty;
37235 spin_unlock_irqrestore(&cs->lock, flags);
37236@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37237
37238 if (!cs->connected)
37239 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37240- else if (!cs->open_count)
37241+ else if (!local_read(&cs->open_count))
37242 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37243 else {
37244- if (!--cs->open_count) {
37245+ if (!local_dec_return(&cs->open_count)) {
37246 spin_lock_irqsave(&cs->lock, flags);
37247 cs->tty = NULL;
37248 spin_unlock_irqrestore(&cs->lock, flags);
37249@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
37250 if (!cs->connected) {
37251 gig_dbg(DEBUG_IF, "not connected");
37252 retval = -ENODEV;
37253- } else if (!cs->open_count)
37254+ } else if (!local_read(&cs->open_count))
37255 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37256 else {
37257 retval = 0;
37258@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
37259 if (!cs->connected) {
37260 gig_dbg(DEBUG_IF, "not connected");
37261 retval = -ENODEV;
37262- } else if (!cs->open_count)
37263+ } else if (!local_read(&cs->open_count))
37264 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37265 else if (cs->mstate != MS_LOCKED) {
37266 dev_warn(cs->dev, "can't write to unlocked device\n");
37267@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
37268 if (!cs->connected) {
37269 gig_dbg(DEBUG_IF, "not connected");
37270 retval = -ENODEV;
37271- } else if (!cs->open_count)
37272+ } else if (!local_read(&cs->open_count))
37273 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37274 else if (cs->mstate != MS_LOCKED) {
37275 dev_warn(cs->dev, "can't write to unlocked device\n");
37276@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
37277
37278 if (!cs->connected)
37279 gig_dbg(DEBUG_IF, "not connected");
37280- else if (!cs->open_count)
37281+ else if (!local_read(&cs->open_count))
37282 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37283 else if (cs->mstate != MS_LOCKED)
37284 dev_warn(cs->dev, "can't write to unlocked device\n");
37285@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
37286
37287 if (!cs->connected)
37288 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37289- else if (!cs->open_count)
37290+ else if (!local_read(&cs->open_count))
37291 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37292 else {
37293 //FIXME
37294@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
37295
37296 if (!cs->connected)
37297 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37298- else if (!cs->open_count)
37299+ else if (!local_read(&cs->open_count))
37300 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37301 else {
37302 //FIXME
37303@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
37304 goto out;
37305 }
37306
37307- if (!cs->open_count) {
37308+ if (!local_read(&cs->open_count)) {
37309 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37310 goto out;
37311 }
37312diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37313index a7c0083..62a7cb6 100644
37314--- a/drivers/isdn/hardware/avm/b1.c
37315+++ b/drivers/isdn/hardware/avm/b1.c
37316@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
37317 }
37318 if (left) {
37319 if (t4file->user) {
37320- if (copy_from_user(buf, dp, left))
37321+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37322 return -EFAULT;
37323 } else {
37324 memcpy(buf, dp, left);
37325@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
37326 }
37327 if (left) {
37328 if (config->user) {
37329- if (copy_from_user(buf, dp, left))
37330+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37331 return -EFAULT;
37332 } else {
37333 memcpy(buf, dp, left);
37334diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
37335index f130724..c373c68 100644
37336--- a/drivers/isdn/hardware/eicon/capidtmf.c
37337+++ b/drivers/isdn/hardware/eicon/capidtmf.c
37338@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
37339 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
37340 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
37341
37342+ pax_track_stack();
37343
37344 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
37345 {
37346diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
37347index 4d425c6..a9be6c4 100644
37348--- a/drivers/isdn/hardware/eicon/capifunc.c
37349+++ b/drivers/isdn/hardware/eicon/capifunc.c
37350@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
37351 IDI_SYNC_REQ req;
37352 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37353
37354+ pax_track_stack();
37355+
37356 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37357
37358 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37359diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
37360index 3029234..ef0d9e2 100644
37361--- a/drivers/isdn/hardware/eicon/diddfunc.c
37362+++ b/drivers/isdn/hardware/eicon/diddfunc.c
37363@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37364 IDI_SYNC_REQ req;
37365 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37366
37367+ pax_track_stack();
37368+
37369 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37370
37371 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37372diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
37373index d36a4c0..11e7d1a 100644
37374--- a/drivers/isdn/hardware/eicon/divasfunc.c
37375+++ b/drivers/isdn/hardware/eicon/divasfunc.c
37376@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37377 IDI_SYNC_REQ req;
37378 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37379
37380+ pax_track_stack();
37381+
37382 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37383
37384 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37385diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
37386index 85784a7..a19ca98 100644
37387--- a/drivers/isdn/hardware/eicon/divasync.h
37388+++ b/drivers/isdn/hardware/eicon/divasync.h
37389@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
37390 } diva_didd_add_adapter_t;
37391 typedef struct _diva_didd_remove_adapter {
37392 IDI_CALL p_request;
37393-} diva_didd_remove_adapter_t;
37394+} __no_const diva_didd_remove_adapter_t;
37395 typedef struct _diva_didd_read_adapter_array {
37396 void * buffer;
37397 dword length;
37398diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
37399index db87d51..7d09acf 100644
37400--- a/drivers/isdn/hardware/eicon/idifunc.c
37401+++ b/drivers/isdn/hardware/eicon/idifunc.c
37402@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37403 IDI_SYNC_REQ req;
37404 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37405
37406+ pax_track_stack();
37407+
37408 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37409
37410 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37411diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
37412index ae89fb8..0fab299 100644
37413--- a/drivers/isdn/hardware/eicon/message.c
37414+++ b/drivers/isdn/hardware/eicon/message.c
37415@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
37416 dword d;
37417 word w;
37418
37419+ pax_track_stack();
37420+
37421 a = plci->adapter;
37422 Id = ((word)plci->Id<<8)|a->Id;
37423 PUT_WORD(&SS_Ind[4],0x0000);
37424@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
37425 word j, n, w;
37426 dword d;
37427
37428+ pax_track_stack();
37429+
37430
37431 for(i=0;i<8;i++) bp_parms[i].length = 0;
37432 for(i=0;i<2;i++) global_config[i].length = 0;
37433@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
37434 const byte llc3[] = {4,3,2,2,6,6,0};
37435 const byte header[] = {0,2,3,3,0,0,0};
37436
37437+ pax_track_stack();
37438+
37439 for(i=0;i<8;i++) bp_parms[i].length = 0;
37440 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
37441 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
37442@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
37443 word appl_number_group_type[MAX_APPL];
37444 PLCI *auxplci;
37445
37446+ pax_track_stack();
37447+
37448 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
37449
37450 if(!a->group_optimization_enabled)
37451diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
37452index a564b75..f3cf8b5 100644
37453--- a/drivers/isdn/hardware/eicon/mntfunc.c
37454+++ b/drivers/isdn/hardware/eicon/mntfunc.c
37455@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37456 IDI_SYNC_REQ req;
37457 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37458
37459+ pax_track_stack();
37460+
37461 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37462
37463 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37464diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
37465index a3bd163..8956575 100644
37466--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
37467+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
37468@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
37469 typedef struct _diva_os_idi_adapter_interface {
37470 diva_init_card_proc_t cleanup_adapter_proc;
37471 diva_cmd_card_proc_t cmd_proc;
37472-} diva_os_idi_adapter_interface_t;
37473+} __no_const diva_os_idi_adapter_interface_t;
37474
37475 typedef struct _diva_os_xdi_adapter {
37476 struct list_head link;
37477diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
37478index adb1e8c..21b590b 100644
37479--- a/drivers/isdn/i4l/isdn_common.c
37480+++ b/drivers/isdn/i4l/isdn_common.c
37481@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
37482 } iocpar;
37483 void __user *argp = (void __user *)arg;
37484
37485+ pax_track_stack();
37486+
37487 #define name iocpar.name
37488 #define bname iocpar.bname
37489 #define iocts iocpar.iocts
37490diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
37491index 90b56ed..5ed3305 100644
37492--- a/drivers/isdn/i4l/isdn_net.c
37493+++ b/drivers/isdn/i4l/isdn_net.c
37494@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
37495 {
37496 isdn_net_local *lp = netdev_priv(dev);
37497 unsigned char *p;
37498- ushort len = 0;
37499+ int len = 0;
37500
37501 switch (lp->p_encap) {
37502 case ISDN_NET_ENCAP_ETHER:
37503diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37504index bf7997a..cf091db 100644
37505--- a/drivers/isdn/icn/icn.c
37506+++ b/drivers/isdn/icn/icn.c
37507@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
37508 if (count > len)
37509 count = len;
37510 if (user) {
37511- if (copy_from_user(msg, buf, count))
37512+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37513 return -EFAULT;
37514 } else
37515 memcpy(msg, buf, count);
37516diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
37517index feb0fa4..f76f830 100644
37518--- a/drivers/isdn/mISDN/socket.c
37519+++ b/drivers/isdn/mISDN/socket.c
37520@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37521 if (dev) {
37522 struct mISDN_devinfo di;
37523
37524+ memset(&di, 0, sizeof(di));
37525 di.id = dev->id;
37526 di.Dprotocols = dev->Dprotocols;
37527 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37528@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37529 if (dev) {
37530 struct mISDN_devinfo di;
37531
37532+ memset(&di, 0, sizeof(di));
37533 di.id = dev->id;
37534 di.Dprotocols = dev->Dprotocols;
37535 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37536diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
37537index 485be8b..f0225bc 100644
37538--- a/drivers/isdn/sc/interrupt.c
37539+++ b/drivers/isdn/sc/interrupt.c
37540@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37541 }
37542 else if(callid>=0x0000 && callid<=0x7FFF)
37543 {
37544+ int len;
37545+
37546 pr_debug("%s: Got Incoming Call\n",
37547 sc_adapter[card]->devicename);
37548- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
37549- strcpy(setup.eazmsn,
37550- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
37551+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
37552+ sizeof(setup.phone));
37553+ if (len >= sizeof(setup.phone))
37554+ continue;
37555+ len = strlcpy(setup.eazmsn,
37556+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37557+ sizeof(setup.eazmsn));
37558+ if (len >= sizeof(setup.eazmsn))
37559+ continue;
37560 setup.si1 = 7;
37561 setup.si2 = 0;
37562 setup.plan = 0;
37563@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37564 * Handle a GetMyNumber Rsp
37565 */
37566 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
37567- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
37568+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37569+ rcvmsg.msg_data.byte_array,
37570+ sizeof(rcvmsg.msg_data.byte_array));
37571 continue;
37572 }
37573
37574diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37575index 8744d24..d1f9a9a 100644
37576--- a/drivers/lguest/core.c
37577+++ b/drivers/lguest/core.c
37578@@ -91,9 +91,17 @@ static __init int map_switcher(void)
37579 * it's worked so far. The end address needs +1 because __get_vm_area
37580 * allocates an extra guard page, so we need space for that.
37581 */
37582+
37583+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37584+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37585+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37586+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37587+#else
37588 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37589 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37590 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37591+#endif
37592+
37593 if (!switcher_vma) {
37594 err = -ENOMEM;
37595 printk("lguest: could not map switcher pages high\n");
37596@@ -118,7 +126,7 @@ static __init int map_switcher(void)
37597 * Now the Switcher is mapped at the right address, we can't fail!
37598 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
37599 */
37600- memcpy(switcher_vma->addr, start_switcher_text,
37601+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37602 end_switcher_text - start_switcher_text);
37603
37604 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37605diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
37606index bd16323..ab460f7 100644
37607--- a/drivers/lguest/lguest_user.c
37608+++ b/drivers/lguest/lguest_user.c
37609@@ -194,6 +194,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
37610 * Once our Guest is initialized, the Launcher makes it run by reading
37611 * from /dev/lguest.
37612 */
37613+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
37614 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
37615 {
37616 struct lguest *lg = file->private_data;
37617diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37618index 6ae3888..8b38145 100644
37619--- a/drivers/lguest/x86/core.c
37620+++ b/drivers/lguest/x86/core.c
37621@@ -59,7 +59,7 @@ static struct {
37622 /* Offset from where switcher.S was compiled to where we've copied it */
37623 static unsigned long switcher_offset(void)
37624 {
37625- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37626+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37627 }
37628
37629 /* This cpu's struct lguest_pages. */
37630@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37631 * These copies are pretty cheap, so we do them unconditionally: */
37632 /* Save the current Host top-level page directory.
37633 */
37634+
37635+#ifdef CONFIG_PAX_PER_CPU_PGD
37636+ pages->state.host_cr3 = read_cr3();
37637+#else
37638 pages->state.host_cr3 = __pa(current->mm->pgd);
37639+#endif
37640+
37641 /*
37642 * Set up the Guest's page tables to see this CPU's pages (and no
37643 * other CPU's pages).
37644@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
37645 * compiled-in switcher code and the high-mapped copy we just made.
37646 */
37647 for (i = 0; i < IDT_ENTRIES; i++)
37648- default_idt_entries[i] += switcher_offset();
37649+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37650
37651 /*
37652 * Set up the Switcher's per-cpu areas.
37653@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
37654 * it will be undisturbed when we switch. To change %cs and jump we
37655 * need this structure to feed to Intel's "lcall" instruction.
37656 */
37657- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37658+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37659 lguest_entry.segment = LGUEST_CS;
37660
37661 /*
37662diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37663index 40634b0..4f5855e 100644
37664--- a/drivers/lguest/x86/switcher_32.S
37665+++ b/drivers/lguest/x86/switcher_32.S
37666@@ -87,6 +87,7 @@
37667 #include <asm/page.h>
37668 #include <asm/segment.h>
37669 #include <asm/lguest.h>
37670+#include <asm/processor-flags.h>
37671
37672 // We mark the start of the code to copy
37673 // It's placed in .text tho it's never run here
37674@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37675 // Changes type when we load it: damn Intel!
37676 // For after we switch over our page tables
37677 // That entry will be read-only: we'd crash.
37678+
37679+#ifdef CONFIG_PAX_KERNEXEC
37680+ mov %cr0, %edx
37681+ xor $X86_CR0_WP, %edx
37682+ mov %edx, %cr0
37683+#endif
37684+
37685 movl $(GDT_ENTRY_TSS*8), %edx
37686 ltr %dx
37687
37688@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37689 // Let's clear it again for our return.
37690 // The GDT descriptor of the Host
37691 // Points to the table after two "size" bytes
37692- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37693+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37694 // Clear "used" from type field (byte 5, bit 2)
37695- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37696+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37697+
37698+#ifdef CONFIG_PAX_KERNEXEC
37699+ mov %cr0, %eax
37700+ xor $X86_CR0_WP, %eax
37701+ mov %eax, %cr0
37702+#endif
37703
37704 // Once our page table's switched, the Guest is live!
37705 // The Host fades as we run this final step.
37706@@ -295,13 +309,12 @@ deliver_to_host:
37707 // I consulted gcc, and it gave
37708 // These instructions, which I gladly credit:
37709 leal (%edx,%ebx,8), %eax
37710- movzwl (%eax),%edx
37711- movl 4(%eax), %eax
37712- xorw %ax, %ax
37713- orl %eax, %edx
37714+ movl 4(%eax), %edx
37715+ movw (%eax), %dx
37716 // Now the address of the handler's in %edx
37717 // We call it now: its "iret" drops us home.
37718- jmp *%edx
37719+ ljmp $__KERNEL_CS, $1f
37720+1: jmp *%edx
37721
37722 // Every interrupt can come to us here
37723 // But we must truly tell each apart.
37724diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
37725index 588a5b0..b71db89 100644
37726--- a/drivers/macintosh/macio_asic.c
37727+++ b/drivers/macintosh/macio_asic.c
37728@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
37729 * MacIO is matched against any Apple ID, it's probe() function
37730 * will then decide wether it applies or not
37731 */
37732-static const struct pci_device_id __devinitdata pci_ids [] = { {
37733+static const struct pci_device_id __devinitconst pci_ids [] = { {
37734 .vendor = PCI_VENDOR_ID_APPLE,
37735 .device = PCI_ANY_ID,
37736 .subvendor = PCI_ANY_ID,
37737diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
37738index a348bb0..ecd9b3f 100644
37739--- a/drivers/macintosh/via-pmu-backlight.c
37740+++ b/drivers/macintosh/via-pmu-backlight.c
37741@@ -15,7 +15,7 @@
37742
37743 #define MAX_PMU_LEVEL 0xFF
37744
37745-static struct backlight_ops pmu_backlight_data;
37746+static const struct backlight_ops pmu_backlight_data;
37747 static DEFINE_SPINLOCK(pmu_backlight_lock);
37748 static int sleeping, uses_pmu_bl;
37749 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
37750@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
37751 return bd->props.brightness;
37752 }
37753
37754-static struct backlight_ops pmu_backlight_data = {
37755+static const struct backlight_ops pmu_backlight_data = {
37756 .get_brightness = pmu_backlight_get_brightness,
37757 .update_status = pmu_backlight_update_status,
37758
37759diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
37760index 6f308a4..b5f7ff7 100644
37761--- a/drivers/macintosh/via-pmu.c
37762+++ b/drivers/macintosh/via-pmu.c
37763@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
37764 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
37765 }
37766
37767-static struct platform_suspend_ops pmu_pm_ops = {
37768+static const struct platform_suspend_ops pmu_pm_ops = {
37769 .enter = powerbook_sleep,
37770 .valid = pmu_sleep_valid,
37771 };
37772diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37773index 818b617..4656e38 100644
37774--- a/drivers/md/dm-ioctl.c
37775+++ b/drivers/md/dm-ioctl.c
37776@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37777 cmd == DM_LIST_VERSIONS_CMD)
37778 return 0;
37779
37780- if ((cmd == DM_DEV_CREATE_CMD)) {
37781+ if (cmd == DM_DEV_CREATE_CMD) {
37782 if (!*param->name) {
37783 DMWARN("name not supplied when creating device");
37784 return -EINVAL;
37785diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37786index 6021d0a..a878643 100644
37787--- a/drivers/md/dm-raid1.c
37788+++ b/drivers/md/dm-raid1.c
37789@@ -41,7 +41,7 @@ enum dm_raid1_error {
37790
37791 struct mirror {
37792 struct mirror_set *ms;
37793- atomic_t error_count;
37794+ atomic_unchecked_t error_count;
37795 unsigned long error_type;
37796 struct dm_dev *dev;
37797 sector_t offset;
37798@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37799 * simple way to tell if a device has encountered
37800 * errors.
37801 */
37802- atomic_inc(&m->error_count);
37803+ atomic_inc_unchecked(&m->error_count);
37804
37805 if (test_and_set_bit(error_type, &m->error_type))
37806 return;
37807@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37808 }
37809
37810 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
37811- if (!atomic_read(&new->error_count)) {
37812+ if (!atomic_read_unchecked(&new->error_count)) {
37813 set_default_mirror(new);
37814 break;
37815 }
37816@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37817 struct mirror *m = get_default_mirror(ms);
37818
37819 do {
37820- if (likely(!atomic_read(&m->error_count)))
37821+ if (likely(!atomic_read_unchecked(&m->error_count)))
37822 return m;
37823
37824 if (m-- == ms->mirror)
37825@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
37826 {
37827 struct mirror *default_mirror = get_default_mirror(m->ms);
37828
37829- return !atomic_read(&default_mirror->error_count);
37830+ return !atomic_read_unchecked(&default_mirror->error_count);
37831 }
37832
37833 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37834@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37835 */
37836 if (likely(region_in_sync(ms, region, 1)))
37837 m = choose_mirror(ms, bio->bi_sector);
37838- else if (m && atomic_read(&m->error_count))
37839+ else if (m && atomic_read_unchecked(&m->error_count))
37840 m = NULL;
37841
37842 if (likely(m))
37843@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37844 }
37845
37846 ms->mirror[mirror].ms = ms;
37847- atomic_set(&(ms->mirror[mirror].error_count), 0);
37848+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37849 ms->mirror[mirror].error_type = 0;
37850 ms->mirror[mirror].offset = offset;
37851
37852@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
37853 */
37854 static char device_status_char(struct mirror *m)
37855 {
37856- if (!atomic_read(&(m->error_count)))
37857+ if (!atomic_read_unchecked(&(m->error_count)))
37858 return 'A';
37859
37860 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
37861diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37862index bd58703..9f26571 100644
37863--- a/drivers/md/dm-stripe.c
37864+++ b/drivers/md/dm-stripe.c
37865@@ -20,7 +20,7 @@ struct stripe {
37866 struct dm_dev *dev;
37867 sector_t physical_start;
37868
37869- atomic_t error_count;
37870+ atomic_unchecked_t error_count;
37871 };
37872
37873 struct stripe_c {
37874@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37875 kfree(sc);
37876 return r;
37877 }
37878- atomic_set(&(sc->stripe[i].error_count), 0);
37879+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37880 }
37881
37882 ti->private = sc;
37883@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
37884 DMEMIT("%d ", sc->stripes);
37885 for (i = 0; i < sc->stripes; i++) {
37886 DMEMIT("%s ", sc->stripe[i].dev->name);
37887- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37888+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37889 'D' : 'A';
37890 }
37891 buffer[i] = '\0';
37892@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
37893 */
37894 for (i = 0; i < sc->stripes; i++)
37895 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37896- atomic_inc(&(sc->stripe[i].error_count));
37897- if (atomic_read(&(sc->stripe[i].error_count)) <
37898+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37899+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37900 DM_IO_ERROR_THRESHOLD)
37901 queue_work(kstriped, &sc->kstriped_ws);
37902 }
37903diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
37904index 4b04590..13a77b2 100644
37905--- a/drivers/md/dm-sysfs.c
37906+++ b/drivers/md/dm-sysfs.c
37907@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
37908 NULL,
37909 };
37910
37911-static struct sysfs_ops dm_sysfs_ops = {
37912+static const struct sysfs_ops dm_sysfs_ops = {
37913 .show = dm_attr_show,
37914 };
37915
37916diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37917index 03345bb..332250d 100644
37918--- a/drivers/md/dm-table.c
37919+++ b/drivers/md/dm-table.c
37920@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37921 if (!dev_size)
37922 return 0;
37923
37924- if ((start >= dev_size) || (start + len > dev_size)) {
37925+ if ((start >= dev_size) || (len > dev_size - start)) {
37926 DMWARN("%s: %s too small for target: "
37927 "start=%llu, len=%llu, dev_size=%llu",
37928 dm_device_name(ti->table->md), bdevname(bdev, b),
37929diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37930index c988ac2..c418141 100644
37931--- a/drivers/md/dm.c
37932+++ b/drivers/md/dm.c
37933@@ -165,9 +165,9 @@ struct mapped_device {
37934 /*
37935 * Event handling.
37936 */
37937- atomic_t event_nr;
37938+ atomic_unchecked_t event_nr;
37939 wait_queue_head_t eventq;
37940- atomic_t uevent_seq;
37941+ atomic_unchecked_t uevent_seq;
37942 struct list_head uevent_list;
37943 spinlock_t uevent_lock; /* Protect access to uevent_list */
37944
37945@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
37946 rwlock_init(&md->map_lock);
37947 atomic_set(&md->holders, 1);
37948 atomic_set(&md->open_count, 0);
37949- atomic_set(&md->event_nr, 0);
37950- atomic_set(&md->uevent_seq, 0);
37951+ atomic_set_unchecked(&md->event_nr, 0);
37952+ atomic_set_unchecked(&md->uevent_seq, 0);
37953 INIT_LIST_HEAD(&md->uevent_list);
37954 spin_lock_init(&md->uevent_lock);
37955
37956@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
37957
37958 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37959
37960- atomic_inc(&md->event_nr);
37961+ atomic_inc_unchecked(&md->event_nr);
37962 wake_up(&md->eventq);
37963 }
37964
37965@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37966
37967 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37968 {
37969- return atomic_add_return(1, &md->uevent_seq);
37970+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37971 }
37972
37973 uint32_t dm_get_event_nr(struct mapped_device *md)
37974 {
37975- return atomic_read(&md->event_nr);
37976+ return atomic_read_unchecked(&md->event_nr);
37977 }
37978
37979 int dm_wait_event(struct mapped_device *md, int event_nr)
37980 {
37981 return wait_event_interruptible(md->eventq,
37982- (event_nr != atomic_read(&md->event_nr)));
37983+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37984 }
37985
37986 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37987diff --git a/drivers/md/md.c b/drivers/md/md.c
37988index 4ce6e2f..7a9530a 100644
37989--- a/drivers/md/md.c
37990+++ b/drivers/md/md.c
37991@@ -153,10 +153,10 @@ static int start_readonly;
37992 * start build, activate spare
37993 */
37994 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37995-static atomic_t md_event_count;
37996+static atomic_unchecked_t md_event_count;
37997 void md_new_event(mddev_t *mddev)
37998 {
37999- atomic_inc(&md_event_count);
38000+ atomic_inc_unchecked(&md_event_count);
38001 wake_up(&md_event_waiters);
38002 }
38003 EXPORT_SYMBOL_GPL(md_new_event);
38004@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38005 */
38006 static void md_new_event_inintr(mddev_t *mddev)
38007 {
38008- atomic_inc(&md_event_count);
38009+ atomic_inc_unchecked(&md_event_count);
38010 wake_up(&md_event_waiters);
38011 }
38012
38013@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
38014
38015 rdev->preferred_minor = 0xffff;
38016 rdev->data_offset = le64_to_cpu(sb->data_offset);
38017- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38018+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38019
38020 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38021 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38022@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
38023 else
38024 sb->resync_offset = cpu_to_le64(0);
38025
38026- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38027+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38028
38029 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38030 sb->size = cpu_to_le64(mddev->dev_sectors);
38031@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38032 static ssize_t
38033 errors_show(mdk_rdev_t *rdev, char *page)
38034 {
38035- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38036+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38037 }
38038
38039 static ssize_t
38040@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
38041 char *e;
38042 unsigned long n = simple_strtoul(buf, &e, 10);
38043 if (*buf && (*e == 0 || *e == '\n')) {
38044- atomic_set(&rdev->corrected_errors, n);
38045+ atomic_set_unchecked(&rdev->corrected_errors, n);
38046 return len;
38047 }
38048 return -EINVAL;
38049@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
38050 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
38051 kfree(rdev);
38052 }
38053-static struct sysfs_ops rdev_sysfs_ops = {
38054+static const struct sysfs_ops rdev_sysfs_ops = {
38055 .show = rdev_attr_show,
38056 .store = rdev_attr_store,
38057 };
38058@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
38059 rdev->data_offset = 0;
38060 rdev->sb_events = 0;
38061 atomic_set(&rdev->nr_pending, 0);
38062- atomic_set(&rdev->read_errors, 0);
38063- atomic_set(&rdev->corrected_errors, 0);
38064+ atomic_set_unchecked(&rdev->read_errors, 0);
38065+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38066
38067 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
38068 if (!size) {
38069@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
38070 kfree(mddev);
38071 }
38072
38073-static struct sysfs_ops md_sysfs_ops = {
38074+static const struct sysfs_ops md_sysfs_ops = {
38075 .show = md_attr_show,
38076 .store = md_attr_store,
38077 };
38078@@ -4482,7 +4482,8 @@ out:
38079 err = 0;
38080 blk_integrity_unregister(disk);
38081 md_new_event(mddev);
38082- sysfs_notify_dirent(mddev->sysfs_state);
38083+ if (mddev->sysfs_state)
38084+ sysfs_notify_dirent(mddev->sysfs_state);
38085 return err;
38086 }
38087
38088@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38089
38090 spin_unlock(&pers_lock);
38091 seq_printf(seq, "\n");
38092- mi->event = atomic_read(&md_event_count);
38093+ mi->event = atomic_read_unchecked(&md_event_count);
38094 return 0;
38095 }
38096 if (v == (void*)2) {
38097@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38098 chunk_kb ? "KB" : "B");
38099 if (bitmap->file) {
38100 seq_printf(seq, ", file: ");
38101- seq_path(seq, &bitmap->file->f_path, " \t\n");
38102+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
38103 }
38104
38105 seq_printf(seq, "\n");
38106@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38107 else {
38108 struct seq_file *p = file->private_data;
38109 p->private = mi;
38110- mi->event = atomic_read(&md_event_count);
38111+ mi->event = atomic_read_unchecked(&md_event_count);
38112 }
38113 return error;
38114 }
38115@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38116 /* always allow read */
38117 mask = POLLIN | POLLRDNORM;
38118
38119- if (mi->event != atomic_read(&md_event_count))
38120+ if (mi->event != atomic_read_unchecked(&md_event_count))
38121 mask |= POLLERR | POLLPRI;
38122 return mask;
38123 }
38124@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
38125 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38126 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38127 (int)part_stat_read(&disk->part0, sectors[1]) -
38128- atomic_read(&disk->sync_io);
38129+ atomic_read_unchecked(&disk->sync_io);
38130 /* sync IO will cause sync_io to increase before the disk_stats
38131 * as sync_io is counted when a request starts, and
38132 * disk_stats is counted when it completes.
38133diff --git a/drivers/md/md.h b/drivers/md/md.h
38134index 87430fe..0024a4c 100644
38135--- a/drivers/md/md.h
38136+++ b/drivers/md/md.h
38137@@ -94,10 +94,10 @@ struct mdk_rdev_s
38138 * only maintained for arrays that
38139 * support hot removal
38140 */
38141- atomic_t read_errors; /* number of consecutive read errors that
38142+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38143 * we have tried to ignore.
38144 */
38145- atomic_t corrected_errors; /* number of corrected read errors,
38146+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38147 * for reporting to userspace and storing
38148 * in superblock.
38149 */
38150@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
38151
38152 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38153 {
38154- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38155+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38156 }
38157
38158 struct mdk_personality
38159diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38160index 968cb14..f0ad2e4 100644
38161--- a/drivers/md/raid1.c
38162+++ b/drivers/md/raid1.c
38163@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
38164 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
38165 continue;
38166 rdev = conf->mirrors[d].rdev;
38167- atomic_add(s, &rdev->corrected_errors);
38168+ atomic_add_unchecked(s, &rdev->corrected_errors);
38169 if (sync_page_io(rdev->bdev,
38170 sect + rdev->data_offset,
38171 s<<9,
38172@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
38173 /* Well, this device is dead */
38174 md_error(mddev, rdev);
38175 else {
38176- atomic_add(s, &rdev->corrected_errors);
38177+ atomic_add_unchecked(s, &rdev->corrected_errors);
38178 printk(KERN_INFO
38179 "raid1:%s: read error corrected "
38180 "(%d sectors at %llu on %s)\n",
38181diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38182index 1b4e232..cf0f534b 100644
38183--- a/drivers/md/raid10.c
38184+++ b/drivers/md/raid10.c
38185@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
38186 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
38187 set_bit(R10BIO_Uptodate, &r10_bio->state);
38188 else {
38189- atomic_add(r10_bio->sectors,
38190+ atomic_add_unchecked(r10_bio->sectors,
38191 &conf->mirrors[d].rdev->corrected_errors);
38192 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
38193 md_error(r10_bio->mddev,
38194@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
38195 test_bit(In_sync, &rdev->flags)) {
38196 atomic_inc(&rdev->nr_pending);
38197 rcu_read_unlock();
38198- atomic_add(s, &rdev->corrected_errors);
38199+ atomic_add_unchecked(s, &rdev->corrected_errors);
38200 if (sync_page_io(rdev->bdev,
38201 r10_bio->devs[sl].addr +
38202 sect + rdev->data_offset,
38203diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38204index 883215d..675bf47 100644
38205--- a/drivers/md/raid5.c
38206+++ b/drivers/md/raid5.c
38207@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
38208 bi->bi_next = NULL;
38209 if ((rw & WRITE) &&
38210 test_bit(R5_ReWrite, &sh->dev[i].flags))
38211- atomic_add(STRIPE_SECTORS,
38212+ atomic_add_unchecked(STRIPE_SECTORS,
38213 &rdev->corrected_errors);
38214 generic_make_request(bi);
38215 } else {
38216@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
38217 clear_bit(R5_ReadError, &sh->dev[i].flags);
38218 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38219 }
38220- if (atomic_read(&conf->disks[i].rdev->read_errors))
38221- atomic_set(&conf->disks[i].rdev->read_errors, 0);
38222+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
38223+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
38224 } else {
38225 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
38226 int retry = 0;
38227 rdev = conf->disks[i].rdev;
38228
38229 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38230- atomic_inc(&rdev->read_errors);
38231+ atomic_inc_unchecked(&rdev->read_errors);
38232 if (conf->mddev->degraded >= conf->max_degraded)
38233 printk_rl(KERN_WARNING
38234 "raid5:%s: read error not correctable "
38235@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38236 (unsigned long long)(sh->sector
38237 + rdev->data_offset),
38238 bdn);
38239- else if (atomic_read(&rdev->read_errors)
38240+ else if (atomic_read_unchecked(&rdev->read_errors)
38241 > conf->max_nr_stripes)
38242 printk(KERN_WARNING
38243 "raid5:%s: Too many read errors, failing device %s.\n",
38244@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
38245 sector_t r_sector;
38246 struct stripe_head sh2;
38247
38248+ pax_track_stack();
38249
38250 chunk_offset = sector_div(new_sector, sectors_per_chunk);
38251 stripe = new_sector;
38252diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
38253index 05bde9c..2f31d40 100644
38254--- a/drivers/media/common/saa7146_hlp.c
38255+++ b/drivers/media/common/saa7146_hlp.c
38256@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
38257
38258 int x[32], y[32], w[32], h[32];
38259
38260+ pax_track_stack();
38261+
38262 /* clear out memory */
38263 memset(&line_list[0], 0x00, sizeof(u32)*32);
38264 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
38265diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38266index cb22da5..82b686e 100644
38267--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38268+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38269@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
38270 u8 buf[HOST_LINK_BUF_SIZE];
38271 int i;
38272
38273+ pax_track_stack();
38274+
38275 dprintk("%s\n", __func__);
38276
38277 /* check if we have space for a link buf in the rx_buffer */
38278@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
38279 unsigned long timeout;
38280 int written;
38281
38282+ pax_track_stack();
38283+
38284 dprintk("%s\n", __func__);
38285
38286 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
38287diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
38288index 2fe05d0..a3289c4 100644
38289--- a/drivers/media/dvb/dvb-core/dvb_demux.h
38290+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
38291@@ -71,7 +71,7 @@ struct dvb_demux_feed {
38292 union {
38293 dmx_ts_cb ts;
38294 dmx_section_cb sec;
38295- } cb;
38296+ } __no_const cb;
38297
38298 struct dvb_demux *demux;
38299 void *priv;
38300diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
38301index 94159b9..376bd8e 100644
38302--- a/drivers/media/dvb/dvb-core/dvbdev.c
38303+++ b/drivers/media/dvb/dvb-core/dvbdev.c
38304@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38305 const struct dvb_device *template, void *priv, int type)
38306 {
38307 struct dvb_device *dvbdev;
38308- struct file_operations *dvbdevfops;
38309+ file_operations_no_const *dvbdevfops;
38310 struct device *clsdev;
38311 int minor;
38312 int id;
38313diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
38314index 2a53dd0..db8c07a 100644
38315--- a/drivers/media/dvb/dvb-usb/cxusb.c
38316+++ b/drivers/media/dvb/dvb-usb/cxusb.c
38317@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38318 struct dib0700_adapter_state {
38319 int (*set_param_save) (struct dvb_frontend *,
38320 struct dvb_frontend_parameters *);
38321-};
38322+} __no_const;
38323
38324 static int dib7070_set_param_override(struct dvb_frontend *fe,
38325 struct dvb_frontend_parameters *fep)
38326diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
38327index db7f7f7..f55e96f 100644
38328--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
38329+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
38330@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
38331
38332 u8 buf[260];
38333
38334+ pax_track_stack();
38335+
38336 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
38337 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
38338
38339diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38340index 524acf5..5ffc403 100644
38341--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
38342+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38343@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
38344
38345 struct dib0700_adapter_state {
38346 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
38347-};
38348+} __no_const;
38349
38350 /* Hauppauge Nova-T 500 (aka Bristol)
38351 * has a LNA on GPIO0 which is enabled by setting 1 */
38352diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
38353index ba91735..4261d84 100644
38354--- a/drivers/media/dvb/frontends/dib3000.h
38355+++ b/drivers/media/dvb/frontends/dib3000.h
38356@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38357 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38358 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38359 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38360-};
38361+} __no_const;
38362
38363 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38364 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38365diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
38366index c709ce6..b3fe620 100644
38367--- a/drivers/media/dvb/frontends/or51211.c
38368+++ b/drivers/media/dvb/frontends/or51211.c
38369@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
38370 u8 tudata[585];
38371 int i;
38372
38373+ pax_track_stack();
38374+
38375 dprintk("Firmware is %zd bytes\n",fw->size);
38376
38377 /* Get eprom data */
38378diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38379index 482d0f3..ee1e202 100644
38380--- a/drivers/media/radio/radio-cadet.c
38381+++ b/drivers/media/radio/radio-cadet.c
38382@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38383 while (i < count && dev->rdsin != dev->rdsout)
38384 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38385
38386- if (copy_to_user(data, readbuf, i))
38387+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
38388 return -EFAULT;
38389 return i;
38390 }
38391diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
38392index 6dd51e2..0359b92 100644
38393--- a/drivers/media/video/cx18/cx18-driver.c
38394+++ b/drivers/media/video/cx18/cx18-driver.c
38395@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
38396
38397 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
38398
38399-static atomic_t cx18_instance = ATOMIC_INIT(0);
38400+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
38401
38402 /* Parameter declarations */
38403 static int cardtype[CX18_MAX_CARDS];
38404@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
38405 struct i2c_client c;
38406 u8 eedata[256];
38407
38408+ pax_track_stack();
38409+
38410 memset(&c, 0, sizeof(c));
38411 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
38412 c.adapter = &cx->i2c_adap[0];
38413@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
38414 struct cx18 *cx;
38415
38416 /* FIXME - module parameter arrays constrain max instances */
38417- i = atomic_inc_return(&cx18_instance) - 1;
38418+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
38419 if (i >= CX18_MAX_CARDS) {
38420 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
38421 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
38422diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
38423index 463ec34..2f4625a 100644
38424--- a/drivers/media/video/ivtv/ivtv-driver.c
38425+++ b/drivers/media/video/ivtv/ivtv-driver.c
38426@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
38427 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
38428
38429 /* ivtv instance counter */
38430-static atomic_t ivtv_instance = ATOMIC_INIT(0);
38431+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
38432
38433 /* Parameter declarations */
38434 static int cardtype[IVTV_MAX_CARDS];
38435diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
38436index 5fc4ac0..652a54a 100644
38437--- a/drivers/media/video/omap24xxcam.c
38438+++ b/drivers/media/video/omap24xxcam.c
38439@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
38440 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
38441
38442 do_gettimeofday(&vb->ts);
38443- vb->field_count = atomic_add_return(2, &fh->field_count);
38444+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
38445 if (csr & csr_error) {
38446 vb->state = VIDEOBUF_ERROR;
38447 if (!atomic_read(&fh->cam->in_reset)) {
38448diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
38449index 2ce67f5..cf26a5b 100644
38450--- a/drivers/media/video/omap24xxcam.h
38451+++ b/drivers/media/video/omap24xxcam.h
38452@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
38453 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
38454 struct videobuf_queue vbq;
38455 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
38456- atomic_t field_count; /* field counter for videobuf_buffer */
38457+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
38458 /* accessing cam here doesn't need serialisation: it's constant */
38459 struct omap24xxcam_device *cam;
38460 };
38461diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38462index 299afa4..eb47459 100644
38463--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38464+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38465@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
38466 u8 *eeprom;
38467 struct tveeprom tvdata;
38468
38469+ pax_track_stack();
38470+
38471 memset(&tvdata,0,sizeof(tvdata));
38472
38473 eeprom = pvr2_eeprom_fetch(hdw);
38474diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38475index 5b152ff..3320638 100644
38476--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38477+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38478@@ -195,7 +195,7 @@ struct pvr2_hdw {
38479
38480 /* I2C stuff */
38481 struct i2c_adapter i2c_adap;
38482- struct i2c_algorithm i2c_algo;
38483+ i2c_algorithm_no_const i2c_algo;
38484 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
38485 int i2c_cx25840_hack_state;
38486 int i2c_linked;
38487diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
38488index 1eabff6..8e2313a 100644
38489--- a/drivers/media/video/saa7134/saa6752hs.c
38490+++ b/drivers/media/video/saa7134/saa6752hs.c
38491@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
38492 unsigned char localPAT[256];
38493 unsigned char localPMT[256];
38494
38495+ pax_track_stack();
38496+
38497 /* Set video format - must be done first as it resets other settings */
38498 set_reg8(client, 0x41, h->video_format);
38499
38500diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
38501index 9c1d3ac..b1b49e9 100644
38502--- a/drivers/media/video/saa7164/saa7164-cmd.c
38503+++ b/drivers/media/video/saa7164/saa7164-cmd.c
38504@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
38505 wait_queue_head_t *q = 0;
38506 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38507
38508+ pax_track_stack();
38509+
38510 /* While any outstand message on the bus exists... */
38511 do {
38512
38513@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
38514 u8 tmp[512];
38515 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38516
38517+ pax_track_stack();
38518+
38519 while (loop) {
38520
38521 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
38522diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
38523index b085496..cde0270 100644
38524--- a/drivers/media/video/usbvideo/ibmcam.c
38525+++ b/drivers/media/video/usbvideo/ibmcam.c
38526@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
38527 static int __init ibmcam_init(void)
38528 {
38529 struct usbvideo_cb cbTbl;
38530- memset(&cbTbl, 0, sizeof(cbTbl));
38531- cbTbl.probe = ibmcam_probe;
38532- cbTbl.setupOnOpen = ibmcam_setup_on_open;
38533- cbTbl.videoStart = ibmcam_video_start;
38534- cbTbl.videoStop = ibmcam_video_stop;
38535- cbTbl.processData = ibmcam_ProcessIsocData;
38536- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38537- cbTbl.adjustPicture = ibmcam_adjust_picture;
38538- cbTbl.getFPS = ibmcam_calculate_fps;
38539+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
38540+ *(void **)&cbTbl.probe = ibmcam_probe;
38541+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
38542+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
38543+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
38544+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
38545+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38546+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
38547+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
38548 return usbvideo_register(
38549 &cams,
38550 MAX_IBMCAM,
38551diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
38552index 31d57f2..600b735 100644
38553--- a/drivers/media/video/usbvideo/konicawc.c
38554+++ b/drivers/media/video/usbvideo/konicawc.c
38555@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
38556 int error;
38557
38558 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38559- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38560+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38561
38562 cam->input = input_dev = input_allocate_device();
38563 if (!input_dev) {
38564@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
38565 struct usbvideo_cb cbTbl;
38566 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
38567 DRIVER_DESC "\n");
38568- memset(&cbTbl, 0, sizeof(cbTbl));
38569- cbTbl.probe = konicawc_probe;
38570- cbTbl.setupOnOpen = konicawc_setup_on_open;
38571- cbTbl.processData = konicawc_process_isoc;
38572- cbTbl.getFPS = konicawc_calculate_fps;
38573- cbTbl.setVideoMode = konicawc_set_video_mode;
38574- cbTbl.startDataPump = konicawc_start_data;
38575- cbTbl.stopDataPump = konicawc_stop_data;
38576- cbTbl.adjustPicture = konicawc_adjust_picture;
38577- cbTbl.userFree = konicawc_free_uvd;
38578+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
38579+ *(void **)&cbTbl.probe = konicawc_probe;
38580+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
38581+ *(void **)&cbTbl.processData = konicawc_process_isoc;
38582+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
38583+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
38584+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
38585+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
38586+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
38587+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
38588 return usbvideo_register(
38589 &cams,
38590 MAX_CAMERAS,
38591diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
38592index 803d3e4..c4d1b96 100644
38593--- a/drivers/media/video/usbvideo/quickcam_messenger.c
38594+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
38595@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
38596 int error;
38597
38598 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38599- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38600+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38601
38602 cam->input = input_dev = input_allocate_device();
38603 if (!input_dev) {
38604diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
38605index fbd1b63..292f9f0 100644
38606--- a/drivers/media/video/usbvideo/ultracam.c
38607+++ b/drivers/media/video/usbvideo/ultracam.c
38608@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
38609 {
38610 struct usbvideo_cb cbTbl;
38611 memset(&cbTbl, 0, sizeof(cbTbl));
38612- cbTbl.probe = ultracam_probe;
38613- cbTbl.setupOnOpen = ultracam_setup_on_open;
38614- cbTbl.videoStart = ultracam_video_start;
38615- cbTbl.videoStop = ultracam_video_stop;
38616- cbTbl.processData = ultracam_ProcessIsocData;
38617- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38618- cbTbl.adjustPicture = ultracam_adjust_picture;
38619- cbTbl.getFPS = ultracam_calculate_fps;
38620+ *(void **)&cbTbl.probe = ultracam_probe;
38621+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
38622+ *(void **)&cbTbl.videoStart = ultracam_video_start;
38623+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
38624+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
38625+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38626+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
38627+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
38628 return usbvideo_register(
38629 &cams,
38630 MAX_CAMERAS,
38631diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
38632index dea8b32..34f6878 100644
38633--- a/drivers/media/video/usbvideo/usbvideo.c
38634+++ b/drivers/media/video/usbvideo/usbvideo.c
38635@@ -697,15 +697,15 @@ int usbvideo_register(
38636 __func__, cams, base_size, num_cams);
38637
38638 /* Copy callbacks, apply defaults for those that are not set */
38639- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
38640+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
38641 if (cams->cb.getFrame == NULL)
38642- cams->cb.getFrame = usbvideo_GetFrame;
38643+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
38644 if (cams->cb.disconnect == NULL)
38645- cams->cb.disconnect = usbvideo_Disconnect;
38646+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
38647 if (cams->cb.startDataPump == NULL)
38648- cams->cb.startDataPump = usbvideo_StartDataPump;
38649+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
38650 if (cams->cb.stopDataPump == NULL)
38651- cams->cb.stopDataPump = usbvideo_StopDataPump;
38652+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
38653
38654 cams->num_cameras = num_cams;
38655 cams->cam = (struct uvd *) &cams[1];
38656diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
38657index c66985b..7fa143a 100644
38658--- a/drivers/media/video/usbvideo/usbvideo.h
38659+++ b/drivers/media/video/usbvideo/usbvideo.h
38660@@ -268,7 +268,7 @@ struct usbvideo_cb {
38661 int (*startDataPump)(struct uvd *uvd);
38662 void (*stopDataPump)(struct uvd *uvd);
38663 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
38664-};
38665+} __no_const;
38666
38667 struct usbvideo {
38668 int num_cameras; /* As allocated */
38669diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
38670index e0f91e4..37554ea 100644
38671--- a/drivers/media/video/usbvision/usbvision-core.c
38672+++ b/drivers/media/video/usbvision/usbvision-core.c
38673@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
38674 unsigned char rv, gv, bv;
38675 static unsigned char *Y, *U, *V;
38676
38677+ pax_track_stack();
38678+
38679 frame = usbvision->curFrame;
38680 imageSize = frame->frmwidth * frame->frmheight;
38681 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
38682diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
38683index 0d06e7c..3d17d24 100644
38684--- a/drivers/media/video/v4l2-device.c
38685+++ b/drivers/media/video/v4l2-device.c
38686@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38687 EXPORT_SYMBOL_GPL(v4l2_device_register);
38688
38689 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
38690- atomic_t *instance)
38691+ atomic_unchecked_t *instance)
38692 {
38693- int num = atomic_inc_return(instance) - 1;
38694+ int num = atomic_inc_return_unchecked(instance) - 1;
38695 int len = strlen(basename);
38696
38697 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
38698diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
38699index 032ebae..4ebd8e8 100644
38700--- a/drivers/media/video/videobuf-dma-sg.c
38701+++ b/drivers/media/video/videobuf-dma-sg.c
38702@@ -631,6 +631,9 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
38703
38704 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38705 char __user *data, size_t count,
38706+ int nonblocking ) __size_overflow(3);
38707+static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38708+ char __user *data, size_t count,
38709 int nonblocking )
38710 {
38711 struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
38712@@ -693,6 +696,8 @@ void *videobuf_sg_alloc(size_t size)
38713 {
38714 struct videobuf_queue q;
38715
38716+ pax_track_stack();
38717+
38718 /* Required to make generic handler to call __videobuf_alloc */
38719 q.int_ops = &sg_ops;
38720
38721diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
38722index 35f3900..aa7c2f1 100644
38723--- a/drivers/media/video/videobuf-vmalloc.c
38724+++ b/drivers/media/video/videobuf-vmalloc.c
38725@@ -330,6 +330,9 @@ error:
38726
38727 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38728 char __user *data, size_t count,
38729+ int nonblocking ) __size_overflow(3);
38730+static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38731+ char __user *data, size_t count,
38732 int nonblocking )
38733 {
38734 struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
38735diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38736index b6992b7..9fa7547 100644
38737--- a/drivers/message/fusion/mptbase.c
38738+++ b/drivers/message/fusion/mptbase.c
38739@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
38740 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38741 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38742
38743+#ifdef CONFIG_GRKERNSEC_HIDESYM
38744+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38745+ NULL, NULL);
38746+#else
38747 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38748 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38749+#endif
38750+
38751 /*
38752 * Rounding UP to nearest 4-kB boundary here...
38753 */
38754diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38755index 83873e3..e360e9a 100644
38756--- a/drivers/message/fusion/mptsas.c
38757+++ b/drivers/message/fusion/mptsas.c
38758@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38759 return 0;
38760 }
38761
38762+static inline void
38763+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38764+{
38765+ if (phy_info->port_details) {
38766+ phy_info->port_details->rphy = rphy;
38767+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38768+ ioc->name, rphy));
38769+ }
38770+
38771+ if (rphy) {
38772+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38773+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38774+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38775+ ioc->name, rphy, rphy->dev.release));
38776+ }
38777+}
38778+
38779 /* no mutex */
38780 static void
38781 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38782@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38783 return NULL;
38784 }
38785
38786-static inline void
38787-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38788-{
38789- if (phy_info->port_details) {
38790- phy_info->port_details->rphy = rphy;
38791- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38792- ioc->name, rphy));
38793- }
38794-
38795- if (rphy) {
38796- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38797- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38798- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38799- ioc->name, rphy, rphy->dev.release));
38800- }
38801-}
38802-
38803 static inline struct sas_port *
38804 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38805 {
38806diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38807index bd096ca..332cf76 100644
38808--- a/drivers/message/fusion/mptscsih.c
38809+++ b/drivers/message/fusion/mptscsih.c
38810@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38811
38812 h = shost_priv(SChost);
38813
38814- if (h) {
38815- if (h->info_kbuf == NULL)
38816- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38817- return h->info_kbuf;
38818- h->info_kbuf[0] = '\0';
38819+ if (!h)
38820+ return NULL;
38821
38822- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38823- h->info_kbuf[size-1] = '\0';
38824- }
38825+ if (h->info_kbuf == NULL)
38826+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38827+ return h->info_kbuf;
38828+ h->info_kbuf[0] = '\0';
38829+
38830+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38831+ h->info_kbuf[size-1] = '\0';
38832
38833 return h->info_kbuf;
38834 }
38835diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
38836index efba702..59b2c0f 100644
38837--- a/drivers/message/i2o/i2o_config.c
38838+++ b/drivers/message/i2o/i2o_config.c
38839@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
38840 struct i2o_message *msg;
38841 unsigned int iop;
38842
38843+ pax_track_stack();
38844+
38845 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
38846 return -EFAULT;
38847
38848diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38849index 7045c45..c07b170 100644
38850--- a/drivers/message/i2o/i2o_proc.c
38851+++ b/drivers/message/i2o/i2o_proc.c
38852@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
38853 "Array Controller Device"
38854 };
38855
38856-static char *chtostr(u8 * chars, int n)
38857-{
38858- char tmp[256];
38859- tmp[0] = 0;
38860- return strncat(tmp, (char *)chars, n);
38861-}
38862-
38863 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38864 char *group)
38865 {
38866@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38867
38868 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38869 seq_printf(seq, "%-#8x", ddm_table.module_id);
38870- seq_printf(seq, "%-29s",
38871- chtostr(ddm_table.module_name_version, 28));
38872+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38873 seq_printf(seq, "%9d ", ddm_table.data_size);
38874 seq_printf(seq, "%8d", ddm_table.code_size);
38875
38876@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38877
38878 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38879 seq_printf(seq, "%-#8x", dst->module_id);
38880- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
38881- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
38882+ seq_printf(seq, "%-.28s", dst->module_name_version);
38883+ seq_printf(seq, "%-.8s", dst->date);
38884 seq_printf(seq, "%8d ", dst->module_size);
38885 seq_printf(seq, "%8d ", dst->mpb_size);
38886 seq_printf(seq, "0x%04x", dst->module_flags);
38887@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38888 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38889 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38890 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38891- seq_printf(seq, "Vendor info : %s\n",
38892- chtostr((u8 *) (work32 + 2), 16));
38893- seq_printf(seq, "Product info : %s\n",
38894- chtostr((u8 *) (work32 + 6), 16));
38895- seq_printf(seq, "Description : %s\n",
38896- chtostr((u8 *) (work32 + 10), 16));
38897- seq_printf(seq, "Product rev. : %s\n",
38898- chtostr((u8 *) (work32 + 14), 8));
38899+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38900+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38901+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38902+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38903
38904 seq_printf(seq, "Serial number : ");
38905 print_serial_number(seq, (u8 *) (work32 + 16),
38906@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38907 }
38908
38909 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38910- seq_printf(seq, "Module name : %s\n",
38911- chtostr(result.module_name, 24));
38912- seq_printf(seq, "Module revision : %s\n",
38913- chtostr(result.module_rev, 8));
38914+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38915+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38916
38917 seq_printf(seq, "Serial number : ");
38918 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38919@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38920 return 0;
38921 }
38922
38923- seq_printf(seq, "Device name : %s\n",
38924- chtostr(result.device_name, 64));
38925- seq_printf(seq, "Service name : %s\n",
38926- chtostr(result.service_name, 64));
38927- seq_printf(seq, "Physical name : %s\n",
38928- chtostr(result.physical_location, 64));
38929- seq_printf(seq, "Instance number : %s\n",
38930- chtostr(result.instance_number, 4));
38931+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38932+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38933+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38934+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38935
38936 return 0;
38937 }
38938diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38939index 27cf4af..b1205b8 100644
38940--- a/drivers/message/i2o/iop.c
38941+++ b/drivers/message/i2o/iop.c
38942@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38943
38944 spin_lock_irqsave(&c->context_list_lock, flags);
38945
38946- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38947- atomic_inc(&c->context_list_counter);
38948+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38949+ atomic_inc_unchecked(&c->context_list_counter);
38950
38951- entry->context = atomic_read(&c->context_list_counter);
38952+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38953
38954 list_add(&entry->list, &c->context_list);
38955
38956@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38957
38958 #if BITS_PER_LONG == 64
38959 spin_lock_init(&c->context_list_lock);
38960- atomic_set(&c->context_list_counter, 0);
38961+ atomic_set_unchecked(&c->context_list_counter, 0);
38962 INIT_LIST_HEAD(&c->context_list);
38963 #endif
38964
38965diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
38966index 78e3e85..66c9a0d 100644
38967--- a/drivers/mfd/ab3100-core.c
38968+++ b/drivers/mfd/ab3100-core.c
38969@@ -777,7 +777,7 @@ struct ab_family_id {
38970 char *name;
38971 };
38972
38973-static const struct ab_family_id ids[] __initdata = {
38974+static const struct ab_family_id ids[] __initconst = {
38975 /* AB3100 */
38976 {
38977 .id = 0xc0,
38978diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
38979index 8d8c932..8104515 100644
38980--- a/drivers/mfd/wm8350-i2c.c
38981+++ b/drivers/mfd/wm8350-i2c.c
38982@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
38983 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
38984 int ret;
38985
38986+ pax_track_stack();
38987+
38988 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
38989 return -EINVAL;
38990
38991diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38992index e4ff50b..4cc3f04 100644
38993--- a/drivers/misc/kgdbts.c
38994+++ b/drivers/misc/kgdbts.c
38995@@ -118,7 +118,7 @@
38996 } while (0)
38997 #define MAX_CONFIG_LEN 40
38998
38999-static struct kgdb_io kgdbts_io_ops;
39000+static const struct kgdb_io kgdbts_io_ops;
39001 static char get_buf[BUFMAX];
39002 static int get_buf_cnt;
39003 static char put_buf[BUFMAX];
39004@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
39005 module_put(THIS_MODULE);
39006 }
39007
39008-static struct kgdb_io kgdbts_io_ops = {
39009+static const struct kgdb_io kgdbts_io_ops = {
39010 .name = "kgdbts",
39011 .read_char = kgdbts_get_char,
39012 .write_char = kgdbts_put_char,
39013diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39014index 37e7cfc..67cfb76 100644
39015--- a/drivers/misc/sgi-gru/gruhandles.c
39016+++ b/drivers/misc/sgi-gru/gruhandles.c
39017@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39018
39019 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39020 {
39021- atomic_long_inc(&mcs_op_statistics[op].count);
39022- atomic_long_add(clks, &mcs_op_statistics[op].total);
39023+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39024+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
39025 if (mcs_op_statistics[op].max < clks)
39026 mcs_op_statistics[op].max = clks;
39027 }
39028diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39029index 3f2375c..467c6e6 100644
39030--- a/drivers/misc/sgi-gru/gruprocfs.c
39031+++ b/drivers/misc/sgi-gru/gruprocfs.c
39032@@ -32,9 +32,9 @@
39033
39034 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39035
39036-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39037+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39038 {
39039- unsigned long val = atomic_long_read(v);
39040+ unsigned long val = atomic_long_read_unchecked(v);
39041
39042 if (val)
39043 seq_printf(s, "%16lu %s\n", val, id);
39044@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39045 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
39046
39047 for (op = 0; op < mcsop_last; op++) {
39048- count = atomic_long_read(&mcs_op_statistics[op].count);
39049- total = atomic_long_read(&mcs_op_statistics[op].total);
39050+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39051+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39052 max = mcs_op_statistics[op].max;
39053 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39054 count ? total / count : 0, max);
39055diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39056index 46990bc..4a251b5 100644
39057--- a/drivers/misc/sgi-gru/grutables.h
39058+++ b/drivers/misc/sgi-gru/grutables.h
39059@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
39060 * GRU statistics.
39061 */
39062 struct gru_stats_s {
39063- atomic_long_t vdata_alloc;
39064- atomic_long_t vdata_free;
39065- atomic_long_t gts_alloc;
39066- atomic_long_t gts_free;
39067- atomic_long_t vdata_double_alloc;
39068- atomic_long_t gts_double_allocate;
39069- atomic_long_t assign_context;
39070- atomic_long_t assign_context_failed;
39071- atomic_long_t free_context;
39072- atomic_long_t load_user_context;
39073- atomic_long_t load_kernel_context;
39074- atomic_long_t lock_kernel_context;
39075- atomic_long_t unlock_kernel_context;
39076- atomic_long_t steal_user_context;
39077- atomic_long_t steal_kernel_context;
39078- atomic_long_t steal_context_failed;
39079- atomic_long_t nopfn;
39080- atomic_long_t break_cow;
39081- atomic_long_t asid_new;
39082- atomic_long_t asid_next;
39083- atomic_long_t asid_wrap;
39084- atomic_long_t asid_reuse;
39085- atomic_long_t intr;
39086- atomic_long_t intr_mm_lock_failed;
39087- atomic_long_t call_os;
39088- atomic_long_t call_os_offnode_reference;
39089- atomic_long_t call_os_check_for_bug;
39090- atomic_long_t call_os_wait_queue;
39091- atomic_long_t user_flush_tlb;
39092- atomic_long_t user_unload_context;
39093- atomic_long_t user_exception;
39094- atomic_long_t set_context_option;
39095- atomic_long_t migrate_check;
39096- atomic_long_t migrated_retarget;
39097- atomic_long_t migrated_unload;
39098- atomic_long_t migrated_unload_delay;
39099- atomic_long_t migrated_nopfn_retarget;
39100- atomic_long_t migrated_nopfn_unload;
39101- atomic_long_t tlb_dropin;
39102- atomic_long_t tlb_dropin_fail_no_asid;
39103- atomic_long_t tlb_dropin_fail_upm;
39104- atomic_long_t tlb_dropin_fail_invalid;
39105- atomic_long_t tlb_dropin_fail_range_active;
39106- atomic_long_t tlb_dropin_fail_idle;
39107- atomic_long_t tlb_dropin_fail_fmm;
39108- atomic_long_t tlb_dropin_fail_no_exception;
39109- atomic_long_t tlb_dropin_fail_no_exception_war;
39110- atomic_long_t tfh_stale_on_fault;
39111- atomic_long_t mmu_invalidate_range;
39112- atomic_long_t mmu_invalidate_page;
39113- atomic_long_t mmu_clear_flush_young;
39114- atomic_long_t flush_tlb;
39115- atomic_long_t flush_tlb_gru;
39116- atomic_long_t flush_tlb_gru_tgh;
39117- atomic_long_t flush_tlb_gru_zero_asid;
39118+ atomic_long_unchecked_t vdata_alloc;
39119+ atomic_long_unchecked_t vdata_free;
39120+ atomic_long_unchecked_t gts_alloc;
39121+ atomic_long_unchecked_t gts_free;
39122+ atomic_long_unchecked_t vdata_double_alloc;
39123+ atomic_long_unchecked_t gts_double_allocate;
39124+ atomic_long_unchecked_t assign_context;
39125+ atomic_long_unchecked_t assign_context_failed;
39126+ atomic_long_unchecked_t free_context;
39127+ atomic_long_unchecked_t load_user_context;
39128+ atomic_long_unchecked_t load_kernel_context;
39129+ atomic_long_unchecked_t lock_kernel_context;
39130+ atomic_long_unchecked_t unlock_kernel_context;
39131+ atomic_long_unchecked_t steal_user_context;
39132+ atomic_long_unchecked_t steal_kernel_context;
39133+ atomic_long_unchecked_t steal_context_failed;
39134+ atomic_long_unchecked_t nopfn;
39135+ atomic_long_unchecked_t break_cow;
39136+ atomic_long_unchecked_t asid_new;
39137+ atomic_long_unchecked_t asid_next;
39138+ atomic_long_unchecked_t asid_wrap;
39139+ atomic_long_unchecked_t asid_reuse;
39140+ atomic_long_unchecked_t intr;
39141+ atomic_long_unchecked_t intr_mm_lock_failed;
39142+ atomic_long_unchecked_t call_os;
39143+ atomic_long_unchecked_t call_os_offnode_reference;
39144+ atomic_long_unchecked_t call_os_check_for_bug;
39145+ atomic_long_unchecked_t call_os_wait_queue;
39146+ atomic_long_unchecked_t user_flush_tlb;
39147+ atomic_long_unchecked_t user_unload_context;
39148+ atomic_long_unchecked_t user_exception;
39149+ atomic_long_unchecked_t set_context_option;
39150+ atomic_long_unchecked_t migrate_check;
39151+ atomic_long_unchecked_t migrated_retarget;
39152+ atomic_long_unchecked_t migrated_unload;
39153+ atomic_long_unchecked_t migrated_unload_delay;
39154+ atomic_long_unchecked_t migrated_nopfn_retarget;
39155+ atomic_long_unchecked_t migrated_nopfn_unload;
39156+ atomic_long_unchecked_t tlb_dropin;
39157+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39158+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39159+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39160+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39161+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39162+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39163+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39164+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
39165+ atomic_long_unchecked_t tfh_stale_on_fault;
39166+ atomic_long_unchecked_t mmu_invalidate_range;
39167+ atomic_long_unchecked_t mmu_invalidate_page;
39168+ atomic_long_unchecked_t mmu_clear_flush_young;
39169+ atomic_long_unchecked_t flush_tlb;
39170+ atomic_long_unchecked_t flush_tlb_gru;
39171+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39172+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39173
39174- atomic_long_t copy_gpa;
39175+ atomic_long_unchecked_t copy_gpa;
39176
39177- atomic_long_t mesq_receive;
39178- atomic_long_t mesq_receive_none;
39179- atomic_long_t mesq_send;
39180- atomic_long_t mesq_send_failed;
39181- atomic_long_t mesq_noop;
39182- atomic_long_t mesq_send_unexpected_error;
39183- atomic_long_t mesq_send_lb_overflow;
39184- atomic_long_t mesq_send_qlimit_reached;
39185- atomic_long_t mesq_send_amo_nacked;
39186- atomic_long_t mesq_send_put_nacked;
39187- atomic_long_t mesq_qf_not_full;
39188- atomic_long_t mesq_qf_locked;
39189- atomic_long_t mesq_qf_noop_not_full;
39190- atomic_long_t mesq_qf_switch_head_failed;
39191- atomic_long_t mesq_qf_unexpected_error;
39192- atomic_long_t mesq_noop_unexpected_error;
39193- atomic_long_t mesq_noop_lb_overflow;
39194- atomic_long_t mesq_noop_qlimit_reached;
39195- atomic_long_t mesq_noop_amo_nacked;
39196- atomic_long_t mesq_noop_put_nacked;
39197+ atomic_long_unchecked_t mesq_receive;
39198+ atomic_long_unchecked_t mesq_receive_none;
39199+ atomic_long_unchecked_t mesq_send;
39200+ atomic_long_unchecked_t mesq_send_failed;
39201+ atomic_long_unchecked_t mesq_noop;
39202+ atomic_long_unchecked_t mesq_send_unexpected_error;
39203+ atomic_long_unchecked_t mesq_send_lb_overflow;
39204+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39205+ atomic_long_unchecked_t mesq_send_amo_nacked;
39206+ atomic_long_unchecked_t mesq_send_put_nacked;
39207+ atomic_long_unchecked_t mesq_qf_not_full;
39208+ atomic_long_unchecked_t mesq_qf_locked;
39209+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39210+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39211+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39212+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39213+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39214+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39215+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39216+ atomic_long_unchecked_t mesq_noop_put_nacked;
39217
39218 };
39219
39220@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39221 cchop_deallocate, tghop_invalidate, mcsop_last};
39222
39223 struct mcs_op_statistic {
39224- atomic_long_t count;
39225- atomic_long_t total;
39226+ atomic_long_unchecked_t count;
39227+ atomic_long_unchecked_t total;
39228 unsigned long max;
39229 };
39230
39231@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39232
39233 #define STAT(id) do { \
39234 if (gru_options & OPT_STATS) \
39235- atomic_long_inc(&gru_stats.id); \
39236+ atomic_long_inc_unchecked(&gru_stats.id); \
39237 } while (0)
39238
39239 #ifdef CONFIG_SGI_GRU_DEBUG
39240diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39241index 2275126..12a9dbfb 100644
39242--- a/drivers/misc/sgi-xp/xp.h
39243+++ b/drivers/misc/sgi-xp/xp.h
39244@@ -289,7 +289,7 @@ struct xpc_interface {
39245 xpc_notify_func, void *);
39246 void (*received) (short, int, void *);
39247 enum xp_retval (*partid_to_nasids) (short, void *);
39248-};
39249+} __no_const;
39250
39251 extern struct xpc_interface xpc_interface;
39252
39253diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39254index b94d5f7..7f494c5 100644
39255--- a/drivers/misc/sgi-xp/xpc.h
39256+++ b/drivers/misc/sgi-xp/xpc.h
39257@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39258 void (*received_payload) (struct xpc_channel *, void *);
39259 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39260 };
39261+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39262
39263 /* struct xpc_partition act_state values (for XPC HB) */
39264
39265@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39266 /* found in xpc_main.c */
39267 extern struct device *xpc_part;
39268 extern struct device *xpc_chan;
39269-extern struct xpc_arch_operations xpc_arch_ops;
39270+extern xpc_arch_operations_no_const xpc_arch_ops;
39271 extern int xpc_disengage_timelimit;
39272 extern int xpc_disengage_timedout;
39273 extern int xpc_activate_IRQ_rcvd;
39274diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39275index fd3688a..7e211a4 100644
39276--- a/drivers/misc/sgi-xp/xpc_main.c
39277+++ b/drivers/misc/sgi-xp/xpc_main.c
39278@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
39279 .notifier_call = xpc_system_die,
39280 };
39281
39282-struct xpc_arch_operations xpc_arch_ops;
39283+xpc_arch_operations_no_const xpc_arch_ops;
39284
39285 /*
39286 * Timer function to enforce the timelimit on the partition disengage.
39287diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
39288index 8b70e03..700bda6 100644
39289--- a/drivers/misc/sgi-xp/xpc_sn2.c
39290+++ b/drivers/misc/sgi-xp/xpc_sn2.c
39291@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
39292 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
39293 }
39294
39295-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
39296+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
39297 .setup_partitions = xpc_setup_partitions_sn2,
39298 .teardown_partitions = xpc_teardown_partitions_sn2,
39299 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
39300@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
39301 int ret;
39302 size_t buf_size;
39303
39304- xpc_arch_ops = xpc_arch_ops_sn2;
39305+ pax_open_kernel();
39306+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
39307+ pax_close_kernel();
39308
39309 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
39310 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
39311diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
39312index 8e08d71..7cb8c9b 100644
39313--- a/drivers/misc/sgi-xp/xpc_uv.c
39314+++ b/drivers/misc/sgi-xp/xpc_uv.c
39315@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
39316 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
39317 }
39318
39319-static struct xpc_arch_operations xpc_arch_ops_uv = {
39320+static const struct xpc_arch_operations xpc_arch_ops_uv = {
39321 .setup_partitions = xpc_setup_partitions_uv,
39322 .teardown_partitions = xpc_teardown_partitions_uv,
39323 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
39324@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
39325 int
39326 xpc_init_uv(void)
39327 {
39328- xpc_arch_ops = xpc_arch_ops_uv;
39329+ pax_open_kernel();
39330+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
39331+ pax_close_kernel();
39332
39333 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
39334 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
39335diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
39336index 6fd20b42..650efe3 100644
39337--- a/drivers/mmc/host/sdhci-pci.c
39338+++ b/drivers/mmc/host/sdhci-pci.c
39339@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
39340 .probe = via_probe,
39341 };
39342
39343-static const struct pci_device_id pci_ids[] __devinitdata = {
39344+static const struct pci_device_id pci_ids[] __devinitconst = {
39345 {
39346 .vendor = PCI_VENDOR_ID_RICOH,
39347 .device = PCI_DEVICE_ID_RICOH_R5C822,
39348diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
39349index e7563a9..5f90ce5 100644
39350--- a/drivers/mtd/chips/cfi_cmdset_0001.c
39351+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
39352@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
39353 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
39354 unsigned long timeo = jiffies + HZ;
39355
39356+ pax_track_stack();
39357+
39358 /* Prevent setting state FL_SYNCING for chip in suspended state. */
39359 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
39360 goto sleep;
39361@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
39362 unsigned long initial_adr;
39363 int initial_len = len;
39364
39365+ pax_track_stack();
39366+
39367 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
39368 adr += chip->start;
39369 initial_adr = adr;
39370@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
39371 int retries = 3;
39372 int ret;
39373
39374+ pax_track_stack();
39375+
39376 adr += chip->start;
39377
39378 retry:
39379diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
39380index 0667a67..3ab97ed 100644
39381--- a/drivers/mtd/chips/cfi_cmdset_0020.c
39382+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
39383@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
39384 unsigned long cmd_addr;
39385 struct cfi_private *cfi = map->fldrv_priv;
39386
39387+ pax_track_stack();
39388+
39389 adr += chip->start;
39390
39391 /* Ensure cmd read/writes are aligned. */
39392@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
39393 DECLARE_WAITQUEUE(wait, current);
39394 int wbufsize, z;
39395
39396+ pax_track_stack();
39397+
39398 /* M58LW064A requires bus alignment for buffer wriets -- saw */
39399 if (adr & (map_bankwidth(map)-1))
39400 return -EINVAL;
39401@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
39402 DECLARE_WAITQUEUE(wait, current);
39403 int ret = 0;
39404
39405+ pax_track_stack();
39406+
39407 adr += chip->start;
39408
39409 /* Let's determine this according to the interleave only once */
39410@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
39411 unsigned long timeo = jiffies + HZ;
39412 DECLARE_WAITQUEUE(wait, current);
39413
39414+ pax_track_stack();
39415+
39416 adr += chip->start;
39417
39418 /* Let's determine this according to the interleave only once */
39419@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
39420 unsigned long timeo = jiffies + HZ;
39421 DECLARE_WAITQUEUE(wait, current);
39422
39423+ pax_track_stack();
39424+
39425 adr += chip->start;
39426
39427 /* Let's determine this according to the interleave only once */
39428diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39429index 5bf5f46..c5de373 100644
39430--- a/drivers/mtd/devices/doc2000.c
39431+++ b/drivers/mtd/devices/doc2000.c
39432@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39433
39434 /* The ECC will not be calculated correctly if less than 512 is written */
39435 /* DBB-
39436- if (len != 0x200 && eccbuf)
39437+ if (len != 0x200)
39438 printk(KERN_WARNING
39439 "ECC needs a full sector write (adr: %lx size %lx)\n",
39440 (long) to, (long) len);
39441diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
39442index 0990f78..bb4e8a4 100644
39443--- a/drivers/mtd/devices/doc2001.c
39444+++ b/drivers/mtd/devices/doc2001.c
39445@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
39446 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
39447
39448 /* Don't allow read past end of device */
39449- if (from >= this->totlen)
39450+ if (from >= this->totlen || !len)
39451 return -EINVAL;
39452
39453 /* Don't allow a single read to cross a 512-byte block boundary */
39454diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
39455index e56d6b4..f07e6cf 100644
39456--- a/drivers/mtd/ftl.c
39457+++ b/drivers/mtd/ftl.c
39458@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
39459 loff_t offset;
39460 uint16_t srcunitswap = cpu_to_le16(srcunit);
39461
39462+ pax_track_stack();
39463+
39464 eun = &part->EUNInfo[srcunit];
39465 xfer = &part->XferInfo[xferunit];
39466 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
39467diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
39468index 8aca552..146446e 100755
39469--- a/drivers/mtd/inftlcore.c
39470+++ b/drivers/mtd/inftlcore.c
39471@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
39472 struct inftl_oob oob;
39473 size_t retlen;
39474
39475+ pax_track_stack();
39476+
39477 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
39478 "pending=%d)\n", inftl, thisVUC, pendingblock);
39479
39480diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
39481index 32e82ae..ed50953 100644
39482--- a/drivers/mtd/inftlmount.c
39483+++ b/drivers/mtd/inftlmount.c
39484@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
39485 struct INFTLPartition *ip;
39486 size_t retlen;
39487
39488+ pax_track_stack();
39489+
39490 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
39491
39492 /*
39493diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
39494index 79bf40f..fe5f8fd 100644
39495--- a/drivers/mtd/lpddr/qinfo_probe.c
39496+++ b/drivers/mtd/lpddr/qinfo_probe.c
39497@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
39498 {
39499 map_word pfow_val[4];
39500
39501+ pax_track_stack();
39502+
39503 /* Check identification string */
39504 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
39505 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
39506diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
39507index 726a1b8..f46b460 100644
39508--- a/drivers/mtd/mtdchar.c
39509+++ b/drivers/mtd/mtdchar.c
39510@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
39511 u_long size;
39512 struct mtd_info_user info;
39513
39514+ pax_track_stack();
39515+
39516 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
39517
39518 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
39519diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
39520index 1002e18..26d82d5 100644
39521--- a/drivers/mtd/nftlcore.c
39522+++ b/drivers/mtd/nftlcore.c
39523@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
39524 int inplace = 1;
39525 size_t retlen;
39526
39527+ pax_track_stack();
39528+
39529 memset(BlockMap, 0xff, sizeof(BlockMap));
39530 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
39531
39532diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39533index 8b22b18..6fada85 100644
39534--- a/drivers/mtd/nftlmount.c
39535+++ b/drivers/mtd/nftlmount.c
39536@@ -23,6 +23,7 @@
39537 #include <asm/errno.h>
39538 #include <linux/delay.h>
39539 #include <linux/slab.h>
39540+#include <linux/sched.h>
39541 #include <linux/mtd/mtd.h>
39542 #include <linux/mtd/nand.h>
39543 #include <linux/mtd/nftl.h>
39544@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
39545 struct mtd_info *mtd = nftl->mbd.mtd;
39546 unsigned int i;
39547
39548+ pax_track_stack();
39549+
39550 /* Assume logical EraseSize == physical erasesize for starting the scan.
39551 We'll sort it out later if we find a MediaHeader which says otherwise */
39552 /* Actually, we won't. The new DiskOnChip driver has already scanned
39553diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
39554index 14cec04..09d8519 100644
39555--- a/drivers/mtd/ubi/build.c
39556+++ b/drivers/mtd/ubi/build.c
39557@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
39558 static int __init bytes_str_to_int(const char *str)
39559 {
39560 char *endp;
39561- unsigned long result;
39562+ unsigned long result, scale = 1;
39563
39564 result = simple_strtoul(str, &endp, 0);
39565 if (str == endp || result >= INT_MAX) {
39566@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
39567
39568 switch (*endp) {
39569 case 'G':
39570- result *= 1024;
39571+ scale *= 1024;
39572 case 'M':
39573- result *= 1024;
39574+ scale *= 1024;
39575 case 'K':
39576- result *= 1024;
39577+ scale *= 1024;
39578 if (endp[1] == 'i' && endp[2] == 'B')
39579 endp += 2;
39580 case '\0':
39581@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
39582 return -EINVAL;
39583 }
39584
39585- return result;
39586+ if (result*scale >= INT_MAX) {
39587+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
39588+ str);
39589+ return -EINVAL;
39590+ }
39591+
39592+ return result*scale;
39593 }
39594
39595 /**
39596diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
39597index ab68886..ca405e8 100644
39598--- a/drivers/net/atlx/atl2.c
39599+++ b/drivers/net/atlx/atl2.c
39600@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
39601 */
39602
39603 #define ATL2_PARAM(X, desc) \
39604- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39605+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39606 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
39607 MODULE_PARM_DESC(X, desc);
39608 #else
39609diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
39610index a60cd80..0ed11ef 100644
39611--- a/drivers/net/benet/Makefile
39612+++ b/drivers/net/benet/Makefile
39613@@ -1,7 +1,9 @@
39614 #
39615-# Makefile to build the network driver for ServerEngine's BladeEngine.
39616+# Makefile to build the be2net network driver
39617 #
39618
39619+EXTRA_CFLAGS += -DCONFIG_PALAU
39620+
39621 obj-$(CONFIG_BE2NET) += be2net.o
39622
39623-be2net-y := be_main.o be_cmds.o be_ethtool.o
39624+be2net-y := be_main.o be_cmds.o be_ethtool.o be_compat.o be_misc.o
39625diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
39626index 5c74ff0..7382603 100644
39627--- a/drivers/net/benet/be.h
39628+++ b/drivers/net/benet/be.h
39629@@ -1,18 +1,18 @@
39630 /*
39631- * Copyright (C) 2005 - 2009 ServerEngines
39632+ * Copyright (C) 2005 - 2011 Emulex
39633 * All rights reserved.
39634 *
39635 * This program is free software; you can redistribute it and/or
39636 * modify it under the terms of the GNU General Public License version 2
39637- * as published by the Free Software Foundation. The full GNU General
39638+ * as published by the Free Software Foundation. The full GNU General
39639 * Public License is included in this distribution in the file called COPYING.
39640 *
39641 * Contact Information:
39642- * linux-drivers@serverengines.com
39643+ * linux-drivers@emulex.com
39644 *
39645- * ServerEngines
39646- * 209 N. Fair Oaks Ave
39647- * Sunnyvale, CA 94085
39648+ * Emulex
39649+ * 3333 Susan Street
39650+ * Costa Mesa, CA 92626
39651 */
39652
39653 #ifndef BE_H
39654@@ -29,32 +29,53 @@
39655 #include <linux/workqueue.h>
39656 #include <linux/interrupt.h>
39657 #include <linux/firmware.h>
39658+#include <linux/jhash.h>
39659+#ifndef CONFIG_PALAU
39660+#include <linux/inet_lro.h>
39661+#endif
39662
39663+#ifdef CONFIG_PALAU
39664+#include "be_compat.h"
39665+#endif
39666 #include "be_hw.h"
39667
39668-#define DRV_VER "2.101.205"
39669+#ifdef CONFIG_PALAU
39670+#include "version.h"
39671+#define DRV_VER STR_BE_MAJOR "." STR_BE_MINOR "."\
39672+ STR_BE_BUILD "." STR_BE_BRANCH
39673+#else
39674+#define DRV_VER "2.0.348"
39675+#endif
39676 #define DRV_NAME "be2net"
39677-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39678-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
39679-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
39680-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39681-#define DRV_DESC BE_NAME "Driver"
39682+#define BE_NAME "Emulex BladeEngine2"
39683+#define BE3_NAME "Emulex BladeEngine3"
39684+#define OC_NAME "Emulex OneConnect"
39685+#define OC_NAME_BE OC_NAME "(be3)"
39686+#define OC_NAME_LANCER OC_NAME "(Lancer)"
39687+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
39688
39689-#define BE_VENDOR_ID 0x19a2
39690+#define BE_VENDOR_ID 0x19a2
39691+#define EMULEX_VENDOR_ID 0x10df
39692 #define BE_DEVICE_ID1 0x211
39693 #define BE_DEVICE_ID2 0x221
39694-#define OC_DEVICE_ID1 0x700
39695-#define OC_DEVICE_ID2 0x701
39696-#define OC_DEVICE_ID3 0x710
39697+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
39698+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
39699+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
39700+
39701+#define OC_SUBSYS_DEVICE_ID1 0xE602
39702+#define OC_SUBSYS_DEVICE_ID2 0xE642
39703+#define OC_SUBSYS_DEVICE_ID3 0xE612
39704+#define OC_SUBSYS_DEVICE_ID4 0xE652
39705
39706 static inline char *nic_name(struct pci_dev *pdev)
39707 {
39708 switch (pdev->device) {
39709 case OC_DEVICE_ID1:
39710- case OC_DEVICE_ID2:
39711 return OC_NAME;
39712+ case OC_DEVICE_ID2:
39713+ return OC_NAME_BE;
39714 case OC_DEVICE_ID3:
39715- return OC_NAME1;
39716+ return OC_NAME_LANCER;
39717 case BE_DEVICE_ID2:
39718 return BE3_NAME;
39719 default:
39720@@ -63,7 +84,7 @@ static inline char *nic_name(struct pci_dev *pdev)
39721 }
39722
39723 /* Number of bytes of an RX frame that are copied to skb->data */
39724-#define BE_HDR_LEN 64
39725+#define BE_HDR_LEN ((u16) 64)
39726 #define BE_MAX_JUMBO_FRAME_SIZE 9018
39727 #define BE_MIN_MTU 256
39728
39729@@ -79,10 +100,24 @@ static inline char *nic_name(struct pci_dev *pdev)
39730 #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
39731 #define MCC_CQ_LEN 256
39732
39733+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
39734+
39735+#define MAX_RX_QS (MAX_RSS_QS + 1)
39736+
39737+#ifdef MQ_TX
39738+#define MAX_TX_QS 8
39739+#else
39740+#define MAX_TX_QS 1
39741+#endif
39742+
39743+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RSS qs + 1 def Rx + Tx */
39744 #define BE_NAPI_WEIGHT 64
39745-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39746+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39747 #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
39748
39749+#define BE_MAX_LRO_DESCRIPTORS 16
39750+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
39751+
39752 #define FW_VER_LEN 32
39753
39754 struct be_dma_mem {
39755@@ -127,6 +162,11 @@ static inline void *queue_tail_node(struct be_queue_info *q)
39756 return q->dma_mem.va + q->tail * q->entry_size;
39757 }
39758
39759+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
39760+{
39761+ return q->dma_mem.va + index * q->entry_size;
39762+}
39763+
39764 static inline void queue_head_inc(struct be_queue_info *q)
39765 {
39766 index_inc(&q->head, q->len);
39767@@ -137,6 +177,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
39768 index_inc(&q->tail, q->len);
39769 }
39770
39771+
39772 struct be_eq_obj {
39773 struct be_queue_info q;
39774 char desc[32];
39775@@ -146,6 +187,7 @@ struct be_eq_obj {
39776 u16 min_eqd; /* in usecs */
39777 u16 max_eqd; /* in usecs */
39778 u16 cur_eqd; /* in usecs */
39779+ u8 eq_idx;
39780
39781 struct napi_struct napi;
39782 };
39783@@ -153,49 +195,20 @@ struct be_eq_obj {
39784 struct be_mcc_obj {
39785 struct be_queue_info q;
39786 struct be_queue_info cq;
39787+ bool rearm_cq;
39788 };
39789
39790-struct be_drvr_stats {
39791+struct be_tx_stats {
39792 u32 be_tx_reqs; /* number of TX requests initiated */
39793 u32 be_tx_stops; /* number of times TX Q was stopped */
39794- u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
39795 u32 be_tx_wrbs; /* number of tx WRBs used */
39796- u32 be_tx_events; /* number of tx completion events */
39797 u32 be_tx_compl; /* number of tx completion entries processed */
39798 ulong be_tx_jiffies;
39799 u64 be_tx_bytes;
39800 u64 be_tx_bytes_prev;
39801 u64 be_tx_pkts;
39802 u32 be_tx_rate;
39803-
39804- u32 cache_barrier[16];
39805-
39806- u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
39807- u32 be_polls; /* number of times NAPI called poll function */
39808- u32 be_rx_events; /* number of ucast rx completion events */
39809- u32 be_rx_compl; /* number of rx completion entries processed */
39810- ulong be_rx_jiffies;
39811- u64 be_rx_bytes;
39812- u64 be_rx_bytes_prev;
39813- u64 be_rx_pkts;
39814- u32 be_rx_rate;
39815- /* number of non ether type II frames dropped where
39816- * frame len > length field of Mac Hdr */
39817- u32 be_802_3_dropped_frames;
39818- /* number of non ether type II frames malformed where
39819- * in frame len < length field of Mac Hdr */
39820- u32 be_802_3_malformed_frames;
39821- u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
39822- ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39823- u32 be_rx_frags;
39824- u32 be_prev_rx_frags;
39825- u32 be_rx_fps; /* Rx frags per second */
39826-};
39827-
39828-struct be_stats_obj {
39829- struct be_drvr_stats drvr_stats;
39830- struct net_device_stats net_stats;
39831- struct be_dma_mem cmd;
39832+ u32 be_ipv6_ext_hdr_tx_drop;
39833 };
39834
39835 struct be_tx_obj {
39836@@ -203,23 +216,124 @@ struct be_tx_obj {
39837 struct be_queue_info cq;
39838 /* Remember the skbs that were transmitted */
39839 struct sk_buff *sent_skb_list[TX_Q_LEN];
39840+ struct be_tx_stats stats;
39841 };
39842
39843 /* Struct to remember the pages posted for rx frags */
39844 struct be_rx_page_info {
39845 struct page *page;
39846- dma_addr_t bus;
39847+ DEFINE_DMA_UNMAP_ADDR(bus);
39848 u16 page_offset;
39849 bool last_page_user;
39850 };
39851
39852+struct be_rx_stats {
39853+ u32 rx_post_fail;/* number of ethrx buffer alloc failures */
39854+ u32 rx_polls; /* number of times NAPI called poll function */
39855+ u32 rx_events; /* number of ucast rx completion events */
39856+ u32 rx_compl; /* number of rx completion entries processed */
39857+ ulong rx_jiffies;
39858+ u64 rx_bytes;
39859+ u64 rx_bytes_prev;
39860+ u64 rx_pkts;
39861+ u32 rx_rate;
39862+ u32 rx_mcast_pkts;
39863+ u32 rxcp_err; /* Num rx completion entries w/ err set. */
39864+ ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39865+ u32 rx_frags;
39866+ u32 prev_rx_frags;
39867+ u32 rx_fps; /* Rx frags per second */
39868+ u32 rx_drops_no_frags;
39869+};
39870+
39871+struct be_rx_compl_info {
39872+ u32 rss_hash;
39873+ u16 vlan_tag;
39874+ u16 pkt_size;
39875+ u16 rxq_idx;
39876+ u16 port;
39877+ u8 vlanf;
39878+ u8 num_rcvd;
39879+ u8 err;
39880+ u8 ipf;
39881+ u8 tcpf;
39882+ u8 udpf;
39883+ u8 ip_csum;
39884+ u8 l4_csum;
39885+ u8 ipv6;
39886+ u8 vtm;
39887+ u8 pkt_type;
39888+};
39889+
39890 struct be_rx_obj {
39891+ struct be_adapter *adapter;
39892 struct be_queue_info q;
39893 struct be_queue_info cq;
39894- struct be_rx_page_info page_info_tbl[RX_Q_LEN];
39895+ struct be_rx_compl_info rxcp;
39896+ struct be_rx_page_info *page_info_tbl;
39897+ struct net_lro_mgr lro_mgr;
39898+ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
39899+ struct be_eq_obj rx_eq;
39900+ struct be_rx_stats stats;
39901+ u8 rss_id;
39902+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
39903+ u16 prev_frag_idx;
39904+ u32 cache_line_barrier[16];
39905 };
39906
39907-#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
39908+struct be_drv_stats {
39909+ u32 be_on_die_temperature;
39910+ u32 be_tx_events;
39911+ u32 eth_red_drops;
39912+ u32 rx_drops_no_pbuf;
39913+ u32 rx_drops_no_txpb;
39914+ u32 rx_drops_no_erx_descr;
39915+ u32 rx_drops_no_tpre_descr;
39916+ u32 rx_drops_too_many_frags;
39917+ u32 rx_drops_invalid_ring;
39918+ u32 forwarded_packets;
39919+ u32 rx_drops_mtu;
39920+ u32 rx_crc_errors;
39921+ u32 rx_alignment_symbol_errors;
39922+ u32 rx_pause_frames;
39923+ u32 rx_priority_pause_frames;
39924+ u32 rx_control_frames;
39925+ u32 rx_in_range_errors;
39926+ u32 rx_out_range_errors;
39927+ u32 rx_frame_too_long;
39928+ u32 rx_address_match_errors;
39929+ u32 rx_dropped_too_small;
39930+ u32 rx_dropped_too_short;
39931+ u32 rx_dropped_header_too_small;
39932+ u32 rx_dropped_tcp_length;
39933+ u32 rx_dropped_runt;
39934+ u32 rx_ip_checksum_errs;
39935+ u32 rx_tcp_checksum_errs;
39936+ u32 rx_udp_checksum_errs;
39937+ u32 rx_switched_unicast_packets;
39938+ u32 rx_switched_multicast_packets;
39939+ u32 rx_switched_broadcast_packets;
39940+ u32 tx_pauseframes;
39941+ u32 tx_priority_pauseframes;
39942+ u32 tx_controlframes;
39943+ u32 rxpp_fifo_overflow_drop;
39944+ u32 rx_input_fifo_overflow_drop;
39945+ u32 pmem_fifo_overflow_drop;
39946+ u32 jabber_events;
39947+};
39948+
39949+struct be_vf_cfg {
39950+ unsigned char vf_mac_addr[ETH_ALEN];
39951+ u32 vf_if_handle;
39952+ u32 vf_pmac_id;
39953+ u16 vf_def_vid;
39954+ u16 vf_vlan_tag;
39955+ u32 vf_tx_rate;
39956+};
39957+
39958+#define BE_INVALID_PMAC_ID 0xffffffff
39959+#define BE_FLAGS_DCBX (1 << 16)
39960+
39961 struct be_adapter {
39962 struct pci_dev *pdev;
39963 struct net_device *netdev;
39964@@ -228,7 +342,7 @@ struct be_adapter {
39965 u8 __iomem *db; /* Door Bell */
39966 u8 __iomem *pcicfg; /* PCI config space */
39967
39968- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
39969+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
39970 struct be_dma_mem mbox_mem;
39971 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
39972 * is stored for freeing purpose */
39973@@ -238,66 +352,121 @@ struct be_adapter {
39974 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
39975 spinlock_t mcc_cq_lock;
39976
39977- struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
39978- bool msix_enabled;
39979+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
39980+ u32 num_msix_vec;
39981 bool isr_registered;
39982
39983 /* TX Rings */
39984 struct be_eq_obj tx_eq;
39985- struct be_tx_obj tx_obj;
39986+ struct be_tx_obj tx_obj[MAX_TX_QS];
39987+ u8 num_tx_qs;
39988+ u8 prio_tc_map[MAX_TX_QS]; /* prio_tc_map[prio] => tc-id */
39989+ u8 tc_txq_map[MAX_TX_QS]; /* tc_txq_map[tc-id] => txq index */
39990
39991 u32 cache_line_break[8];
39992
39993 /* Rx rings */
39994- struct be_eq_obj rx_eq;
39995- struct be_rx_obj rx_obj;
39996+ struct be_rx_obj rx_obj[MAX_RX_QS]; /* one default non-rss Q */
39997+ u32 num_rx_qs;
39998+
39999+ struct be_dma_mem stats_cmd;
40000+ struct net_device_stats net_stats;
40001+ struct be_drv_stats drv_stats;
40002 u32 big_page_size; /* Compounded page size shared by rx wrbs */
40003- bool rx_post_starved; /* Zero rx frags have been posted to BE */
40004
40005 struct vlan_group *vlan_grp;
40006- u16 num_vlans;
40007+ u16 vlans_added;
40008+ u16 max_vlans; /* Number of vlans supported */
40009 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
40010+ u8 vlan_prio_bmap; /* Available priority BitMap */
40011+ u16 recommended_prio; /* Recommended Priority */
40012+ struct be_dma_mem rx_filter;
40013
40014- struct be_stats_obj stats;
40015 /* Work queue used to perform periodic tasks like getting statistics */
40016 struct delayed_work work;
40017+ u16 work_counter;
40018
40019- /* Ethtool knobs and info */
40020- bool rx_csum; /* BE card must perform rx-checksumming */
40021+ u32 flags;
40022+ bool rx_csum; /* BE card must perform rx-checksumming */
40023+ u32 max_rx_coal;
40024 char fw_ver[FW_VER_LEN];
40025 u32 if_handle; /* Used to configure filtering */
40026 u32 pmac_id; /* MAC addr handle used by BE card */
40027+ u32 beacon_state; /* for set_phys_id */
40028
40029- bool link_up;
40030+ bool eeh_err;
40031+ int link_status;
40032 u32 port_num;
40033+ u32 hba_port_num;
40034 bool promiscuous;
40035- u32 cap;
40036+ bool wol;
40037+ u32 function_mode;
40038+ u32 function_caps;
40039 u32 rx_fc; /* Rx flow control */
40040 u32 tx_fc; /* Tx flow control */
40041+ bool ue_detected;
40042+ bool stats_cmd_sent;
40043+ bool gro_supported;
40044+ int link_speed;
40045+ u8 port_type;
40046+ u8 transceiver;
40047+ u8 autoneg;
40048 u8 generation; /* BladeEngine ASIC generation */
40049+ u32 flash_status;
40050+ struct completion flash_compl;
40051+
40052+ u8 eq_next_idx;
40053+ bool be3_native;
40054+ u16 num_vfs;
40055+ struct be_vf_cfg *vf_cfg;
40056+ u8 is_virtfn;
40057+ u16 pvid;
40058+ u32 sli_family;
40059+ u8 port_name[4];
40060+ char model_number[32];
40061 };
40062
40063 /* BladeEngine Generation numbers */
40064 #define BE_GEN2 2
40065 #define BE_GEN3 3
40066
40067-extern const struct ethtool_ops be_ethtool_ops;
40068+#define ON 1
40069+#define OFF 0
40070+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
40071+#define lancer_A0_chip(adapter) \
40072+ (adapter->sli_family == LANCER_A0_SLI_FAMILY)
40073
40074-#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
40075+extern struct ethtool_ops be_ethtool_ops;
40076
40077-static inline unsigned int be_pci_func(struct be_adapter *adapter)
40078-{
40079- return PCI_FUNC(adapter->pdev->devfn);
40080-}
40081+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
40082+#define tx_stats(txo) (&txo->stats)
40083+#define rx_stats(rxo) (&rxo->stats)
40084
40085+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
40086+#define BE_SET_NETDEV_OPS(netdev, ops) be_netdev_ops_init(netdev, ops)
40087+#else
40088 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
40089+#endif
40090+
40091+#define for_all_rx_queues(adapter, rxo, i) \
40092+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
40093+ i++, rxo++)
40094+
40095+/* Just skip the first default non-rss queue */
40096+#define for_all_rss_queues(adapter, rxo, i) \
40097+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
40098+ i++, rxo++)
40099+
40100+#define for_all_tx_queues(adapter, txo, i) \
40101+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
40102+ i++, txo++)
40103
40104 #define PAGE_SHIFT_4K 12
40105 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
40106
40107 /* Returns number of pages spanned by the data starting at the given addr */
40108-#define PAGES_4K_SPANNED(_address, size) \
40109- ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40110+#define PAGES_4K_SPANNED(_address, size) \
40111+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40112 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
40113
40114 /* Byte offset into the page corresponding to given address */
40115@@ -305,7 +474,7 @@ static inline unsigned int be_pci_func(struct be_adapter *adapter)
40116 ((size_t)(addr) & (PAGE_SIZE_4K-1))
40117
40118 /* Returns bit offset within a DWORD of a bitfield */
40119-#define AMAP_BIT_OFFSET(_struct, field) \
40120+#define AMAP_BIT_OFFSET(_struct, field) \
40121 (((size_t)&(((_struct *)0)->field))%32)
40122
40123 /* Returns the bit mask of the field that is NOT shifted into location. */
40124@@ -356,6 +525,11 @@ static inline void swap_dws(void *wrb, int len)
40125 #endif /* __BIG_ENDIAN */
40126 }
40127
40128+static inline bool vlan_configured(struct be_adapter *adapter)
40129+{
40130+ return adapter->vlan_grp && adapter->vlans_added;
40131+}
40132+
40133 static inline u8 is_tcp_pkt(struct sk_buff *skb)
40134 {
40135 u8 val = 0;
40136@@ -380,9 +554,65 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
40137 return val;
40138 }
40139
40140+static inline u8 is_ipv6_ext_hdr(struct sk_buff *skb)
40141+{
40142+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40143+ if (ip_hdr(skb)->version == 6)
40144+ return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
40145+ else
40146+#endif
40147+ return 0;
40148+}
40149+
40150+static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
40151+{
40152+ u32 sli_intf;
40153+
40154+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
40155+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
40156+}
40157+
40158+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
40159+{
40160+ u32 addr;
40161+
40162+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
40163+
40164+ mac[5] = (u8)(addr & 0xFF);
40165+ mac[4] = (u8)((addr >> 8) & 0xFF);
40166+ mac[3] = (u8)((addr >> 16) & 0xFF);
40167+ /* Use the OUI programmed in hardware */
40168+ memcpy(mac, adapter->netdev->dev_addr, 3);
40169+}
40170+
40171+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
40172+ struct sk_buff *skb)
40173+{
40174+ u8 vlan_prio = 0;
40175+ u16 vlan_tag = 0;
40176+
40177+ vlan_tag = vlan_tx_tag_get(skb);
40178+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
40179+ /* If vlan priority provided by OS is NOT in available bmap */
40180+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
40181+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
40182+ adapter->recommended_prio;
40183+
40184+ return vlan_tag;
40185+}
40186+
40187+#define be_physfn(adapter) (!adapter->is_virtfn)
40188+
40189 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
40190 u16 num_popped);
40191-extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
40192+extern void be_link_status_update(struct be_adapter *adapter, int link_status);
40193 extern void netdev_stats_update(struct be_adapter *adapter);
40194+extern void be_parse_stats(struct be_adapter *adapter);
40195 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
40196+
40197+#ifdef CONFIG_PALAU
40198+extern void be_sysfs_create_group(struct be_adapter *adapter);
40199+extern void be_sysfs_remove_group(struct be_adapter *adapter);
40200+#endif
40201+
40202 #endif /* BE_H */
40203diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
40204index 28a0eda..b4ca89c 100644
40205--- a/drivers/net/benet/be_cmds.c
40206+++ b/drivers/net/benet/be_cmds.c
40207@@ -1,30 +1,45 @@
40208 /*
40209- * Copyright (C) 2005 - 2009 ServerEngines
40210+ * Copyright (C) 2005 - 2011 Emulex
40211 * All rights reserved.
40212 *
40213 * This program is free software; you can redistribute it and/or
40214 * modify it under the terms of the GNU General Public License version 2
40215- * as published by the Free Software Foundation. The full GNU General
40216+ * as published by the Free Software Foundation. The full GNU General
40217 * Public License is included in this distribution in the file called COPYING.
40218 *
40219 * Contact Information:
40220- * linux-drivers@serverengines.com
40221+ * linux-drivers@emulex.com
40222 *
40223- * ServerEngines
40224- * 209 N. Fair Oaks Ave
40225- * Sunnyvale, CA 94085
40226+ * Emulex
40227+ * 3333 Susan Street
40228+ * Costa Mesa, CA 92626
40229 */
40230
40231 #include "be.h"
40232 #include "be_cmds.h"
40233
40234+/* Must be a power of 2 or else MODULO will BUG_ON */
40235+static int be_get_temp_freq = 64;
40236+
40237+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40238+{
40239+ return wrb->payload.embedded_payload;
40240+}
40241+
40242 static void be_mcc_notify(struct be_adapter *adapter)
40243 {
40244 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40245 u32 val = 0;
40246
40247+ if (adapter->eeh_err) {
40248+ dev_info(&adapter->pdev->dev, "Error in Card Detected! Cannot issue commands\n");
40249+ return;
40250+ }
40251+
40252 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
40253 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
40254+
40255+ wmb();
40256 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40257 }
40258
40259@@ -59,21 +74,67 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
40260
40261 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
40262 CQE_STATUS_COMPL_MASK;
40263+
40264+ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
40265+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
40266+ adapter->flash_status = compl_status;
40267+ complete(&adapter->flash_compl);
40268+ }
40269+
40270 if (compl_status == MCC_STATUS_SUCCESS) {
40271- if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
40272- struct be_cmd_resp_get_stats *resp =
40273- adapter->stats.cmd.va;
40274- be_dws_le_to_cpu(&resp->hw_stats,
40275- sizeof(resp->hw_stats));
40276+ if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
40277+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
40278+ if (adapter->generation == BE_GEN3) {
40279+ struct be_cmd_resp_get_stats_v1 *resp =
40280+ adapter->stats_cmd.va;
40281+
40282+ be_dws_le_to_cpu(&resp->hw_stats,
40283+ sizeof(resp->hw_stats));
40284+ } else {
40285+ struct be_cmd_resp_get_stats_v0 *resp =
40286+ adapter->stats_cmd.va;
40287+
40288+ be_dws_le_to_cpu(&resp->hw_stats,
40289+ sizeof(resp->hw_stats));
40290+ }
40291+ be_parse_stats(adapter);
40292 netdev_stats_update(adapter);
40293+ adapter->stats_cmd_sent = false;
40294+ }
40295+ if (compl->tag0 ==
40296+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
40297+ struct be_mcc_wrb *mcc_wrb =
40298+ queue_index_node(&adapter->mcc_obj.q,
40299+ compl->tag1);
40300+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
40301+ embedded_payload(mcc_wrb);
40302+ adapter->drv_stats.be_on_die_temperature =
40303+ resp->on_die_temperature;
40304+ }
40305+ } else {
40306+ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
40307+ be_get_temp_freq = 0;
40308+
40309+ if (compl->tag1 == MCC_WRB_PASS_THRU)
40310+ goto done;
40311+
40312+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
40313+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
40314+ goto done;
40315+
40316+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
40317+ dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
40318+ "permitted to execute this cmd (opcode %d)\n",
40319+ compl->tag0);
40320+ } else {
40321+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40322+ CQE_STATUS_EXTD_MASK;
40323+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
40324+ "status %d, extd-status %d\n",
40325+ compl->tag0, compl_status, extd_status);
40326 }
40327- } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
40328- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40329- CQE_STATUS_EXTD_MASK;
40330- dev_warn(&adapter->pdev->dev,
40331- "Error in cmd completion: status(compl/extd)=%d/%d\n",
40332- compl_status, extd_status);
40333 }
40334+done:
40335 return compl_status;
40336 }
40337
40338@@ -82,7 +143,70 @@ static void be_async_link_state_process(struct be_adapter *adapter,
40339 struct be_async_event_link_state *evt)
40340 {
40341 be_link_status_update(adapter,
40342- evt->port_link_status == ASYNC_EVENT_LINK_UP);
40343+ ((evt->port_link_status & ~ASYNC_EVENT_LOGICAL) ==
40344+ ASYNC_EVENT_LINK_UP ? LINK_UP : LINK_DOWN));
40345+}
40346+
40347+/* Grp5 CoS Priority evt */
40348+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
40349+ struct be_async_event_grp5_cos_priority *evt)
40350+{
40351+ if (evt->valid) {
40352+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
40353+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
40354+ adapter->recommended_prio =
40355+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
40356+ }
40357+}
40358+
40359+/* Grp5 QOS Speed evt */
40360+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
40361+ struct be_async_event_grp5_qos_link_speed *evt)
40362+{
40363+ if (evt->physical_port == adapter->hba_port_num) {
40364+ /* qos_link_speed is in units of 10 Mbps */
40365+ adapter->link_speed = evt->qos_link_speed * 10;
40366+ }
40367+}
40368+
40369+/*Grp5 PVID evt*/
40370+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
40371+ struct be_async_event_grp5_pvid_state *evt)
40372+{
40373+ if (evt->enabled)
40374+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK ;
40375+ else
40376+ adapter->pvid = 0;
40377+}
40378+
40379+static void be_async_grp5_evt_process(struct be_adapter *adapter,
40380+ u32 trailer, struct be_mcc_compl *evt)
40381+{
40382+ u8 event_type = 0;
40383+
40384+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
40385+ ASYNC_TRAILER_EVENT_TYPE_MASK;
40386+
40387+ switch (event_type) {
40388+ case ASYNC_EVENT_COS_PRIORITY:
40389+ be_async_grp5_cos_priority_process(adapter,
40390+ (struct be_async_event_grp5_cos_priority *)evt);
40391+ break;
40392+ case ASYNC_EVENT_QOS_SPEED:
40393+ be_async_grp5_qos_speed_process(adapter,
40394+ (struct be_async_event_grp5_qos_link_speed *)evt);
40395+ break;
40396+ case ASYNC_EVENT_PVID_STATE:
40397+ be_async_grp5_pvid_state_process(adapter,
40398+ (struct be_async_event_grp5_pvid_state *)evt);
40399+ break;
40400+ case GRP5_TYPE_PRIO_TC_MAP:
40401+ memcpy(adapter->prio_tc_map, evt, MAX_TX_QS);
40402+ break;
40403+ default:
40404+ printk(KERN_WARNING "Unknown grp5 event!\n");
40405+ break;
40406+ }
40407 }
40408
40409 static inline bool is_link_state_evt(u32 trailer)
40410@@ -92,6 +216,13 @@ static inline bool is_link_state_evt(u32 trailer)
40411 ASYNC_EVENT_CODE_LINK_STATE);
40412 }
40413
40414+static inline bool is_grp5_evt(u32 trailer)
40415+{
40416+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
40417+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
40418+ ASYNC_EVENT_CODE_GRP_5);
40419+}
40420+
40421 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40422 {
40423 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
40424@@ -104,46 +235,67 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40425 return NULL;
40426 }
40427
40428-int be_process_mcc(struct be_adapter *adapter)
40429+void be_async_mcc_enable(struct be_adapter *adapter)
40430+{
40431+ spin_lock_bh(&adapter->mcc_cq_lock);
40432+
40433+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
40434+ adapter->mcc_obj.rearm_cq = true;
40435+
40436+ spin_unlock_bh(&adapter->mcc_cq_lock);
40437+}
40438+
40439+void be_async_mcc_disable(struct be_adapter *adapter)
40440+{
40441+ adapter->mcc_obj.rearm_cq = false;
40442+}
40443+
40444+int be_process_mcc(struct be_adapter *adapter, int *status)
40445 {
40446 struct be_mcc_compl *compl;
40447- int num = 0, status = 0;
40448+ int num = 0;
40449+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40450
40451 spin_lock_bh(&adapter->mcc_cq_lock);
40452 while ((compl = be_mcc_compl_get(adapter))) {
40453 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
40454 /* Interpret flags as an async trailer */
40455- BUG_ON(!is_link_state_evt(compl->flags));
40456-
40457- /* Interpret compl as a async link evt */
40458- be_async_link_state_process(adapter,
40459+ if (is_link_state_evt(compl->flags))
40460+ be_async_link_state_process(adapter,
40461 (struct be_async_event_link_state *) compl);
40462+ else if (is_grp5_evt(compl->flags))
40463+ be_async_grp5_evt_process(adapter,
40464+ compl->flags, compl);
40465+
40466 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
40467- status = be_mcc_compl_process(adapter, compl);
40468- atomic_dec(&adapter->mcc_obj.q.used);
40469+ *status = be_mcc_compl_process(adapter, compl);
40470+ atomic_dec(&mcc_obj->q.used);
40471 }
40472 be_mcc_compl_use(compl);
40473 num++;
40474 }
40475
40476- if (num)
40477- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
40478-
40479 spin_unlock_bh(&adapter->mcc_cq_lock);
40480- return status;
40481+ return num;
40482 }
40483
40484 /* Wait till no more pending mcc requests are present */
40485 static int be_mcc_wait_compl(struct be_adapter *adapter)
40486 {
40487 #define mcc_timeout 120000 /* 12s timeout */
40488- int i, status;
40489+ int i, num, status = 0;
40490+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40491+
40492+ if (adapter->eeh_err)
40493+ return -EIO;
40494+
40495 for (i = 0; i < mcc_timeout; i++) {
40496- status = be_process_mcc(adapter);
40497- if (status)
40498- return status;
40499+ num = be_process_mcc(adapter, &status);
40500+ if (num)
40501+ be_cq_notify(adapter, mcc_obj->cq.id,
40502+ mcc_obj->rearm_cq, num);
40503
40504- if (atomic_read(&adapter->mcc_obj.q.used) == 0)
40505+ if (atomic_read(&mcc_obj->q.used) == 0)
40506 break;
40507 udelay(100);
40508 }
40509@@ -151,7 +303,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
40510 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
40511 return -1;
40512 }
40513- return 0;
40514+ return status;
40515 }
40516
40517 /* Notify MCC requests and wait for completion */
40518@@ -163,23 +315,34 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
40519
40520 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
40521 {
40522- int cnt = 0, wait = 5;
40523+ int msecs = 0;
40524 u32 ready;
40525
40526+ if (adapter->eeh_err) {
40527+ dev_err(&adapter->pdev->dev, "Error detected in card.Cannot issue commands\n");
40528+ return -EIO;
40529+ }
40530 do {
40531- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
40532+ ready = ioread32(db);
40533+ if (ready == 0xffffffff) {
40534+ dev_err(&adapter->pdev->dev,
40535+ "pci slot disconnected\n");
40536+ return -1;
40537+ }
40538+
40539+ ready &= MPU_MAILBOX_DB_RDY_MASK;
40540 if (ready)
40541 break;
40542
40543- if (cnt > 4000000) {
40544+ if (msecs > 4000) {
40545 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
40546+ be_detect_dump_ue(adapter);
40547 return -1;
40548 }
40549
40550- if (cnt > 50)
40551- wait = 200;
40552- cnt += wait;
40553- udelay(wait);
40554+ set_current_state(TASK_UNINTERRUPTIBLE);
40555+ schedule_timeout(msecs_to_jiffies(1));
40556+ msecs++;
40557 } while (true);
40558
40559 return 0;
40560@@ -198,6 +361,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40561 struct be_mcc_mailbox *mbox = mbox_mem->va;
40562 struct be_mcc_compl *compl = &mbox->compl;
40563
40564+ /* wait for ready to be set */
40565+ status = be_mbox_db_ready_wait(adapter, db);
40566+ if (status != 0)
40567+ return status;
40568+
40569 val |= MPU_MAILBOX_DB_HI_MASK;
40570 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
40571 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
40572@@ -232,7 +400,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40573
40574 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
40575 {
40576- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40577+ u32 sem;
40578+
40579+ if (lancer_chip(adapter))
40580+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
40581+ else
40582+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40583
40584 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
40585 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
40586@@ -245,30 +418,29 @@ int be_cmd_POST(struct be_adapter *adapter)
40587 {
40588 u16 stage;
40589 int status, timeout = 0;
40590+ struct device *dev = &adapter->pdev->dev;
40591
40592 do {
40593 status = be_POST_stage_get(adapter, &stage);
40594 if (status) {
40595- dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
40596- stage);
40597+ dev_err(dev, "POST error; stage=0x%x\n", stage);
40598 return -1;
40599 } else if (stage != POST_STAGE_ARMFW_RDY) {
40600 set_current_state(TASK_INTERRUPTIBLE);
40601- schedule_timeout(2 * HZ);
40602+ if (schedule_timeout(2 * HZ)) {
40603+ dev_err(dev, "POST cmd aborted\n");
40604+ return -EINTR;
40605+ }
40606 timeout += 2;
40607 } else {
40608 return 0;
40609 }
40610- } while (timeout < 20);
40611+ } while (timeout < 40);
40612
40613- dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
40614+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
40615 return -1;
40616 }
40617
40618-static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40619-{
40620- return wrb->payload.embedded_payload;
40621-}
40622
40623 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40624 {
40625@@ -277,7 +449,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40626
40627 /* Don't touch the hdr after it's prepared */
40628 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40629- bool embedded, u8 sge_cnt)
40630+ bool embedded, u8 sge_cnt, u32 opcode)
40631 {
40632 if (embedded)
40633 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
40634@@ -285,7 +457,8 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40635 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
40636 MCC_WRB_SGE_CNT_SHIFT;
40637 wrb->payload_length = payload_len;
40638- be_dws_cpu_to_le(wrb, 20);
40639+ wrb->tag0 = opcode;
40640+ be_dws_cpu_to_le(wrb, 8);
40641 }
40642
40643 /* Don't touch the hdr after it's prepared */
40644@@ -295,6 +468,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
40645 req_hdr->opcode = opcode;
40646 req_hdr->subsystem = subsystem;
40647 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
40648+ req_hdr->version = 0;
40649 }
40650
40651 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
40652@@ -349,7 +523,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40653 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40654 struct be_mcc_wrb *wrb;
40655
40656- BUG_ON(atomic_read(&mccq->used) >= mccq->len);
40657+ if (atomic_read(&mccq->used) >= mccq->len) {
40658+ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
40659+ return NULL;
40660+ }
40661+
40662 wrb = queue_head_node(mccq);
40663 queue_head_inc(mccq);
40664 atomic_inc(&mccq->used);
40665@@ -357,6 +535,59 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40666 return wrb;
40667 }
40668
40669+/* Tell fw we're about to start firing cmds by writing a
40670+ * special pattern across the wrb hdr; uses mbox
40671+ */
40672+int be_cmd_fw_init(struct be_adapter *adapter)
40673+{
40674+ u8 *wrb;
40675+ int status;
40676+
40677+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40678+ return -1;
40679+
40680+ wrb = (u8 *)wrb_from_mbox(adapter);
40681+ *wrb++ = 0xFF;
40682+ *wrb++ = 0x12;
40683+ *wrb++ = 0x34;
40684+ *wrb++ = 0xFF;
40685+ *wrb++ = 0xFF;
40686+ *wrb++ = 0x56;
40687+ *wrb++ = 0x78;
40688+ *wrb = 0xFF;
40689+
40690+ status = be_mbox_notify_wait(adapter);
40691+
40692+ mutex_unlock(&adapter->mbox_lock);
40693+ return status;
40694+}
40695+
40696+/* Tell fw we're done with firing cmds by writing a
40697+ * special pattern across the wrb hdr; uses mbox
40698+ */
40699+int be_cmd_fw_clean(struct be_adapter *adapter)
40700+{
40701+ u8 *wrb;
40702+ int status;
40703+
40704+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40705+ return -1;
40706+
40707+ wrb = (u8 *)wrb_from_mbox(adapter);
40708+ *wrb++ = 0xFF;
40709+ *wrb++ = 0xAA;
40710+ *wrb++ = 0xBB;
40711+ *wrb++ = 0xFF;
40712+ *wrb++ = 0xFF;
40713+ *wrb++ = 0xCC;
40714+ *wrb++ = 0xDD;
40715+ *wrb = 0xFF;
40716+
40717+ status = be_mbox_notify_wait(adapter);
40718+
40719+ mutex_unlock(&adapter->mbox_lock);
40720+ return status;
40721+}
40722 int be_cmd_eq_create(struct be_adapter *adapter,
40723 struct be_queue_info *eq, int eq_delay)
40724 {
40725@@ -365,20 +596,19 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40726 struct be_dma_mem *q_mem = &eq->dma_mem;
40727 int status;
40728
40729- spin_lock(&adapter->mbox_lock);
40730+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40731+ return -1;
40732
40733 wrb = wrb_from_mbox(adapter);
40734 req = embedded_payload(wrb);
40735
40736- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40737+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
40738
40739 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40740 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
40741
40742 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40743
40744- AMAP_SET_BITS(struct amap_eq_context, func, req->context,
40745- be_pci_func(adapter));
40746 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
40747 /* 4byte eqe*/
40748 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
40749@@ -397,7 +627,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40750 eq->created = true;
40751 }
40752
40753- spin_unlock(&adapter->mbox_lock);
40754+ mutex_unlock(&adapter->mbox_lock);
40755 return status;
40756 }
40757
40758@@ -409,12 +639,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40759 struct be_cmd_req_mac_query *req;
40760 int status;
40761
40762- spin_lock(&adapter->mbox_lock);
40763+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40764+ return -1;
40765
40766 wrb = wrb_from_mbox(adapter);
40767 req = embedded_payload(wrb);
40768
40769- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40770+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40771+ OPCODE_COMMON_NTWK_MAC_QUERY);
40772
40773 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40774 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
40775@@ -433,13 +665,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40776 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
40777 }
40778
40779- spin_unlock(&adapter->mbox_lock);
40780+ mutex_unlock(&adapter->mbox_lock);
40781 return status;
40782 }
40783
40784 /* Uses synchronous MCCQ */
40785 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40786- u32 if_id, u32 *pmac_id)
40787+ u32 if_id, u32 *pmac_id, u32 domain)
40788 {
40789 struct be_mcc_wrb *wrb;
40790 struct be_cmd_req_pmac_add *req;
40791@@ -448,13 +680,19 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40792 spin_lock_bh(&adapter->mcc_lock);
40793
40794 wrb = wrb_from_mccq(adapter);
40795+ if (!wrb) {
40796+ status = -EBUSY;
40797+ goto err;
40798+ }
40799 req = embedded_payload(wrb);
40800
40801- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40802+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40803+ OPCODE_COMMON_NTWK_PMAC_ADD);
40804
40805 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40806 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
40807
40808+ req->hdr.domain = domain;
40809 req->if_id = cpu_to_le32(if_id);
40810 memcpy(req->mac_address, mac_addr, ETH_ALEN);
40811
40812@@ -464,12 +702,13 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40813 *pmac_id = le32_to_cpu(resp->pmac_id);
40814 }
40815
40816+err:
40817 spin_unlock_bh(&adapter->mcc_lock);
40818 return status;
40819 }
40820
40821 /* Uses synchronous MCCQ */
40822-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40823+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
40824 {
40825 struct be_mcc_wrb *wrb;
40826 struct be_cmd_req_pmac_del *req;
40827@@ -478,20 +717,26 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40828 spin_lock_bh(&adapter->mcc_lock);
40829
40830 wrb = wrb_from_mccq(adapter);
40831+ if (!wrb) {
40832+ status = -EBUSY;
40833+ goto err;
40834+ }
40835 req = embedded_payload(wrb);
40836
40837- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40838+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40839+ OPCODE_COMMON_NTWK_PMAC_DEL);
40840
40841 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40842 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
40843
40844+ req->hdr.domain = dom;
40845 req->if_id = cpu_to_le32(if_id);
40846 req->pmac_id = cpu_to_le32(pmac_id);
40847
40848 status = be_mcc_notify_wait(adapter);
40849
40850+err:
40851 spin_unlock_bh(&adapter->mcc_lock);
40852-
40853 return status;
40854 }
40855
40856@@ -506,29 +751,51 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40857 void *ctxt;
40858 int status;
40859
40860- spin_lock(&adapter->mbox_lock);
40861+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40862+ return -1;
40863
40864 wrb = wrb_from_mbox(adapter);
40865 req = embedded_payload(wrb);
40866 ctxt = &req->context;
40867
40868- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40869+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40870+ OPCODE_COMMON_CQ_CREATE);
40871
40872 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40873 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
40874
40875 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40876
40877- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
40878- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
40879- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
40880- __ilog2_u32(cq->len/256));
40881- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
40882- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
40883- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
40884- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
40885- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
40886- AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
40887+ if (lancer_chip(adapter)) {
40888+ req->hdr.version = 2;
40889+ req->page_size = 1; /* 1 for 4K */
40890+ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
40891+ coalesce_wm);
40892+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
40893+ no_delay);
40894+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
40895+ __ilog2_u32(cq->len/256));
40896+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
40897+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
40898+ ctxt, 1);
40899+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
40900+ ctxt, eq->id);
40901+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
40902+ } else {
40903+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
40904+ coalesce_wm);
40905+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
40906+ ctxt, no_delay);
40907+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
40908+ __ilog2_u32(cq->len/256));
40909+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
40910+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
40911+ ctxt, sol_evts);
40912+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
40913+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
40914+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
40915+ }
40916+
40917 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40918
40919 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40920@@ -540,8 +807,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40921 cq->created = true;
40922 }
40923
40924- spin_unlock(&adapter->mbox_lock);
40925-
40926+ mutex_unlock(&adapter->mbox_lock);
40927 return status;
40928 }
40929
40930@@ -553,7 +819,68 @@ static u32 be_encoded_q_len(int q_len)
40931 return len_encoded;
40932 }
40933
40934-int be_cmd_mccq_create(struct be_adapter *adapter,
40935+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
40936+ struct be_queue_info *mccq,
40937+ struct be_queue_info *cq)
40938+{
40939+ struct be_mcc_wrb *wrb;
40940+ struct be_cmd_req_mcc_ext_create *req;
40941+ struct be_dma_mem *q_mem = &mccq->dma_mem;
40942+ void *ctxt;
40943+ int status;
40944+
40945+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40946+ return -1;
40947+
40948+ wrb = wrb_from_mbox(adapter);
40949+ req = embedded_payload(wrb);
40950+ ctxt = &req->context;
40951+
40952+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40953+ OPCODE_COMMON_MCC_CREATE_EXT);
40954+
40955+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40956+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
40957+
40958+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40959+ if (lancer_chip(adapter)) {
40960+ req->hdr.version = 1;
40961+ req->cq_id = cpu_to_le16(cq->id);
40962+
40963+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
40964+ be_encoded_q_len(mccq->len));
40965+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
40966+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
40967+ ctxt, cq->id);
40968+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
40969+ ctxt, 1);
40970+
40971+ } else {
40972+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40973+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40974+ be_encoded_q_len(mccq->len));
40975+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40976+ }
40977+
40978+ /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
40979+ req->async_event_bitmap[0] |= cpu_to_le32(0x00000022);
40980+
40981+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
40982+
40983+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40984+
40985+ status = be_mbox_notify_wait(adapter);
40986+ if (!status) {
40987+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
40988+ mccq->id = le16_to_cpu(resp->id);
40989+ mccq->created = true;
40990+ }
40991+
40992+ mutex_unlock(&adapter->mbox_lock);
40993+ return status;
40994+}
40995+
40996+int be_cmd_mccq_org_create(struct be_adapter *adapter,
40997 struct be_queue_info *mccq,
40998 struct be_queue_info *cq)
40999 {
41000@@ -563,24 +890,25 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
41001 void *ctxt;
41002 int status;
41003
41004- spin_lock(&adapter->mbox_lock);
41005+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41006+ return -1;
41007
41008 wrb = wrb_from_mbox(adapter);
41009 req = embedded_payload(wrb);
41010 ctxt = &req->context;
41011
41012- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41013+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41014+ OPCODE_COMMON_MCC_CREATE);
41015
41016 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41017 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
41018
41019- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41020+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
41021
41022- AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
41023- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
41024- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
41025- be_encoded_q_len(mccq->len));
41026- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
41027+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
41028+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
41029+ be_encoded_q_len(mccq->len));
41030+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
41031
41032 be_dws_cpu_to_le(ctxt, sizeof(req->context));
41033
41034@@ -592,75 +920,93 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
41035 mccq->id = le16_to_cpu(resp->id);
41036 mccq->created = true;
41037 }
41038- spin_unlock(&adapter->mbox_lock);
41039
41040+ mutex_unlock(&adapter->mbox_lock);
41041 return status;
41042 }
41043
41044-int be_cmd_txq_create(struct be_adapter *adapter,
41045- struct be_queue_info *txq,
41046+int be_cmd_mccq_create(struct be_adapter *adapter,
41047+ struct be_queue_info *mccq,
41048 struct be_queue_info *cq)
41049 {
41050+ int status;
41051+
41052+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
41053+ if (status && !lancer_chip(adapter)) {
41054+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
41055+ "or newer to avoid conflicting priorities between NIC "
41056+ "and FCoE traffic");
41057+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
41058+ }
41059+ return status;
41060+}
41061+
41062+int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq,
41063+ struct be_queue_info *cq, u8 *tc_id)
41064+{
41065 struct be_mcc_wrb *wrb;
41066 struct be_cmd_req_eth_tx_create *req;
41067 struct be_dma_mem *q_mem = &txq->dma_mem;
41068- void *ctxt;
41069 int status;
41070
41071- spin_lock(&adapter->mbox_lock);
41072+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41073+ return -1;
41074
41075 wrb = wrb_from_mbox(adapter);
41076 req = embedded_payload(wrb);
41077- ctxt = &req->context;
41078-
41079- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41080
41081+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_TX_CREATE);
41082 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
41083 sizeof(*req));
41084
41085- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41086+ if (adapter->flags & BE_FLAGS_DCBX || lancer_chip(adapter)) {
41087+ req->hdr.version = 1;
41088+ req->if_id = cpu_to_le16(adapter->if_handle);
41089+ }
41090+ if (adapter->flags & BE_FLAGS_DCBX)
41091+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_PRIORITY);
41092+ else
41093+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_STANDARD);
41094 req->ulp_num = BE_ULP1_NUM;
41095- req->type = BE_ETH_TX_RING_TYPE_STANDARD;
41096-
41097- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
41098- be_encoded_q_len(txq->len));
41099- AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
41100- be_pci_func(adapter));
41101- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
41102- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
41103-
41104- be_dws_cpu_to_le(ctxt, sizeof(req->context));
41105-
41106+ req->cq_id = cpu_to_le16(cq->id);
41107+ req->queue_size = be_encoded_q_len(txq->len);
41108+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41109 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
41110
41111 status = be_mbox_notify_wait(adapter);
41112 if (!status) {
41113 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
41114 txq->id = le16_to_cpu(resp->cid);
41115+ if (adapter->flags & BE_FLAGS_DCBX)
41116+ *tc_id = resp->tc_id;
41117 txq->created = true;
41118 }
41119
41120- spin_unlock(&adapter->mbox_lock);
41121-
41122+ mutex_unlock(&adapter->mbox_lock);
41123 return status;
41124 }
41125
41126-/* Uses mbox */
41127+/* Uses MCC */
41128 int be_cmd_rxq_create(struct be_adapter *adapter,
41129 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
41130- u16 max_frame_size, u32 if_id, u32 rss)
41131+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
41132 {
41133 struct be_mcc_wrb *wrb;
41134 struct be_cmd_req_eth_rx_create *req;
41135 struct be_dma_mem *q_mem = &rxq->dma_mem;
41136 int status;
41137
41138- spin_lock(&adapter->mbox_lock);
41139+ spin_lock_bh(&adapter->mcc_lock);
41140
41141- wrb = wrb_from_mbox(adapter);
41142+ wrb = wrb_from_mccq(adapter);
41143+ if (!wrb) {
41144+ status = -EBUSY;
41145+ goto err;
41146+ }
41147 req = embedded_payload(wrb);
41148
41149- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41150+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41151+ OPCODE_ETH_RX_CREATE);
41152
41153 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
41154 sizeof(*req));
41155@@ -673,15 +1019,16 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
41156 req->max_frame_size = cpu_to_le16(max_frame_size);
41157 req->rss_queue = cpu_to_le32(rss);
41158
41159- status = be_mbox_notify_wait(adapter);
41160+ status = be_mcc_notify_wait(adapter);
41161 if (!status) {
41162 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
41163 rxq->id = le16_to_cpu(resp->id);
41164 rxq->created = true;
41165+ *rss_id = resp->rss_id;
41166 }
41167
41168- spin_unlock(&adapter->mbox_lock);
41169-
41170+err:
41171+ spin_unlock_bh(&adapter->mcc_lock);
41172 return status;
41173 }
41174
41175@@ -696,13 +1043,12 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41176 u8 subsys = 0, opcode = 0;
41177 int status;
41178
41179- spin_lock(&adapter->mbox_lock);
41180+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41181+ return -1;
41182
41183 wrb = wrb_from_mbox(adapter);
41184 req = embedded_payload(wrb);
41185
41186- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41187-
41188 switch (queue_type) {
41189 case QTYPE_EQ:
41190 subsys = CMD_SUBSYSTEM_COMMON;
41191@@ -727,13 +1073,47 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41192 default:
41193 BUG();
41194 }
41195+
41196+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
41197+
41198 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
41199 req->id = cpu_to_le16(q->id);
41200
41201 status = be_mbox_notify_wait(adapter);
41202+ if (!status)
41203+ q->created = false;
41204
41205- spin_unlock(&adapter->mbox_lock);
41206+ mutex_unlock(&adapter->mbox_lock);
41207+ return status;
41208+}
41209
41210+/* Uses MCC */
41211+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
41212+{
41213+ struct be_mcc_wrb *wrb;
41214+ struct be_cmd_req_q_destroy *req;
41215+ int status;
41216+
41217+ spin_lock_bh(&adapter->mcc_lock);
41218+
41219+ wrb = wrb_from_mccq(adapter);
41220+ if (!wrb) {
41221+ status = -EBUSY;
41222+ goto err;
41223+ }
41224+ req = embedded_payload(wrb);
41225+
41226+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
41227+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
41228+ sizeof(*req));
41229+ req->id = cpu_to_le16(q->id);
41230+
41231+ status = be_mcc_notify_wait(adapter);
41232+ if (!status)
41233+ q->created = false;
41234+
41235+err:
41236+ spin_unlock_bh(&adapter->mcc_lock);
41237 return status;
41238 }
41239
41240@@ -741,22 +1121,26 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41241 * Uses mbox
41242 */
41243 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41244- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
41245+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
41246+ u32 domain)
41247 {
41248 struct be_mcc_wrb *wrb;
41249 struct be_cmd_req_if_create *req;
41250 int status;
41251
41252- spin_lock(&adapter->mbox_lock);
41253+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41254+ return -1;
41255
41256 wrb = wrb_from_mbox(adapter);
41257 req = embedded_payload(wrb);
41258
41259- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41260+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41261+ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
41262
41263 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41264 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
41265
41266+ req->hdr.domain = domain;
41267 req->capability_flags = cpu_to_le32(cap_flags);
41268 req->enable_flags = cpu_to_le32(en_flags);
41269 req->pmac_invalid = pmac_invalid;
41270@@ -771,33 +1155,35 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41271 *pmac_id = le32_to_cpu(resp->pmac_id);
41272 }
41273
41274- spin_unlock(&adapter->mbox_lock);
41275+ mutex_unlock(&adapter->mbox_lock);
41276 return status;
41277 }
41278
41279 /* Uses mbox */
41280-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41281+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
41282 {
41283 struct be_mcc_wrb *wrb;
41284 struct be_cmd_req_if_destroy *req;
41285 int status;
41286
41287- spin_lock(&adapter->mbox_lock);
41288+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41289+ return -1;
41290
41291 wrb = wrb_from_mbox(adapter);
41292 req = embedded_payload(wrb);
41293
41294- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41295+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41296+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
41297
41298 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41299 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
41300
41301+ req->hdr.domain = domain;
41302 req->interface_id = cpu_to_le32(interface_id);
41303
41304 status = be_mbox_notify_wait(adapter);
41305
41306- spin_unlock(&adapter->mbox_lock);
41307-
41308+ mutex_unlock(&adapter->mbox_lock);
41309 return status;
41310 }
41311
41312@@ -808,33 +1194,48 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41313 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
41314 {
41315 struct be_mcc_wrb *wrb;
41316- struct be_cmd_req_get_stats *req;
41317+ struct be_cmd_req_hdr *hdr;
41318 struct be_sge *sge;
41319+ int status = 0;
41320+
41321+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
41322+ be_cmd_get_die_temperature(adapter);
41323
41324 spin_lock_bh(&adapter->mcc_lock);
41325
41326 wrb = wrb_from_mccq(adapter);
41327- req = nonemb_cmd->va;
41328+ if (!wrb) {
41329+ status = -EBUSY;
41330+ goto err;
41331+ }
41332+ hdr = nonemb_cmd->va;
41333 sge = nonembedded_sgl(wrb);
41334
41335- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
41336- wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
41337+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
41338+ OPCODE_ETH_GET_STATISTICS);
41339
41340- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41341- OPCODE_ETH_GET_STATISTICS, sizeof(*req));
41342+ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
41343+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
41344+
41345+ if (adapter->generation == BE_GEN3)
41346+ hdr->version = 1;
41347+
41348+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
41349 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
41350 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
41351 sge->len = cpu_to_le32(nonemb_cmd->size);
41352
41353 be_mcc_notify(adapter);
41354+ adapter->stats_cmd_sent = true;
41355
41356+err:
41357 spin_unlock_bh(&adapter->mcc_lock);
41358- return 0;
41359+ return status;
41360 }
41361
41362 /* Uses synchronous mcc */
41363 int be_cmd_link_status_query(struct be_adapter *adapter,
41364- bool *link_up)
41365+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom)
41366 {
41367 struct be_mcc_wrb *wrb;
41368 struct be_cmd_req_link_status *req;
41369@@ -843,50 +1244,216 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
41370 spin_lock_bh(&adapter->mcc_lock);
41371
41372 wrb = wrb_from_mccq(adapter);
41373+ if (!wrb) {
41374+ status = -EBUSY;
41375+ goto err;
41376+ }
41377 req = embedded_payload(wrb);
41378
41379- *link_up = false;
41380+ *link_status = LINK_DOWN;
41381
41382- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41383+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41384+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
41385
41386 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41387 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
41388
41389+ req->hdr.domain = dom;
41390+
41391 status = be_mcc_notify_wait(adapter);
41392 if (!status) {
41393 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
41394- if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
41395- *link_up = true;
41396+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
41397+ *link_status = LINK_UP;
41398+ *link_speed = le16_to_cpu(resp->link_speed);
41399+ *mac_speed = resp->mac_speed;
41400+ }
41401 }
41402
41403+err:
41404 spin_unlock_bh(&adapter->mcc_lock);
41405 return status;
41406 }
41407
41408-/* Uses Mbox */
41409-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
41410+/* Uses synchronous mcc */
41411+int be_cmd_get_die_temperature(struct be_adapter *adapter)
41412+{
41413+ struct be_mcc_wrb *wrb;
41414+ struct be_cmd_req_get_cntl_addnl_attribs *req;
41415+ u16 mccq_index;
41416+ int status;
41417+
41418+ spin_lock_bh(&adapter->mcc_lock);
41419+
41420+ mccq_index = adapter->mcc_obj.q.head;
41421+
41422+ wrb = wrb_from_mccq(adapter);
41423+ if (!wrb) {
41424+ status = -EBUSY;
41425+ goto err;
41426+ }
41427+ req = embedded_payload(wrb);
41428+
41429+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41430+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
41431+
41432+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41433+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
41434+
41435+ wrb->tag1 = mccq_index;
41436+
41437+ be_mcc_notify(adapter);
41438+
41439+err:
41440+ spin_unlock_bh(&adapter->mcc_lock);
41441+ return status;
41442+}
41443+
41444+
41445+/* Uses synchronous mcc */
41446+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
41447+{
41448+ struct be_mcc_wrb *wrb;
41449+ struct be_cmd_req_get_fat *req;
41450+ int status;
41451+
41452+ spin_lock_bh(&adapter->mcc_lock);
41453+
41454+ wrb = wrb_from_mccq(adapter);
41455+ if (!wrb) {
41456+ status = -EBUSY;
41457+ goto err;
41458+ }
41459+ req = embedded_payload(wrb);
41460+
41461+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41462+ OPCODE_COMMON_MANAGE_FAT);
41463+
41464+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41465+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
41466+ req->fat_operation = cpu_to_le32(QUERY_FAT);
41467+ status = be_mcc_notify_wait(adapter);
41468+ if (!status) {
41469+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
41470+ if (log_size && resp->log_size)
41471+ *log_size = le32_to_cpu(resp->log_size) -
41472+ sizeof(u32);
41473+ }
41474+err:
41475+ spin_unlock_bh(&adapter->mcc_lock);
41476+ return status;
41477+}
41478+
41479+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
41480+{
41481+ struct be_dma_mem get_fat_cmd;
41482+ struct be_mcc_wrb *wrb;
41483+ struct be_cmd_req_get_fat *req;
41484+ struct be_sge *sge;
41485+ u32 offset = 0, total_size, buf_size,
41486+ log_offset = sizeof(u32), payload_len;
41487+ int status;
41488+
41489+ if (buf_len == 0)
41490+ return;
41491+
41492+ total_size = buf_len;
41493+
41494+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
41495+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
41496+ get_fat_cmd.size,
41497+ &get_fat_cmd.dma);
41498+ if (!get_fat_cmd.va) {
41499+ status = -ENOMEM;
41500+ dev_err(&adapter->pdev->dev,
41501+ "Memory allocation failure while retrieving FAT data\n");
41502+ return;
41503+ }
41504+
41505+ spin_lock_bh(&adapter->mcc_lock);
41506+
41507+ while (total_size) {
41508+ buf_size = min(total_size, (u32)60*1024);
41509+ total_size -= buf_size;
41510+
41511+ wrb = wrb_from_mccq(adapter);
41512+ if (!wrb) {
41513+ status = -EBUSY;
41514+ goto err;
41515+ }
41516+ req = get_fat_cmd.va;
41517+ sge = nonembedded_sgl(wrb);
41518+
41519+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
41520+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
41521+ OPCODE_COMMON_MANAGE_FAT);
41522+
41523+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41524+ OPCODE_COMMON_MANAGE_FAT, payload_len);
41525+
41526+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
41527+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
41528+ sge->len = cpu_to_le32(get_fat_cmd.size);
41529+
41530+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
41531+ req->read_log_offset = cpu_to_le32(log_offset);
41532+ req->read_log_length = cpu_to_le32(buf_size);
41533+ req->data_buffer_size = cpu_to_le32(buf_size);
41534+
41535+ status = be_mcc_notify_wait(adapter);
41536+ if (!status) {
41537+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
41538+ memcpy(buf + offset,
41539+ resp->data_buffer,
41540+ le32_to_cpu(resp->read_log_length));
41541+ } else {
41542+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
41543+ goto err;
41544+ }
41545+ offset += buf_size;
41546+ log_offset += buf_size;
41547+ }
41548+err:
41549+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
41550+ get_fat_cmd.va,
41551+ get_fat_cmd.dma);
41552+ spin_unlock_bh(&adapter->mcc_lock);
41553+}
41554+
41555+/* Uses synchronous mcc */
41556+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
41557+ char *fw_on_flash)
41558 {
41559 struct be_mcc_wrb *wrb;
41560 struct be_cmd_req_get_fw_version *req;
41561 int status;
41562
41563- spin_lock(&adapter->mbox_lock);
41564+ spin_lock_bh(&adapter->mcc_lock);
41565+
41566+ wrb = wrb_from_mccq(adapter);
41567+ if (!wrb) {
41568+ status = -EBUSY;
41569+ goto err;
41570+ }
41571
41572- wrb = wrb_from_mbox(adapter);
41573 req = embedded_payload(wrb);
41574
41575- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41576+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41577+ OPCODE_COMMON_GET_FW_VERSION);
41578
41579 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41580 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
41581
41582- status = be_mbox_notify_wait(adapter);
41583+ status = be_mcc_notify_wait(adapter);
41584 if (!status) {
41585 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
41586- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
41587+ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN-1);
41588+ if (fw_on_flash)
41589+ strncpy(fw_on_flash, resp->fw_on_flash_version_string,
41590+ FW_VER_LEN-1);
41591 }
41592-
41593- spin_unlock(&adapter->mbox_lock);
41594+err:
41595+ spin_unlock_bh(&adapter->mcc_lock);
41596 return status;
41597 }
41598
41599@@ -897,13 +1464,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41600 {
41601 struct be_mcc_wrb *wrb;
41602 struct be_cmd_req_modify_eq_delay *req;
41603+ int status = 0;
41604
41605 spin_lock_bh(&adapter->mcc_lock);
41606
41607 wrb = wrb_from_mccq(adapter);
41608+ if (!wrb) {
41609+ status = -EBUSY;
41610+ goto err;
41611+ }
41612 req = embedded_payload(wrb);
41613
41614- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41615+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41616+ OPCODE_COMMON_MODIFY_EQ_DELAY);
41617
41618 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41619 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
41620@@ -915,8 +1488,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41621
41622 be_mcc_notify(adapter);
41623
41624+err:
41625 spin_unlock_bh(&adapter->mcc_lock);
41626- return 0;
41627+ return status;
41628 }
41629
41630 /* Uses sycnhronous mcc */
41631@@ -930,9 +1504,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41632 spin_lock_bh(&adapter->mcc_lock);
41633
41634 wrb = wrb_from_mccq(adapter);
41635+ if (!wrb) {
41636+ status = -EBUSY;
41637+ goto err;
41638+ }
41639 req = embedded_payload(wrb);
41640
41641- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41642+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41643+ OPCODE_COMMON_NTWK_VLAN_CONFIG);
41644
41645 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41646 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
41647@@ -948,79 +1527,63 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41648
41649 status = be_mcc_notify_wait(adapter);
41650
41651+err:
41652 spin_unlock_bh(&adapter->mcc_lock);
41653 return status;
41654 }
41655
41656-/* Uses MCC for this command as it may be called in BH context
41657- * Uses synchronous mcc
41658- */
41659-int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
41660+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
41661 {
41662 struct be_mcc_wrb *wrb;
41663- struct be_cmd_req_promiscuous_config *req;
41664+ struct be_dma_mem *mem = &adapter->rx_filter;
41665+ struct be_cmd_req_rx_filter *req = mem->va;
41666+ struct be_sge *sge;
41667 int status;
41668
41669 spin_lock_bh(&adapter->mcc_lock);
41670
41671 wrb = wrb_from_mccq(adapter);
41672- req = embedded_payload(wrb);
41673-
41674- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41675-
41676- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41677- OPCODE_ETH_PROMISCUOUS, sizeof(*req));
41678-
41679- if (port_num)
41680- req->port1_promiscuous = en;
41681- else
41682- req->port0_promiscuous = en;
41683-
41684- status = be_mcc_notify_wait(adapter);
41685-
41686- spin_unlock_bh(&adapter->mcc_lock);
41687- return status;
41688-}
41689-
41690-/*
41691- * Uses MCC for this command as it may be called in BH context
41692- * (mc == NULL) => multicast promiscous
41693- */
41694-int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
41695- struct dev_mc_list *mc_list, u32 mc_count)
41696-{
41697-#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
41698- struct be_mcc_wrb *wrb;
41699- struct be_cmd_req_mcast_mac_config *req;
41700-
41701- spin_lock_bh(&adapter->mcc_lock);
41702-
41703- wrb = wrb_from_mccq(adapter);
41704- req = embedded_payload(wrb);
41705-
41706- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41707-
41708+ if (!wrb) {
41709+ status = -EBUSY;
41710+ goto err;
41711+ }
41712+ sge = nonembedded_sgl(wrb);
41713+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
41714+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
41715+ sge->len = cpu_to_le32(mem->size);
41716+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41717+ OPCODE_COMMON_NTWK_RX_FILTER);
41718+
41719+ memset(req, 0, sizeof(*req));
41720 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41721- OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
41722+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
41723
41724- req->interface_id = if_id;
41725- if (mc_list && mc_count <= BE_MAX_MC) {
41726- int i;
41727- struct dev_mc_list *mc;
41728-
41729- req->num_mac = cpu_to_le16(mc_count);
41730-
41731- for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
41732- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
41733+ req->if_id = cpu_to_le32(adapter->if_handle);
41734+ if (flags & IFF_PROMISC) {
41735+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41736+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41737+ if (value == ON)
41738+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41739+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41740+ } else if (flags & IFF_ALLMULTI) {
41741+ req->if_flags_mask = req->if_flags =
41742+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
41743 } else {
41744- req->promiscuous = 1;
41745- }
41746+ struct netdev_hw_addr *ha;
41747+ int i = 0;
41748
41749- be_mcc_notify_wait(adapter);
41750+ req->if_flags_mask = req->if_flags =
41751+ cpu_to_le32(BE_IF_FLAGS_MULTICAST);
41752+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
41753+ netdev_for_each_mc_addr(ha, adapter->netdev)
41754+ memcpy(req->mcast_mac[i++].byte, ha->DMI_ADDR,
41755+ ETH_ALEN);
41756+ }
41757+ status = be_mcc_notify_wait(adapter);
41758
41759+err:
41760 spin_unlock_bh(&adapter->mcc_lock);
41761-
41762- return 0;
41763+ return status;
41764 }
41765
41766 /* Uses synchrounous mcc */
41767@@ -1033,9 +1596,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41768 spin_lock_bh(&adapter->mcc_lock);
41769
41770 wrb = wrb_from_mccq(adapter);
41771+ if (!wrb) {
41772+ status = -EBUSY;
41773+ goto err;
41774+ }
41775 req = embedded_payload(wrb);
41776
41777- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41778+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41779+ OPCODE_COMMON_SET_FLOW_CONTROL);
41780
41781 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41782 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
41783@@ -1045,6 +1613,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41784
41785 status = be_mcc_notify_wait(adapter);
41786
41787+err:
41788 spin_unlock_bh(&adapter->mcc_lock);
41789 return status;
41790 }
41791@@ -1059,9 +1628,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41792 spin_lock_bh(&adapter->mcc_lock);
41793
41794 wrb = wrb_from_mccq(adapter);
41795+ if (!wrb) {
41796+ status = -EBUSY;
41797+ goto err;
41798+ }
41799 req = embedded_payload(wrb);
41800
41801- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41802+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41803+ OPCODE_COMMON_GET_FLOW_CONTROL);
41804
41805 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41806 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
41807@@ -1074,23 +1648,27 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41808 *rx_fc = le16_to_cpu(resp->rx_flow_control);
41809 }
41810
41811+err:
41812 spin_unlock_bh(&adapter->mcc_lock);
41813 return status;
41814 }
41815
41816 /* Uses mbox */
41817-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41818+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
41819+ u32 *mode, u32 *function_caps)
41820 {
41821 struct be_mcc_wrb *wrb;
41822 struct be_cmd_req_query_fw_cfg *req;
41823 int status;
41824
41825- spin_lock(&adapter->mbox_lock);
41826+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41827+ return -1;
41828
41829 wrb = wrb_from_mbox(adapter);
41830 req = embedded_payload(wrb);
41831
41832- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41833+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41834+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
41835
41836 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41837 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
41838@@ -1099,10 +1677,11 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41839 if (!status) {
41840 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
41841 *port_num = le32_to_cpu(resp->phys_port);
41842- *cap = le32_to_cpu(resp->function_cap);
41843+ *mode = le32_to_cpu(resp->function_mode);
41844+ *function_caps = le32_to_cpu(resp->function_caps);
41845 }
41846
41847- spin_unlock(&adapter->mbox_lock);
41848+ mutex_unlock(&adapter->mbox_lock);
41849 return status;
41850 }
41851
41852@@ -1113,19 +1692,161 @@ int be_cmd_reset_function(struct be_adapter *adapter)
41853 struct be_cmd_req_hdr *req;
41854 int status;
41855
41856- spin_lock(&adapter->mbox_lock);
41857+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41858+ return -1;
41859
41860 wrb = wrb_from_mbox(adapter);
41861 req = embedded_payload(wrb);
41862
41863- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41864+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41865+ OPCODE_COMMON_FUNCTION_RESET);
41866
41867 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
41868 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
41869
41870 status = be_mbox_notify_wait(adapter);
41871
41872- spin_unlock(&adapter->mbox_lock);
41873+ mutex_unlock(&adapter->mbox_lock);
41874+ return status;
41875+}
41876+
41877+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
41878+{
41879+ struct be_mcc_wrb *wrb;
41880+ struct be_cmd_req_rss_config *req;
41881+ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
41882+ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
41883+ int status;
41884+
41885+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41886+ return -1;
41887+
41888+ wrb = wrb_from_mbox(adapter);
41889+ req = embedded_payload(wrb);
41890+
41891+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41892+ OPCODE_ETH_RSS_CONFIG);
41893+
41894+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41895+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
41896+
41897+ req->if_id = cpu_to_le32(adapter->if_handle);
41898+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
41899+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
41900+ memcpy(req->cpu_table, rsstable, table_size);
41901+ memcpy(req->hash, myhash, sizeof(myhash));
41902+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
41903+
41904+ status = be_mbox_notify_wait(adapter);
41905+
41906+ mutex_unlock(&adapter->mbox_lock);
41907+ return status;
41908+}
41909+
41910+/* Uses sync mcc */
41911+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
41912+ u8 bcn, u8 sts, u8 state)
41913+{
41914+ struct be_mcc_wrb *wrb;
41915+ struct be_cmd_req_enable_disable_beacon *req;
41916+ int status;
41917+
41918+ spin_lock_bh(&adapter->mcc_lock);
41919+
41920+ wrb = wrb_from_mccq(adapter);
41921+ if (!wrb) {
41922+ status = -EBUSY;
41923+ goto err;
41924+ }
41925+ req = embedded_payload(wrb);
41926+
41927+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41928+ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
41929+
41930+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41931+ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
41932+
41933+ req->port_num = port_num;
41934+ req->beacon_state = state;
41935+ req->beacon_duration = bcn;
41936+ req->status_duration = sts;
41937+
41938+ status = be_mcc_notify_wait(adapter);
41939+
41940+err:
41941+ spin_unlock_bh(&adapter->mcc_lock);
41942+ return status;
41943+}
41944+
41945+/* Uses sync mcc */
41946+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
41947+{
41948+ struct be_mcc_wrb *wrb;
41949+ struct be_cmd_req_get_beacon_state *req;
41950+ int status;
41951+
41952+ spin_lock_bh(&adapter->mcc_lock);
41953+
41954+ wrb = wrb_from_mccq(adapter);
41955+ if (!wrb) {
41956+ status = -EBUSY;
41957+ goto err;
41958+ }
41959+ req = embedded_payload(wrb);
41960+
41961+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41962+ OPCODE_COMMON_GET_BEACON_STATE);
41963+
41964+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41965+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
41966+
41967+ req->port_num = port_num;
41968+
41969+ status = be_mcc_notify_wait(adapter);
41970+ if (!status) {
41971+ struct be_cmd_resp_get_beacon_state *resp =
41972+ embedded_payload(wrb);
41973+ *state = resp->beacon_state;
41974+ }
41975+
41976+err:
41977+ spin_unlock_bh(&adapter->mcc_lock);
41978+ return status;
41979+}
41980+
41981+/* Uses sync mcc */
41982+int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
41983+ u8 *connector)
41984+{
41985+ struct be_mcc_wrb *wrb;
41986+ struct be_cmd_req_port_type *req;
41987+ int status;
41988+
41989+ spin_lock_bh(&adapter->mcc_lock);
41990+
41991+ wrb = wrb_from_mccq(adapter);
41992+ if (!wrb) {
41993+ status = -EBUSY;
41994+ goto err;
41995+ }
41996+ req = embedded_payload(wrb);
41997+
41998+ be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
41999+ OPCODE_COMMON_READ_TRANSRECV_DATA);
42000+
42001+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42002+ OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
42003+
42004+ req->port = cpu_to_le32(port);
42005+ req->page_num = cpu_to_le32(TR_PAGE_A0);
42006+ status = be_mcc_notify_wait(adapter);
42007+ if (!status) {
42008+ struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
42009+ *connector = resp->data.connector;
42010+ }
42011+
42012+err:
42013+ spin_unlock_bh(&adapter->mcc_lock);
42014 return status;
42015 }
42016
42017@@ -1133,16 +1854,24 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
42018 u32 flash_type, u32 flash_opcode, u32 buf_size)
42019 {
42020 struct be_mcc_wrb *wrb;
42021- struct be_cmd_write_flashrom *req = cmd->va;
42022+ struct be_cmd_write_flashrom *req;
42023 struct be_sge *sge;
42024 int status;
42025
42026 spin_lock_bh(&adapter->mcc_lock);
42027+ adapter->flash_status = 0;
42028
42029 wrb = wrb_from_mccq(adapter);
42030+ if (!wrb) {
42031+ status = -EBUSY;
42032+ goto err_unlock;
42033+ }
42034+ req = cmd->va;
42035 sge = nonembedded_sgl(wrb);
42036
42037- be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
42038+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42039+ OPCODE_COMMON_WRITE_FLASHROM);
42040+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
42041
42042 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42043 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
42044@@ -1154,8 +1883,852 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
42045 req->params.op_code = cpu_to_le32(flash_opcode);
42046 req->params.data_buf_size = cpu_to_le32(buf_size);
42047
42048+ be_mcc_notify(adapter);
42049+ spin_unlock_bh(&adapter->mcc_lock);
42050+
42051+ if (!wait_for_completion_timeout(&adapter->flash_compl,
42052+ msecs_to_jiffies(40000)))
42053+ status = -1;
42054+ else
42055+ status = adapter->flash_status;
42056+
42057+ return status;
42058+
42059+err_unlock:
42060+ spin_unlock_bh(&adapter->mcc_lock);
42061+ return status;
42062+}
42063+
42064+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
42065+ int offset)
42066+{
42067+ struct be_mcc_wrb *wrb;
42068+ struct be_cmd_write_flashrom *req;
42069+ int status;
42070+
42071+ spin_lock_bh(&adapter->mcc_lock);
42072+
42073+ wrb = wrb_from_mccq(adapter);
42074+ if (!wrb) {
42075+ status = -EBUSY;
42076+ goto err;
42077+ }
42078+ req = embedded_payload(wrb);
42079+
42080+ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
42081+ OPCODE_COMMON_READ_FLASHROM);
42082+
42083+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42084+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
42085+
42086+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
42087+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
42088+ req->params.offset = cpu_to_le32(offset);
42089+ req->params.data_buf_size = cpu_to_le32(0x4);
42090+
42091+ status = be_mcc_notify_wait(adapter);
42092+ if (!status)
42093+ memcpy(flashed_crc, req->params.data_buf, 4);
42094+
42095+err:
42096+ spin_unlock_bh(&adapter->mcc_lock);
42097+ return status;
42098+}
42099+
42100+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
42101+ struct be_dma_mem *nonemb_cmd)
42102+{
42103+ struct be_mcc_wrb *wrb;
42104+ struct be_cmd_req_acpi_wol_magic_config *req;
42105+ struct be_sge *sge;
42106+ int status;
42107+
42108+ spin_lock_bh(&adapter->mcc_lock);
42109+
42110+ wrb = wrb_from_mccq(adapter);
42111+ if (!wrb) {
42112+ status = -EBUSY;
42113+ goto err;
42114+ }
42115+ req = nonemb_cmd->va;
42116+ sge = nonembedded_sgl(wrb);
42117+
42118+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42119+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
42120+
42121+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42122+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
42123+ memcpy(req->magic_mac, mac, ETH_ALEN);
42124+
42125+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42126+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42127+ sge->len = cpu_to_le32(nonemb_cmd->size);
42128+
42129+ status = be_mcc_notify_wait(adapter);
42130+
42131+err:
42132+ spin_unlock_bh(&adapter->mcc_lock);
42133+ return status;
42134+}
42135+
42136+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
42137+ u8 loopback_type, u8 enable)
42138+{
42139+ struct be_mcc_wrb *wrb;
42140+ struct be_cmd_req_set_lmode *req;
42141+ int status;
42142+
42143+ spin_lock_bh(&adapter->mcc_lock);
42144+
42145+ wrb = wrb_from_mccq(adapter);
42146+ if (!wrb) {
42147+ status = -EBUSY;
42148+ goto err;
42149+ }
42150+
42151+ req = embedded_payload(wrb);
42152+
42153+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42154+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
42155+
42156+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42157+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
42158+ sizeof(*req));
42159+
42160+ req->src_port = port_num;
42161+ req->dest_port = port_num;
42162+ req->loopback_type = loopback_type;
42163+ req->loopback_state = enable;
42164+
42165+ status = be_mcc_notify_wait(adapter);
42166+err:
42167+ spin_unlock_bh(&adapter->mcc_lock);
42168+ return status;
42169+}
42170+
42171+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
42172+ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
42173+{
42174+ struct be_mcc_wrb *wrb;
42175+ struct be_cmd_req_loopback_test *req;
42176+ int status;
42177+
42178+ spin_lock_bh(&adapter->mcc_lock);
42179+
42180+ wrb = wrb_from_mccq(adapter);
42181+ if (!wrb) {
42182+ status = -EBUSY;
42183+ goto err;
42184+ }
42185+
42186+ req = embedded_payload(wrb);
42187+
42188+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42189+ OPCODE_LOWLEVEL_LOOPBACK_TEST);
42190+
42191+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42192+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
42193+ req->hdr.timeout = cpu_to_le32(4);
42194+
42195+ req->pattern = cpu_to_le64(pattern);
42196+ req->src_port = cpu_to_le32(port_num);
42197+ req->dest_port = cpu_to_le32(port_num);
42198+ req->pkt_size = cpu_to_le32(pkt_size);
42199+ req->num_pkts = cpu_to_le32(num_pkts);
42200+ req->loopback_type = cpu_to_le32(loopback_type);
42201+
42202+ status = be_mcc_notify_wait(adapter);
42203+ if (!status) {
42204+ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
42205+ status = le32_to_cpu(resp->status);
42206+ }
42207+
42208+err:
42209+ spin_unlock_bh(&adapter->mcc_lock);
42210+ return status;
42211+}
42212+
42213+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
42214+ u32 byte_cnt, struct be_dma_mem *cmd)
42215+{
42216+ struct be_mcc_wrb *wrb;
42217+ struct be_cmd_req_ddrdma_test *req;
42218+ struct be_sge *sge;
42219+ int status;
42220+ int i, j = 0;
42221+
42222+ spin_lock_bh(&adapter->mcc_lock);
42223+
42224+ wrb = wrb_from_mccq(adapter);
42225+ if (!wrb) {
42226+ status = -EBUSY;
42227+ goto err;
42228+ }
42229+ req = cmd->va;
42230+ sge = nonembedded_sgl(wrb);
42231+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42232+ OPCODE_LOWLEVEL_HOST_DDR_DMA);
42233+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42234+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
42235+
42236+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
42237+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
42238+ sge->len = cpu_to_le32(cmd->size);
42239+
42240+ req->pattern = cpu_to_le64(pattern);
42241+ req->byte_count = cpu_to_le32(byte_cnt);
42242+ for (i = 0; i < byte_cnt; i++) {
42243+ req->snd_buff[i] = (u8)(pattern >> (j*8));
42244+ j++;
42245+ if (j > 7)
42246+ j = 0;
42247+ }
42248+
42249+ status = be_mcc_notify_wait(adapter);
42250+
42251+ if (!status) {
42252+ struct be_cmd_resp_ddrdma_test *resp;
42253+ resp = cmd->va;
42254+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
42255+ resp->snd_err) {
42256+ status = -1;
42257+ }
42258+ }
42259+
42260+err:
42261+ spin_unlock_bh(&adapter->mcc_lock);
42262+ return status;
42263+}
42264+
42265+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
42266+ struct be_dma_mem *nonemb_cmd)
42267+{
42268+ struct be_mcc_wrb *wrb;
42269+ struct be_cmd_req_seeprom_read *req;
42270+ struct be_sge *sge;
42271+ int status;
42272+
42273+ spin_lock_bh(&adapter->mcc_lock);
42274+
42275+ wrb = wrb_from_mccq(adapter);
42276+ req = nonemb_cmd->va;
42277+ sge = nonembedded_sgl(wrb);
42278+
42279+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42280+ OPCODE_COMMON_SEEPROM_READ);
42281+
42282+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42283+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
42284+
42285+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42286+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42287+ sge->len = cpu_to_le32(nonemb_cmd->size);
42288+
42289+ status = be_mcc_notify_wait(adapter);
42290+
42291+ spin_unlock_bh(&adapter->mcc_lock);
42292+ return status;
42293+}
42294+
42295+int be_cmd_get_phy_info(struct be_adapter *adapter,
42296+ struct be_phy_info *phy_info)
42297+{
42298+ struct be_mcc_wrb *wrb;
42299+ struct be_cmd_req_get_phy_info *req;
42300+ struct be_sge *sge;
42301+ struct be_dma_mem cmd;
42302+ struct be_phy_info *resp_phy_info;
42303+ int status;
42304+
42305+ spin_lock_bh(&adapter->mcc_lock);
42306+ wrb = wrb_from_mccq(adapter);
42307+ if (!wrb) {
42308+ status = -EBUSY;
42309+ goto err;
42310+ }
42311+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
42312+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
42313+ &cmd.dma);
42314+ if (!cmd.va) {
42315+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
42316+ status = -ENOMEM;
42317+ goto err;
42318+ }
42319+
42320+ req = cmd.va;
42321+ sge = nonembedded_sgl(wrb);
42322+
42323+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42324+ OPCODE_COMMON_GET_PHY_DETAILS);
42325+
42326+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42327+ OPCODE_COMMON_GET_PHY_DETAILS,
42328+ sizeof(*req));
42329+
42330+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
42331+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
42332+ sge->len = cpu_to_le32(cmd.size);
42333+
42334+ status = be_mcc_notify_wait(adapter);
42335+ if (!status) {
42336+ resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr);
42337+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
42338+ phy_info->interface_type =
42339+ le16_to_cpu(resp_phy_info->interface_type);
42340+ phy_info->auto_speeds_supported =
42341+ le16_to_cpu(resp_phy_info->auto_speeds_supported);
42342+ phy_info->fixed_speeds_supported =
42343+ le16_to_cpu(resp_phy_info->fixed_speeds_supported);
42344+ phy_info->misc_params =
42345+ le32_to_cpu(resp_phy_info->misc_params);
42346+ }
42347+ pci_free_consistent(adapter->pdev, cmd.size,
42348+ cmd.va, cmd.dma);
42349+err:
42350+ spin_unlock_bh(&adapter->mcc_lock);
42351+ return status;
42352+}
42353+
42354+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
42355+{
42356+ struct be_mcc_wrb *wrb;
42357+ struct be_cmd_req_set_qos *req;
42358+ int status;
42359+
42360+ spin_lock_bh(&adapter->mcc_lock);
42361+
42362+ wrb = wrb_from_mccq(adapter);
42363+ if (!wrb) {
42364+ status = -EBUSY;
42365+ goto err;
42366+ }
42367+
42368+ req = embedded_payload(wrb);
42369+
42370+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42371+ OPCODE_COMMON_SET_QOS);
42372+
42373+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42374+ OPCODE_COMMON_SET_QOS, sizeof(*req));
42375+
42376+ req->hdr.domain = domain;
42377+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
42378+ req->max_bps_nic = cpu_to_le32(bps);
42379+
42380+ status = be_mcc_notify_wait(adapter);
42381+err:
42382+ spin_unlock_bh(&adapter->mcc_lock);
42383+ return status;
42384+}
42385+
42386+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
42387+{
42388+ struct be_mcc_wrb *wrb;
42389+ struct be_cmd_req_cntl_attribs *req;
42390+ struct be_cmd_resp_cntl_attribs *resp;
42391+ struct be_sge *sge;
42392+ int status;
42393+ int payload_len = max(sizeof(*req), sizeof(*resp));
42394+ struct mgmt_controller_attrib *attribs;
42395+ struct be_dma_mem attribs_cmd;
42396+
42397+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
42398+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
42399+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
42400+ &attribs_cmd.dma);
42401+ if (!attribs_cmd.va) {
42402+ dev_err(&adapter->pdev->dev,
42403+ "Memory allocation failure\n");
42404+ return -ENOMEM;
42405+ }
42406+
42407+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42408+ return -1;
42409+
42410+ wrb = wrb_from_mbox(adapter);
42411+ if (!wrb) {
42412+ status = -EBUSY;
42413+ goto err;
42414+ }
42415+ req = attribs_cmd.va;
42416+ sge = nonembedded_sgl(wrb);
42417+
42418+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
42419+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
42420+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42421+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
42422+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
42423+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
42424+ sge->len = cpu_to_le32(attribs_cmd.size);
42425+
42426+ status = be_mbox_notify_wait(adapter);
42427+ if (!status) {
42428+ attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
42429+ sizeof(struct be_cmd_resp_hdr));
42430+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
42431+ strncpy(adapter->model_number,
42432+ attribs->hba_attribs.controller_model_number, 31);
42433+ }
42434+
42435+err:
42436+ mutex_unlock(&adapter->mbox_lock);
42437+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
42438+ attribs_cmd.dma);
42439+ return status;
42440+}
42441+
42442+/* Uses mbox */
42443+int be_cmd_req_native_mode(struct be_adapter *adapter)
42444+{
42445+ struct be_mcc_wrb *wrb;
42446+ struct be_cmd_req_set_func_cap *req;
42447+ int status;
42448+
42449+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42450+ return -1;
42451+
42452+ wrb = wrb_from_mbox(adapter);
42453+ if (!wrb) {
42454+ status = -EBUSY;
42455+ goto err;
42456+ }
42457+
42458+ req = embedded_payload(wrb);
42459+
42460+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42461+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
42462+
42463+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42464+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
42465+
42466+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
42467+ CAPABILITY_BE3_NATIVE_ERX_API);
42468+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
42469+
42470+ status = be_mbox_notify_wait(adapter);
42471+ if (!status) {
42472+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
42473+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
42474+ CAPABILITY_BE3_NATIVE_ERX_API;
42475+ }
42476+err:
42477+ mutex_unlock(&adapter->mbox_lock);
42478+ return status;
42479+}
42480+
42481+static void encode_port_names(struct be_adapter *adapter)
42482+{
42483+ switch (adapter->port_name[adapter->hba_port_num]) {
42484+ case '0':
42485+ adapter->port_name[adapter->hba_port_num] = 0;
42486+ break;
42487+ case '1':
42488+ adapter->port_name[adapter->hba_port_num] = 1;
42489+ break;
42490+ case '2':
42491+ adapter->port_name[adapter->hba_port_num] = 2;
42492+ break;
42493+ case '3':
42494+ adapter->port_name[adapter->hba_port_num] = 3;
42495+ break;
42496+ case '4':
42497+ adapter->port_name[adapter->hba_port_num] = 4;
42498+ break;
42499+ case 'A':
42500+ adapter->port_name[adapter->hba_port_num] = 5;
42501+ break;
42502+ case 'B':
42503+ adapter->port_name[adapter->hba_port_num] = 6;
42504+ break;
42505+ case 'C':
42506+ adapter->port_name[adapter->hba_port_num] = 7;
42507+ break;
42508+ case 'D':
42509+ adapter->port_name[adapter->hba_port_num] = 8;
42510+ break;
42511+ }
42512+}
42513+
42514+int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name)
42515+{
42516+ struct be_mcc_wrb *wrb;
42517+ struct be_cmd_req_get_port_name *req;
42518+ int status;
42519+
42520+ spin_lock_bh(&adapter->mcc_lock);
42521+
42522+ wrb = wrb_from_mccq(adapter);
42523+ if (!wrb) {
42524+ status = -EBUSY;
42525+ goto err;
42526+ }
42527+
42528+ req = embedded_payload(wrb);
42529+
42530+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42531+ OPCODE_COMMON_GET_PORT_NAME);
42532+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42533+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42534+
42535+ status = be_mcc_notify_wait(adapter);
42536+ if (!status) {
42537+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
42538+ port_name[0] = resp->port0_name;
42539+ port_name[1] = resp->port1_name;
42540+ }
42541+
42542+err:
42543+ spin_unlock_bh(&adapter->mcc_lock);
42544+
42545+ if(!status)
42546+ encode_port_names(adapter);
42547+ return status;
42548+}
42549+
42550+int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name)
42551+{
42552+ struct be_mcc_wrb *wrb;
42553+ struct be_cmd_req_get_port_name *req;
42554+ int status;
42555+
42556+ spin_lock_bh(&adapter->mcc_lock);
42557+
42558+ wrb = wrb_from_mccq(adapter);
42559+ if (!wrb) {
42560+ status = -EBUSY;
42561+ goto err;
42562+ }
42563+ req = embedded_payload(wrb);
42564+
42565+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42566+ OPCODE_COMMON_GET_PORT_NAME);
42567+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42568+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42569+ req->hdr.version = 1;
42570+
42571 status = be_mcc_notify_wait(adapter);
42572+ if (!status) {
42573+ struct be_cmd_resp_get_port_name_v1 *resp = embedded_payload(wrb);
42574+ port_name[0] = resp->port0_name;
42575+ port_name[1] = resp->port1_name;
42576+ port_name[2] = resp->port2_name;
42577+ port_name[3] = resp->port3_name;
42578+ }
42579+
42580+err:
42581+ spin_unlock_bh(&adapter->mcc_lock);
42582+
42583+ if (!status)
42584+ encode_port_names(adapter);
42585+ return status;
42586+}
42587+
42588+int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs)
42589+{
42590+ struct be_mcc_wrb *wrb;
42591+ struct be_cmd_req_pg *req;
42592+ int status, num = 0;
42593+ bool query = true;
42594+
42595+ *fw_num_txqs = MAX_TX_QS;
42596+
42597+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42598+ return -1;
42599+
42600+enable_pfc:
42601+ wrb = wrb_from_mbox(adapter);
42602+ req = embedded_payload(wrb);
42603+
42604+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42605+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST);
42606+
42607+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42608+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST, sizeof(*req));
42609+
42610+ if (query)
42611+ req->query |= cpu_to_le32(REQ_PG_QUERY);
42612+ req->pfc_pg |= cpu_to_le32(REQ_PG_FEAT);
42613+
42614+ status = be_mbox_notify_wait(adapter);
42615+ if (!status) {
42616+ struct be_cmd_resp_pg *resp = embedded_payload(wrb);
42617+ if (query) {
42618+ if (le32_to_cpu(resp->pfc_pg) & REQ_PG_FEAT) {
42619+ num = le32_to_cpu(resp->num_tx_rings);
42620+ query = false;
42621+ goto enable_pfc;
42622+ }
42623+ } else {
42624+ adapter->flags |= BE_FLAGS_DCBX;
42625+ *fw_num_txqs = num;
42626+ }
42627+ }
42628+
42629+ mutex_unlock(&adapter->mbox_lock);
42630+ return status;
42631+}
42632+
42633+/* Set privilege(s) for a function */
42634+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 mask, u32 *prev,
42635+ u32 domain)
42636+{
42637+ struct be_mcc_wrb *wrb;
42638+ struct be_cmd_req_set_fn_privileges *req;
42639+ int status;
42640+
42641+ spin_lock_bh(&adapter->mcc_lock);
42642+
42643+ wrb = wrb_from_mccq(adapter);
42644+ if (!wrb) {
42645+ status = -EBUSY;
42646+ goto err;
42647+ }
42648+
42649+ req = embedded_payload(wrb);
42650+
42651+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42652+ OPCODE_COMMON_SET_FN_PRIVILEGES);
42653+
42654+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42655+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req));
42656+
42657+ req->hdr.domain = domain;
42658+ req->privilege_mask = cpu_to_le32(mask);
42659+
42660+ status = be_mcc_notify_wait(adapter);
42661+
42662+err:
42663+ spin_unlock_bh(&adapter->mcc_lock);
42664+ return status;
42665+}
42666+
42667+/* Get privilege(s) for a function */
42668+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
42669+ u32 domain)
42670+{
42671+ struct be_mcc_wrb *wrb;
42672+ struct be_cmd_req_get_fn_privileges *req;
42673+ int status;
42674+
42675+ spin_lock_bh(&adapter->mcc_lock);
42676+
42677+ wrb = wrb_from_mccq(adapter);
42678+ if (!wrb) {
42679+ status = -EBUSY;
42680+ goto err;
42681+ }
42682+
42683+ req = embedded_payload(wrb);
42684+
42685+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42686+ OPCODE_COMMON_GET_FN_PRIVILEGES);
42687
42688+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42689+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req));
42690+
42691+ req->hdr.domain = domain;
42692+
42693+ status = be_mcc_notify_wait(adapter);
42694+ if (!status) {
42695+ struct be_cmd_resp_get_fn_privileges *resp =
42696+ embedded_payload(wrb);
42697+ *privilege = le32_to_cpu(resp->privilege_mask);
42698+ } else
42699+ *privilege = 0;
42700+
42701+err:
42702+ spin_unlock_bh(&adapter->mcc_lock);
42703+ return status;
42704+}
42705+
42706+/* Set Hyper switch config */
42707+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
42708+ u32 domain, u16 intf_id)
42709+{
42710+ struct be_mcc_wrb *wrb;
42711+ struct be_cmd_req_set_hsw_config *req;
42712+ void *ctxt;
42713+ int status;
42714+
42715+ spin_lock_bh(&adapter->mcc_lock);
42716+
42717+ wrb = wrb_from_mccq(adapter);
42718+ if (!wrb) {
42719+ status = -EBUSY;
42720+ goto err;
42721+ }
42722+
42723+ req = embedded_payload(wrb);
42724+ ctxt = &req->context;
42725+
42726+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42727+ OPCODE_COMMON_SET_HSW_CONFIG);
42728+
42729+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42730+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req));
42731+
42732+ req->hdr.domain = domain;
42733+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
42734+ if (pvid) {
42735+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
42736+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
42737+ }
42738+
42739+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42740+ status = be_mcc_notify_wait(adapter);
42741+
42742+err:
42743+ spin_unlock_bh(&adapter->mcc_lock);
42744+ return status;
42745+}
42746+
42747+/* Get Hyper switch config */
42748+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
42749+ u32 domain, u16 intf_id)
42750+{
42751+ struct be_mcc_wrb *wrb;
42752+ struct be_cmd_req_get_hsw_config *req;
42753+ void *ctxt;
42754+ int status;
42755+ u16 vid;
42756+
42757+ spin_lock_bh(&adapter->mcc_lock);
42758+
42759+ wrb = wrb_from_mccq(adapter);
42760+ if (!wrb) {
42761+ status = -EBUSY;
42762+ goto err;
42763+ }
42764+
42765+ req = embedded_payload(wrb);
42766+ ctxt = &req->context;
42767+
42768+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42769+ OPCODE_COMMON_GET_HSW_CONFIG);
42770+
42771+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42772+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req));
42773+
42774+ req->hdr.domain = domain;
42775+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
42776+ intf_id);
42777+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
42778+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42779+
42780+ status = be_mcc_notify_wait(adapter);
42781+ if (!status) {
42782+ struct be_cmd_resp_get_hsw_config *resp =
42783+ embedded_payload(wrb);
42784+ be_dws_le_to_cpu(&resp->context,
42785+ sizeof(resp->context));
42786+ vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
42787+ pvid, &resp->context);
42788+ *pvid = le16_to_cpu(vid);
42789+ }
42790+
42791+err:
42792+ spin_unlock_bh(&adapter->mcc_lock);
42793+ return status;
42794+}
42795+
42796+int be_cmd_get_port_speed(struct be_adapter *adapter,
42797+ u8 port_num, u16 *dac_cable_len, u16 *port_speed)
42798+{
42799+ struct be_mcc_wrb *wrb;
42800+ struct be_cmd_req_get_port_speed *req;
42801+ int status = 0;
42802+
42803+ spin_lock_bh(&adapter->mcc_lock);
42804+
42805+ wrb = wrb_from_mccq(adapter);
42806+ if (!wrb) {
42807+ status = -EBUSY;
42808+ goto err;
42809+ }
42810+
42811+ req = embedded_payload(wrb);
42812+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42813+ OPCODE_COMMON_NTWK_GET_LINK_SPEED);
42814+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42815+ OPCODE_COMMON_NTWK_GET_LINK_SPEED,
42816+ sizeof(*req));
42817+ req->port_num = port_num;
42818+ status = be_mcc_notify_wait(adapter);
42819+ if (!status) {
42820+ struct be_cmd_resp_get_port_speed *resp =
42821+ embedded_payload(wrb);
42822+ *dac_cable_len = resp->dac_cable_length;
42823+ *port_speed = resp->mac_speed;
42824+ }
42825+
42826+err:
42827+ spin_unlock_bh(&adapter->mcc_lock);
42828+ return status;
42829+}
42830+
42831+int be_cmd_set_port_speed_v1(struct be_adapter *adapter,
42832+ u8 port_num, u16 mac_speed,
42833+ u16 dac_cable_len)
42834+{
42835+ struct be_mcc_wrb *wrb;
42836+ struct be_cmd_req_set_port_speed_v1 *req;
42837+ int status = 0;
42838+
42839+ spin_lock_bh(&adapter->mcc_lock);
42840+
42841+ wrb = wrb_from_mccq(adapter);
42842+ if (!wrb) {
42843+ status = -EBUSY;
42844+ goto err;
42845+ }
42846+ req = embedded_payload(wrb);
42847+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42848+ OPCODE_COMMON_NTWK_SET_LINK_SPEED);
42849+
42850+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42851+ OPCODE_COMMON_NTWK_SET_LINK_SPEED,
42852+ sizeof(*req));
42853+ req->hdr.version=1;
42854+
42855+ req->port_num = port_num;
42856+ req->virt_port = port_num;
42857+ req->mac_speed = mac_speed;
42858+ req->dac_cable_length = dac_cable_len;
42859+ status = be_mcc_notify_wait(adapter);
42860+err:
42861+ spin_unlock_bh(&adapter->mcc_lock);
42862+ return status;
42863+}
42864+
42865+
42866+/* Uses sync mcc */
42867+#ifdef CONFIG_PALAU
42868+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
42869+ int req_size, void *va)
42870+{
42871+ struct be_mcc_wrb *wrb;
42872+ struct be_sge *sge;
42873+ int status;
42874+ struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) va;
42875+
42876+ spin_lock_bh(&adapter->mcc_lock);
42877+
42878+ wrb = wrb_from_mccq(adapter);
42879+ if (!wrb) {
42880+ status = -EBUSY;
42881+ goto err;
42882+ }
42883+ sge = nonembedded_sgl(wrb);
42884+
42885+ be_wrb_hdr_prepare(wrb, req_size, false, 1, hdr->opcode);
42886+ wrb->tag1 = MCC_WRB_PASS_THRU;
42887+ sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
42888+ sge->pa_lo = cpu_to_le32(dma & 0xFFFFFFFF);
42889+ sge->len = cpu_to_le32(req_size);
42890+
42891+ status = be_mcc_notify_wait(adapter);
42892+err:
42893 spin_unlock_bh(&adapter->mcc_lock);
42894 return status;
42895 }
42896+#endif
42897diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
42898index ad33d55..35aa5c7 100644
42899--- a/drivers/net/benet/be_cmds.h
42900+++ b/drivers/net/benet/be_cmds.h
42901@@ -1,20 +1,23 @@
42902 /*
42903- * Copyright (C) 2005 - 2009 ServerEngines
42904+ * Copyright (C) 2005 - 2011 Emulex
42905 * All rights reserved.
42906 *
42907 * This program is free software; you can redistribute it and/or
42908 * modify it under the terms of the GNU General Public License version 2
42909- * as published by the Free Software Foundation. The full GNU General
42910+ * as published by the Free Software Foundation. The full GNU General
42911 * Public License is included in this distribution in the file called COPYING.
42912 *
42913 * Contact Information:
42914- * linux-drivers@serverengines.com
42915+ * linux-drivers@emulex.com
42916 *
42917- * ServerEngines
42918- * 209 N. Fair Oaks Ave
42919- * Sunnyvale, CA 94085
42920+ * Emulex
42921+ * 3333 Susan Street
42922+ * Costa Mesa, CA 92626
42923 */
42924
42925+#ifndef BE_CMDS_H
42926+#define BE_CMDS_H
42927+
42928 /*
42929 * The driver sends configuration and managements command requests to the
42930 * firmware in the BE. These requests are communicated to the processor
42931@@ -29,9 +32,10 @@ struct be_sge {
42932 u32 len;
42933 };
42934
42935-#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42936+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42937 #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
42938 #define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
42939+#define MCC_WRB_PASS_THRU 0xFF /* this wrb is used for pass thru cmd */
42940 struct be_mcc_wrb {
42941 u32 embedded; /* dword 0 */
42942 u32 payload_length; /* dword 1 */
42943@@ -44,24 +48,19 @@ struct be_mcc_wrb {
42944 } payload;
42945 };
42946
42947-#define CQE_FLAGS_VALID_MASK (1 << 31)
42948-#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42949-#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42950-#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42951+#define CQE_FLAGS_VALID_MASK (1 << 31)
42952+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42953+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42954+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42955
42956 /* Completion Status */
42957 enum {
42958- MCC_STATUS_SUCCESS = 0x0,
42959-/* The client does not have sufficient privileges to execute the command */
42960- MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
42961-/* A parameter in the command was invalid. */
42962- MCC_STATUS_INVALID_PARAMETER = 0x2,
42963-/* There are insufficient chip resources to execute the command */
42964- MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
42965-/* The command is completing because the queue was getting flushed */
42966- MCC_STATUS_QUEUE_FLUSHING = 0x4,
42967-/* The command is completing with a DMA error */
42968- MCC_STATUS_DMA_FAILED = 0x5,
42969+ MCC_STATUS_SUCCESS = 0,
42970+ MCC_STATUS_FAILED = 1,
42971+ MCC_STATUS_ILLEGAL_REQUEST = 2,
42972+ MCC_STATUS_ILLEGAL_FIELD = 3,
42973+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
42974+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
42975 MCC_STATUS_NOT_SUPPORTED = 66
42976 };
42977
42978@@ -81,15 +80,24 @@ struct be_mcc_compl {
42979 * mcc_compl is interpreted as follows:
42980 */
42981 #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
42982+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
42983 #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
42984+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
42985 #define ASYNC_EVENT_CODE_LINK_STATE 0x1
42986+#define ASYNC_EVENT_CODE_GRP_5 0x5
42987+#define ASYNC_EVENT_QOS_SPEED 0x1
42988+#define ASYNC_EVENT_COS_PRIORITY 0x2
42989+#define ASYNC_EVENT_PVID_STATE 0x3
42990+#define GRP5_TYPE_PRIO_TC_MAP 4
42991+
42992 struct be_async_event_trailer {
42993 u32 code;
42994 };
42995
42996 enum {
42997- ASYNC_EVENT_LINK_DOWN = 0x0,
42998- ASYNC_EVENT_LINK_UP = 0x1
42999+ ASYNC_EVENT_LINK_DOWN = 0x0,
43000+ ASYNC_EVENT_LINK_UP = 0x1,
43001+ ASYNC_EVENT_LOGICAL = 0x2
43002 };
43003
43004 /* When the event code of an async trailer is link-state, the mcc_compl
43005@@ -101,7 +109,51 @@ struct be_async_event_link_state {
43006 u8 port_duplex;
43007 u8 port_speed;
43008 u8 port_fault;
43009- u8 rsvd0[7];
43010+ u8 rsvd0;
43011+ u16 qos_link_speed;
43012+ u32 event_tag;
43013+ struct be_async_event_trailer trailer;
43014+} __packed;
43015+
43016+/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
43017+ * the mcc_compl must be interpreted as follows
43018+ */
43019+struct be_async_event_grp5_qos_link_speed {
43020+ u8 physical_port;
43021+ u8 rsvd[5];
43022+ u16 qos_link_speed;
43023+ u32 event_tag;
43024+ struct be_async_event_trailer trailer;
43025+} __packed;
43026+
43027+/* When the event code of an async trailer is GRP5 and event type is
43028+ * CoS-Priority, the mcc_compl must be interpreted as follows
43029+ */
43030+struct be_async_event_grp5_cos_priority {
43031+ u8 physical_port;
43032+ u8 available_priority_bmap;
43033+ u8 reco_default_priority;
43034+ u8 valid;
43035+ u8 rsvd0;
43036+ u8 event_tag;
43037+ struct be_async_event_trailer trailer;
43038+} __packed;
43039+
43040+/* When the event code of an async trailer is GRP5 and event type is
43041+ * PVID state, the mcc_compl must be interpreted as follows
43042+ */
43043+struct be_async_event_grp5_pvid_state {
43044+ u8 enabled;
43045+ u8 rsvd0;
43046+ u16 tag;
43047+ u32 event_tag;
43048+ u32 rsvd1;
43049+ struct be_async_event_trailer trailer;
43050+} __packed;
43051+
43052+/* GRP5 prio-tc-map event */
43053+struct be_async_event_grp5_prio_tc_map {
43054+ u8 prio_tc_map[8]; /* map[prio] -> tc_id */
43055 struct be_async_event_trailer trailer;
43056 } __packed;
43057
43058@@ -111,41 +163,68 @@ struct be_mcc_mailbox {
43059 };
43060
43061 #define CMD_SUBSYSTEM_COMMON 0x1
43062-#define CMD_SUBSYSTEM_ETH 0x3
43063+#define CMD_SUBSYSTEM_ETH 0x3
43064+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
43065
43066 #define OPCODE_COMMON_NTWK_MAC_QUERY 1
43067 #define OPCODE_COMMON_NTWK_MAC_SET 2
43068 #define OPCODE_COMMON_NTWK_MULTICAST_SET 3
43069-#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
43070+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
43071 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
43072+#define OPCODE_COMMON_READ_FLASHROM 6
43073 #define OPCODE_COMMON_WRITE_FLASHROM 7
43074 #define OPCODE_COMMON_CQ_CREATE 12
43075 #define OPCODE_COMMON_EQ_CREATE 13
43076-#define OPCODE_COMMON_MCC_CREATE 21
43077-#define OPCODE_COMMON_NTWK_RX_FILTER 34
43078+#define OPCODE_COMMON_MCC_CREATE 21
43079+#define OPCODE_COMMON_SET_QOS 28
43080+#define OPCODE_COMMON_MCC_CREATE_EXT 90
43081+#define OPCODE_COMMON_SEEPROM_READ 30
43082+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
43083+#define OPCODE_COMMON_NTWK_RX_FILTER 34
43084 #define OPCODE_COMMON_GET_FW_VERSION 35
43085 #define OPCODE_COMMON_SET_FLOW_CONTROL 36
43086 #define OPCODE_COMMON_GET_FLOW_CONTROL 37
43087 #define OPCODE_COMMON_SET_FRAME_SIZE 39
43088 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
43089 #define OPCODE_COMMON_FIRMWARE_CONFIG 42
43090-#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43091-#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43092-#define OPCODE_COMMON_MCC_DESTROY 53
43093-#define OPCODE_COMMON_CQ_DESTROY 54
43094-#define OPCODE_COMMON_EQ_DESTROY 55
43095+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43096+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43097+#define OPCODE_COMMON_MCC_DESTROY 53
43098+#define OPCODE_COMMON_CQ_DESTROY 54
43099+#define OPCODE_COMMON_EQ_DESTROY 55
43100+#define OPCODE_COMMON_NTWK_SET_LINK_SPEED 57
43101 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
43102 #define OPCODE_COMMON_NTWK_PMAC_ADD 59
43103 #define OPCODE_COMMON_NTWK_PMAC_DEL 60
43104 #define OPCODE_COMMON_FUNCTION_RESET 61
43105+#define OPCODE_COMMON_MANAGE_FAT 68
43106+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
43107+#define OPCODE_COMMON_GET_BEACON_STATE 70
43108+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
43109+#define OPCODE_COMMON_GET_PORT_NAME 77
43110+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
43111+#define OPCODE_COMMON_GET_PHY_DETAILS 102
43112+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
43113+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
43114+#define OPCODE_COMMON_NTWK_GET_LINK_SPEED 134
43115+#define OPCODE_COMMON_GET_HSW_CONFIG 152
43116+#define OPCODE_COMMON_SET_HSW_CONFIG 153
43117+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
43118
43119+#define OPCODE_ETH_RSS_CONFIG 1
43120 #define OPCODE_ETH_ACPI_CONFIG 2
43121 #define OPCODE_ETH_PROMISCUOUS 3
43122 #define OPCODE_ETH_GET_STATISTICS 4
43123 #define OPCODE_ETH_TX_CREATE 7
43124-#define OPCODE_ETH_RX_CREATE 8
43125-#define OPCODE_ETH_TX_DESTROY 9
43126-#define OPCODE_ETH_RX_DESTROY 10
43127+#define OPCODE_ETH_RX_CREATE 8
43128+#define OPCODE_ETH_TX_DESTROY 9
43129+#define OPCODE_ETH_RX_DESTROY 10
43130+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
43131+#define OPCODE_ETH_PG_FEATURE_QUERY_REQUEST 23
43132+
43133+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
43134+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
43135+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
43136
43137 struct be_cmd_req_hdr {
43138 u8 opcode; /* dword 0 */
43139@@ -159,7 +238,7 @@ struct be_cmd_req_hdr {
43140 };
43141
43142 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
43143-#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43144+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43145 struct be_cmd_resp_hdr {
43146 u32 info; /* dword 0 */
43147 u32 status; /* dword 1 */
43148@@ -265,7 +344,7 @@ struct be_cmd_req_pmac_del {
43149 /******************** Create CQ ***************************/
43150 /* Pseudo amap definition in which each bit of the actual structure is defined
43151 * as a byte: used to calculate offset/shift/mask of each field */
43152-struct amap_cq_context {
43153+struct amap_cq_context_be {
43154 u8 cidx[11]; /* dword 0*/
43155 u8 rsvd0; /* dword 0*/
43156 u8 coalescwm[2]; /* dword 0*/
43157@@ -288,11 +367,28 @@ struct amap_cq_context {
43158 u8 rsvd5[32]; /* dword 3*/
43159 } __packed;
43160
43161+struct amap_cq_context_lancer {
43162+ u8 rsvd0[12]; /* dword 0*/
43163+ u8 coalescwm[2]; /* dword 0*/
43164+ u8 nodelay; /* dword 0*/
43165+ u8 rsvd1[12]; /* dword 0*/
43166+ u8 count[2]; /* dword 0*/
43167+ u8 valid; /* dword 0*/
43168+ u8 rsvd2; /* dword 0*/
43169+ u8 eventable; /* dword 0*/
43170+ u8 eqid[16]; /* dword 1*/
43171+ u8 rsvd3[15]; /* dword 1*/
43172+ u8 armed; /* dword 1*/
43173+ u8 rsvd4[32]; /* dword 2*/
43174+ u8 rsvd5[32]; /* dword 3*/
43175+} __packed;
43176+
43177 struct be_cmd_req_cq_create {
43178 struct be_cmd_req_hdr hdr;
43179 u16 num_pages;
43180- u16 rsvd0;
43181- u8 context[sizeof(struct amap_cq_context) / 8];
43182+ u8 page_size;
43183+ u8 rsvd0;
43184+ u8 context[sizeof(struct amap_cq_context_be) / 8];
43185 struct phys_addr pages[8];
43186 } __packed;
43187
43188@@ -302,10 +398,28 @@ struct be_cmd_resp_cq_create {
43189 u16 rsvd0;
43190 } __packed;
43191
43192+struct be_cmd_req_get_fat {
43193+ struct be_cmd_req_hdr hdr;
43194+ u32 fat_operation;
43195+ u32 read_log_offset;
43196+ u32 read_log_length;
43197+ u32 data_buffer_size;
43198+ u32 data_buffer[1];
43199+} __packed;
43200+
43201+struct be_cmd_resp_get_fat {
43202+ struct be_cmd_resp_hdr hdr;
43203+ u32 log_size;
43204+ u32 read_log_length;
43205+ u32 rsvd[2];
43206+ u32 data_buffer[1];
43207+} __packed;
43208+
43209+
43210 /******************** Create MCCQ ***************************/
43211 /* Pseudo amap definition in which each bit of the actual structure is defined
43212 * as a byte: used to calculate offset/shift/mask of each field */
43213-struct amap_mcc_context {
43214+struct amap_mcc_context_be {
43215 u8 con_index[14];
43216 u8 rsvd0[2];
43217 u8 ring_size[4];
43218@@ -320,11 +434,31 @@ struct amap_mcc_context {
43219 u8 rsvd2[32];
43220 } __packed;
43221
43222+struct amap_mcc_context_lancer {
43223+ u8 async_cq_id[16];
43224+ u8 ring_size[4];
43225+ u8 rsvd0[12];
43226+ u8 rsvd1[31];
43227+ u8 valid;
43228+ u8 async_cq_valid[1];
43229+ u8 rsvd2[31];
43230+ u8 rsvd3[32];
43231+} __packed;
43232+
43233 struct be_cmd_req_mcc_create {
43234 struct be_cmd_req_hdr hdr;
43235 u16 num_pages;
43236- u16 rsvd0;
43237- u8 context[sizeof(struct amap_mcc_context) / 8];
43238+ u16 cq_id;
43239+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
43240+ struct phys_addr pages[8];
43241+} __packed;
43242+
43243+struct be_cmd_req_mcc_ext_create {
43244+ struct be_cmd_req_hdr hdr;
43245+ u16 num_pages;
43246+ u16 cq_id;
43247+ u32 async_event_bitmap[1];
43248+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
43249 struct phys_addr pages[8];
43250 } __packed;
43251
43252@@ -335,49 +469,32 @@ struct be_cmd_resp_mcc_create {
43253 } __packed;
43254
43255 /******************** Create TxQ ***************************/
43256-#define BE_ETH_TX_RING_TYPE_STANDARD 2
43257+#define ETX_QUEUE_TYPE_STANDARD 0x2
43258+#define ETX_QUEUE_TYPE_PRIORITY 0x10
43259 #define BE_ULP1_NUM 1
43260
43261-/* Pseudo amap definition in which each bit of the actual structure is defined
43262- * as a byte: used to calculate offset/shift/mask of each field */
43263-struct amap_tx_context {
43264- u8 rsvd0[16]; /* dword 0 */
43265- u8 tx_ring_size[4]; /* dword 0 */
43266- u8 rsvd1[26]; /* dword 0 */
43267- u8 pci_func_id[8]; /* dword 1 */
43268- u8 rsvd2[9]; /* dword 1 */
43269- u8 ctx_valid; /* dword 1 */
43270- u8 cq_id_send[16]; /* dword 2 */
43271- u8 rsvd3[16]; /* dword 2 */
43272- u8 rsvd4[32]; /* dword 3 */
43273- u8 rsvd5[32]; /* dword 4 */
43274- u8 rsvd6[32]; /* dword 5 */
43275- u8 rsvd7[32]; /* dword 6 */
43276- u8 rsvd8[32]; /* dword 7 */
43277- u8 rsvd9[32]; /* dword 8 */
43278- u8 rsvd10[32]; /* dword 9 */
43279- u8 rsvd11[32]; /* dword 10 */
43280- u8 rsvd12[32]; /* dword 11 */
43281- u8 rsvd13[32]; /* dword 12 */
43282- u8 rsvd14[32]; /* dword 13 */
43283- u8 rsvd15[32]; /* dword 14 */
43284- u8 rsvd16[32]; /* dword 15 */
43285-} __packed;
43286-
43287 struct be_cmd_req_eth_tx_create {
43288 struct be_cmd_req_hdr hdr;
43289 u8 num_pages;
43290 u8 ulp_num;
43291- u8 type;
43292- u8 bound_port;
43293- u8 context[sizeof(struct amap_tx_context) / 8];
43294+ u16 type;
43295+ u16 if_id;
43296+ u8 queue_size;
43297+ u8 rsvd1;
43298+ u32 rsvd2;
43299+ u16 cq_id;
43300+ u16 rsvd3;
43301+ u32 rsvd4[13];
43302 struct phys_addr pages[8];
43303 } __packed;
43304
43305 struct be_cmd_resp_eth_tx_create {
43306 struct be_cmd_resp_hdr hdr;
43307 u16 cid;
43308- u16 rsvd0;
43309+ u16 rid;
43310+ u32 db_offset;
43311+ u8 tc_id;
43312+ u8 rsvd0[3];
43313 } __packed;
43314
43315 /******************** Create RxQ ***************************/
43316@@ -396,7 +513,7 @@ struct be_cmd_req_eth_rx_create {
43317 struct be_cmd_resp_eth_rx_create {
43318 struct be_cmd_resp_hdr hdr;
43319 u16 id;
43320- u8 cpu_id;
43321+ u8 rss_id;
43322 u8 rsvd0;
43323 } __packed;
43324
43325@@ -429,14 +546,15 @@ enum be_if_flags {
43326 BE_IF_FLAGS_VLAN = 0x100,
43327 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
43328 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
43329- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
43330+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
43331+ BE_IF_FLAGS_MULTICAST = 0x1000
43332 };
43333
43334 /* An RX interface is an object with one or more MAC addresses and
43335 * filtering capabilities. */
43336 struct be_cmd_req_if_create {
43337 struct be_cmd_req_hdr hdr;
43338- u32 version; /* ignore currntly */
43339+ u32 version; /* ignore currently */
43340 u32 capability_flags;
43341 u32 enable_flags;
43342 u8 mac_addr[ETH_ALEN];
43343@@ -458,7 +576,7 @@ struct be_cmd_req_if_destroy {
43344 };
43345
43346 /*************** HW Stats Get **********************************/
43347-struct be_port_rxf_stats {
43348+struct be_port_rxf_stats_v0 {
43349 u32 rx_bytes_lsd; /* dword 0*/
43350 u32 rx_bytes_msd; /* dword 1*/
43351 u32 rx_total_frames; /* dword 2*/
43352@@ -527,8 +645,8 @@ struct be_port_rxf_stats {
43353 u32 rx_input_fifo_overflow; /* dword 65*/
43354 };
43355
43356-struct be_rxf_stats {
43357- struct be_port_rxf_stats port[2];
43358+struct be_rxf_stats_v0 {
43359+ struct be_port_rxf_stats_v0 port[2];
43360 u32 rx_drops_no_pbuf; /* dword 132*/
43361 u32 rx_drops_no_txpb; /* dword 133*/
43362 u32 rx_drops_no_erx_descr; /* dword 134*/
43363@@ -545,31 +663,51 @@ struct be_rxf_stats {
43364 u32 rx_drops_invalid_ring; /* dword 145*/
43365 u32 forwarded_packets; /* dword 146*/
43366 u32 rx_drops_mtu; /* dword 147*/
43367- u32 rsvd0[15];
43368+ u32 rsvd0[7];
43369+ u32 port0_jabber_events;
43370+ u32 port1_jabber_events;
43371+ u32 rsvd1[6];
43372 };
43373
43374-struct be_erx_stats {
43375+struct be_erx_stats_v0 {
43376 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
43377- u32 debug_wdma_sent_hold; /* dword 44*/
43378- u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
43379- u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
43380- u32 debug_pmem_pbuf_dealloc; /* dword 47*/
43381+ u32 rsvd[4];
43382 };
43383
43384-struct be_hw_stats {
43385- struct be_rxf_stats rxf;
43386+struct be_pmem_stats {
43387+ u32 eth_red_drops;
43388+ u32 rsvd[5];
43389+};
43390+
43391+struct be_hw_stats_v0 {
43392+ struct be_rxf_stats_v0 rxf;
43393 u32 rsvd[48];
43394- struct be_erx_stats erx;
43395+ struct be_erx_stats_v0 erx;
43396+ struct be_pmem_stats pmem;
43397 };
43398
43399-struct be_cmd_req_get_stats {
43400+struct be_cmd_req_get_stats_v0 {
43401 struct be_cmd_req_hdr hdr;
43402- u8 rsvd[sizeof(struct be_hw_stats)];
43403+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
43404 };
43405
43406-struct be_cmd_resp_get_stats {
43407+struct be_cmd_resp_get_stats_v0 {
43408 struct be_cmd_resp_hdr hdr;
43409- struct be_hw_stats hw_stats;
43410+ struct be_hw_stats_v0 hw_stats;
43411+};
43412+
43413+struct be_cmd_req_get_cntl_addnl_attribs {
43414+ struct be_cmd_req_hdr hdr;
43415+ u8 rsvd[8];
43416+};
43417+
43418+struct be_cmd_resp_get_cntl_addnl_attribs {
43419+ struct be_cmd_resp_hdr hdr;
43420+ u16 ipl_file_number;
43421+ u8 ipl_file_version;
43422+ u8 rsvd0;
43423+ u8 on_die_temperature; /* in degrees centigrade*/
43424+ u8 rsvd1[3];
43425 };
43426
43427 struct be_cmd_req_vlan_config {
43428@@ -581,30 +719,22 @@ struct be_cmd_req_vlan_config {
43429 u16 normal_vlan[64];
43430 } __packed;
43431
43432-struct be_cmd_req_promiscuous_config {
43433- struct be_cmd_req_hdr hdr;
43434- u8 port0_promiscuous;
43435- u8 port1_promiscuous;
43436- u16 rsvd0;
43437-} __packed;
43438-
43439+/******************** RX FILTER ******************************/
43440+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
43441 struct macaddr {
43442 u8 byte[ETH_ALEN];
43443 };
43444
43445-struct be_cmd_req_mcast_mac_config {
43446+struct be_cmd_req_rx_filter {
43447 struct be_cmd_req_hdr hdr;
43448- u16 num_mac;
43449- u8 promiscuous;
43450- u8 interface_id;
43451- struct macaddr mac[32];
43452-} __packed;
43453-
43454-static inline struct be_hw_stats *
43455-hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
43456-{
43457- return &cmd->hw_stats;
43458-}
43459+ u32 global_flags_mask;
43460+ u32 global_flags;
43461+ u32 if_flags_mask;
43462+ u32 if_flags;
43463+ u32 if_id;
43464+ u32 mcast_num;
43465+ struct macaddr mcast_mac[BE_MAX_MC];
43466+};
43467
43468 /******************** Link Status Query *******************/
43469 struct be_cmd_req_link_status {
43470@@ -619,13 +749,18 @@ enum {
43471 };
43472
43473 enum {
43474- PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43475+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43476 PHY_LINK_SPEED_10MBPS = 0x1,
43477 PHY_LINK_SPEED_100MBPS = 0x2,
43478 PHY_LINK_SPEED_1GBPS = 0x3,
43479 PHY_LINK_SPEED_10GBPS = 0x4
43480 };
43481
43482+enum {
43483+ LINK_DOWN = 0x0,
43484+ LINK_UP = 0X1
43485+};
43486+
43487 struct be_cmd_resp_link_status {
43488 struct be_cmd_resp_hdr hdr;
43489 u8 physical_port;
43490@@ -634,9 +769,47 @@ struct be_cmd_resp_link_status {
43491 u8 mac_fault;
43492 u8 mgmt_mac_duplex;
43493 u8 mgmt_mac_speed;
43494- u16 rsvd0;
43495+ u16 link_speed;
43496+ u32 logical_link_status;
43497 } __packed;
43498
43499+/******************** Port Identification ***************************/
43500+/* Identifies the type of port attached to NIC */
43501+struct be_cmd_req_port_type {
43502+ struct be_cmd_req_hdr hdr;
43503+ u32 page_num;
43504+ u32 port;
43505+};
43506+
43507+enum {
43508+ TR_PAGE_A0 = 0xa0,
43509+ TR_PAGE_A2 = 0xa2
43510+};
43511+
43512+struct be_cmd_resp_port_type {
43513+ struct be_cmd_resp_hdr hdr;
43514+ u32 page_num;
43515+ u32 port;
43516+ struct data {
43517+ u8 identifier;
43518+ u8 identifier_ext;
43519+ u8 connector;
43520+ u8 transceiver[8];
43521+ u8 rsvd0[3];
43522+ u8 length_km;
43523+ u8 length_hm;
43524+ u8 length_om1;
43525+ u8 length_om2;
43526+ u8 length_cu;
43527+ u8 length_cu_m;
43528+ u8 vendor_name[16];
43529+ u8 rsvd;
43530+ u8 vendor_oui[3];
43531+ u8 vendor_pn[16];
43532+ u8 vendor_rev[4];
43533+ } data;
43534+};
43535+
43536 /******************** Get FW Version *******************/
43537 struct be_cmd_req_get_fw_version {
43538 struct be_cmd_req_hdr hdr;
43539@@ -686,9 +859,13 @@ struct be_cmd_resp_modify_eq_delay {
43540 } __packed;
43541
43542 /******************** Get FW Config *******************/
43543+#define FLEX10_MODE 0x400
43544+#define VNIC_MODE 0x20000
43545+#define UMC_ENABLED 0x1000000
43546+
43547 struct be_cmd_req_query_fw_cfg {
43548 struct be_cmd_req_hdr hdr;
43549- u32 rsvd[30];
43550+ u32 rsvd[31];
43551 };
43552
43553 struct be_cmd_resp_query_fw_cfg {
43554@@ -696,10 +873,61 @@ struct be_cmd_resp_query_fw_cfg {
43555 u32 be_config_number;
43556 u32 asic_revision;
43557 u32 phys_port;
43558- u32 function_cap;
43559+ u32 function_mode;
43560 u32 rsvd[26];
43561+ u32 function_caps;
43562 };
43563
43564+/******************** RSS Config *******************/
43565+/* RSS types */
43566+#define RSS_ENABLE_NONE 0x0
43567+#define RSS_ENABLE_IPV4 0x1
43568+#define RSS_ENABLE_TCP_IPV4 0x2
43569+#define RSS_ENABLE_IPV6 0x4
43570+#define RSS_ENABLE_TCP_IPV6 0x8
43571+
43572+struct be_cmd_req_rss_config {
43573+ struct be_cmd_req_hdr hdr;
43574+ u32 if_id;
43575+ u16 enable_rss;
43576+ u16 cpu_table_size_log2;
43577+ u32 hash[10];
43578+ u8 cpu_table[128];
43579+ u8 flush;
43580+ u8 rsvd0[3];
43581+};
43582+
43583+/******************** Port Beacon ***************************/
43584+
43585+#define BEACON_STATE_ENABLED 0x1
43586+#define BEACON_STATE_DISABLED 0x0
43587+
43588+struct be_cmd_req_enable_disable_beacon {
43589+ struct be_cmd_req_hdr hdr;
43590+ u8 port_num;
43591+ u8 beacon_state;
43592+ u8 beacon_duration;
43593+ u8 status_duration;
43594+} __packed;
43595+
43596+struct be_cmd_resp_enable_disable_beacon {
43597+ struct be_cmd_resp_hdr resp_hdr;
43598+ u32 rsvd0;
43599+} __packed;
43600+
43601+struct be_cmd_req_get_beacon_state {
43602+ struct be_cmd_req_hdr hdr;
43603+ u8 port_num;
43604+ u8 rsvd0;
43605+ u16 rsvd1;
43606+} __packed;
43607+
43608+struct be_cmd_resp_get_beacon_state {
43609+ struct be_cmd_resp_hdr resp_hdr;
43610+ u8 beacon_state;
43611+ u8 rsvd0[3];
43612+} __packed;
43613+
43614 /****************** Firmware Flash ******************/
43615 struct flashrom_params {
43616 u32 op_code;
43617@@ -714,17 +942,468 @@ struct be_cmd_write_flashrom {
43618 struct flashrom_params params;
43619 };
43620
43621+/************************ WOL *******************************/
43622+struct be_cmd_req_acpi_wol_magic_config {
43623+ struct be_cmd_req_hdr hdr;
43624+ u32 rsvd0[145];
43625+ u8 magic_mac[6];
43626+ u8 rsvd2[2];
43627+} __packed;
43628+
43629+/********************** LoopBack test *********************/
43630+struct be_cmd_req_loopback_test {
43631+ struct be_cmd_req_hdr hdr;
43632+ u32 loopback_type;
43633+ u32 num_pkts;
43634+ u64 pattern;
43635+ u32 src_port;
43636+ u32 dest_port;
43637+ u32 pkt_size;
43638+};
43639+
43640+struct be_cmd_resp_loopback_test {
43641+ struct be_cmd_resp_hdr resp_hdr;
43642+ u32 status;
43643+ u32 num_txfer;
43644+ u32 num_rx;
43645+ u32 miscomp_off;
43646+ u32 ticks_compl;
43647+};
43648+
43649+struct be_cmd_req_set_lmode {
43650+ struct be_cmd_req_hdr hdr;
43651+ u8 src_port;
43652+ u8 dest_port;
43653+ u8 loopback_type;
43654+ u8 loopback_state;
43655+};
43656+
43657+struct be_cmd_resp_set_lmode {
43658+ struct be_cmd_resp_hdr resp_hdr;
43659+ u8 rsvd0[4];
43660+};
43661+
43662+/********************** DDR DMA test *********************/
43663+struct be_cmd_req_ddrdma_test {
43664+ struct be_cmd_req_hdr hdr;
43665+ u64 pattern;
43666+ u32 byte_count;
43667+ u32 rsvd0;
43668+ u8 snd_buff[4096];
43669+ u8 rsvd1[4096];
43670+};
43671+
43672+struct be_cmd_resp_ddrdma_test {
43673+ struct be_cmd_resp_hdr hdr;
43674+ u64 pattern;
43675+ u32 byte_cnt;
43676+ u32 snd_err;
43677+ u8 rsvd0[4096];
43678+ u8 rcv_buff[4096];
43679+};
43680+
43681+/*********************** SEEPROM Read ***********************/
43682+
43683+#define BE_READ_SEEPROM_LEN 1024
43684+struct be_cmd_req_seeprom_read {
43685+ struct be_cmd_req_hdr hdr;
43686+ u8 rsvd0[BE_READ_SEEPROM_LEN];
43687+};
43688+
43689+struct be_cmd_resp_seeprom_read {
43690+ struct be_cmd_req_hdr hdr;
43691+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
43692+};
43693+
43694+enum {
43695+ PHY_TYPE_CX4_10GB = 0,
43696+ PHY_TYPE_XFP_10GB,
43697+ PHY_TYPE_SFP_1GB,
43698+ PHY_TYPE_SFP_PLUS_10GB,
43699+ PHY_TYPE_KR_10GB,
43700+ PHY_TYPE_KX4_10GB,
43701+ PHY_TYPE_BASET_10GB,
43702+ PHY_TYPE_BASET_1GB,
43703+ PHY_TYPE_BASEX_1GB,
43704+ PHY_TYPE_SGMII,
43705+ PHY_TYPE_DISABLED = 255
43706+};
43707+
43708+#define BE_AN_EN 0x2
43709+#define BE_PAUSE_SYM_EN 0x80
43710+
43711+struct be_cmd_req_get_phy_info {
43712+ struct be_cmd_req_hdr hdr;
43713+ u8 rsvd0[24];
43714+};
43715+
43716+struct be_phy_info {
43717+ u16 phy_type;
43718+ u16 interface_type;
43719+ u32 misc_params;
43720+ u16 ext_phy_details;
43721+ u16 rsvd;
43722+ u16 auto_speeds_supported;
43723+ u16 fixed_speeds_supported;
43724+ u32 future_use[2];
43725+};
43726+
43727+struct be_cmd_resp_get_phy_info {
43728+ struct be_cmd_req_hdr hdr;
43729+ struct be_phy_info phy_info;
43730+};
43731+
43732+/*********************** Set QOS ***********************/
43733+
43734+#define BE_QOS_BITS_NIC 1
43735+
43736+struct be_cmd_req_set_qos {
43737+ struct be_cmd_req_hdr hdr;
43738+ u32 valid_bits;
43739+ u32 max_bps_nic;
43740+ u32 rsvd[7];
43741+};
43742+
43743+struct be_cmd_resp_set_qos {
43744+ struct be_cmd_resp_hdr hdr;
43745+ u32 rsvd;
43746+};
43747+
43748+/*********************** Controller Attributes ***********************/
43749+struct be_cmd_req_cntl_attribs {
43750+ struct be_cmd_req_hdr hdr;
43751+};
43752+
43753+struct be_cmd_resp_cntl_attribs {
43754+ struct be_cmd_resp_hdr hdr;
43755+ struct mgmt_controller_attrib attribs;
43756+};
43757+
43758+/******************* get port names ***************/
43759+struct be_cmd_req_get_port_name {
43760+ struct be_cmd_req_hdr hdr;
43761+ u32 rsvd0;
43762+};
43763+
43764+struct be_cmd_resp_get_port_name {
43765+ struct be_cmd_req_hdr hdr;
43766+ u8 port0_name;
43767+ u8 port1_name;
43768+ u8 rsvd0[2];
43769+};
43770+
43771+struct be_cmd_resp_get_port_name_v1 {
43772+ struct be_cmd_req_hdr hdr;
43773+ u32 pt : 2;
43774+ u32 rsvd0 : 30;
43775+ u8 port0_name;
43776+ u8 port1_name;
43777+ u8 port2_name;
43778+ u8 port3_name;
43779+};
43780+
43781+/*********************** Set driver function ***********************/
43782+#define CAPABILITY_SW_TIMESTAMPS 2
43783+#define CAPABILITY_BE3_NATIVE_ERX_API 4
43784+
43785+struct be_cmd_req_set_func_cap {
43786+ struct be_cmd_req_hdr hdr;
43787+ u32 valid_cap_flags;
43788+ u32 cap_flags;
43789+ u8 rsvd[212];
43790+};
43791+
43792+struct be_cmd_resp_set_func_cap {
43793+ struct be_cmd_resp_hdr hdr;
43794+ u32 valid_cap_flags;
43795+ u32 cap_flags;
43796+ u8 rsvd[212];
43797+};
43798+
43799+/*********************** PG Query Request ****************************/
43800+#define REQ_PG_QUERY 0x1
43801+#define REQ_PG_FEAT 0x1
43802+struct be_cmd_req_pg {
43803+ struct be_cmd_req_hdr hdr;
43804+ u32 query;
43805+ u32 pfc_pg;
43806+};
43807+
43808+struct be_cmd_resp_pg {
43809+ struct be_cmd_resp_hdr hdr;
43810+ u32 pfc_pg;
43811+ u32 num_tx_rings;
43812+};
43813+
43814+/*********************** Function Privileges ***********************/
43815+enum {
43816+ BE_PRIV_DEFAULT = 0x1,
43817+ BE_PRIV_LNKQUERY = 0x2,
43818+ BE_PRIV_LNKSTATS = 0x4,
43819+ BE_PRIV_LNKMGMT = 0x8,
43820+ BE_PRIV_LNKDIAG = 0x10,
43821+ BE_PRIV_UTILQUERY = 0x20,
43822+ BE_PRIV_FILTMGMT = 0x40,
43823+ BE_PRIV_IFACEMGMT = 0x80,
43824+ BE_PRIV_VHADM = 0x100,
43825+ BE_PRIV_DEVCFG = 0x200,
43826+ BE_PRIV_DEVSEC = 0x400
43827+};
43828+
43829+struct be_cmd_req_get_fn_privileges {
43830+ struct be_cmd_req_hdr hdr;
43831+ u32 rsvd;
43832+};
43833+
43834+struct be_cmd_resp_get_fn_privileges {
43835+ struct be_cmd_resp_hdr hdr;
43836+ u32 privilege_mask;
43837+};
43838+
43839+struct be_cmd_req_set_fn_privileges {
43840+ struct be_cmd_req_hdr hdr;
43841+ u32 privilege_mask;
43842+};
43843+
43844+struct be_cmd_resp_set_fn_privileges {
43845+ struct be_cmd_resp_hdr hdr;
43846+ u32 prev_privilege_mask;
43847+};
43848+
43849+/*********************** HSW Config ***********************/
43850+struct amap_set_hsw_context {
43851+ u8 interface_id[16];
43852+ u8 rsvd0[14];
43853+ u8 pvid_valid;
43854+ u8 rsvd1;
43855+ u8 rsvd2[16];
43856+ u8 pvid[16];
43857+ u8 rsvd3[32];
43858+ u8 rsvd4[32];
43859+ u8 rsvd5[32];
43860+} __packed;
43861+
43862+struct be_cmd_req_set_hsw_config {
43863+ struct be_cmd_req_hdr hdr;
43864+ u8 context[sizeof(struct amap_set_hsw_context) / 8];
43865+} __packed;
43866+
43867+struct be_cmd_resp_set_hsw_config {
43868+ struct be_cmd_resp_hdr hdr;
43869+ u32 rsvd;
43870+};
43871+
43872+struct amap_get_hsw_req_context {
43873+ u8 interface_id[16];
43874+ u8 rsvd0[14];
43875+ u8 pvid_valid;
43876+ u8 pport;
43877+} __packed;
43878+
43879+struct amap_get_hsw_resp_context {
43880+ u8 rsvd1[16];
43881+ u8 pvid[16];
43882+ u8 rsvd2[32];
43883+ u8 rsvd3[32];
43884+ u8 rsvd4[32];
43885+} __packed;
43886+
43887+struct be_cmd_req_get_hsw_config {
43888+ struct be_cmd_req_hdr hdr;
43889+ u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
43890+} __packed;
43891+
43892+struct be_cmd_resp_get_hsw_config {
43893+ struct be_cmd_resp_hdr hdr;
43894+ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
43895+ u32 rsvd;
43896+};
43897+
43898+/*************** Set speed ********************/
43899+struct be_cmd_req_set_port_speed_v1 {
43900+ struct be_cmd_req_hdr hdr;
43901+ u8 port_num;
43902+ u8 virt_port;
43903+ u16 mac_speed;
43904+ u16 dac_cable_length;
43905+ u16 rsvd0;
43906+};
43907+
43908+struct be_cmd_resp_set_port_speed_v1 {
43909+ struct be_cmd_resp_hdr hdr;
43910+ u32 rsvd0;
43911+};
43912+
43913+/************** get port speed *******************/
43914+struct be_cmd_req_get_port_speed {
43915+ struct be_cmd_req_hdr hdr;
43916+ u8 port_num;
43917+};
43918+
43919+struct be_cmd_resp_get_port_speed {
43920+ struct be_cmd_req_hdr hdr;
43921+ u16 mac_speed;
43922+ u16 dac_cable_length;
43923+};
43924+
43925+/*************** HW Stats Get v1 **********************************/
43926+#define BE_TXP_SW_SZ 48
43927+struct be_port_rxf_stats_v1 {
43928+ u32 rsvd0[12];
43929+ u32 rx_crc_errors;
43930+ u32 rx_alignment_symbol_errors;
43931+ u32 rx_pause_frames;
43932+ u32 rx_priority_pause_frames;
43933+ u32 rx_control_frames;
43934+ u32 rx_in_range_errors;
43935+ u32 rx_out_range_errors;
43936+ u32 rx_frame_too_long;
43937+ u32 rx_address_match_errors;
43938+ u32 rx_dropped_too_small;
43939+ u32 rx_dropped_too_short;
43940+ u32 rx_dropped_header_too_small;
43941+ u32 rx_dropped_tcp_length;
43942+ u32 rx_dropped_runt;
43943+ u32 rsvd1[10];
43944+ u32 rx_ip_checksum_errs;
43945+ u32 rx_tcp_checksum_errs;
43946+ u32 rx_udp_checksum_errs;
43947+ u32 rsvd2[7];
43948+ u32 rx_switched_unicast_packets;
43949+ u32 rx_switched_multicast_packets;
43950+ u32 rx_switched_broadcast_packets;
43951+ u32 rsvd3[3];
43952+ u32 tx_pauseframes;
43953+ u32 tx_priority_pauseframes;
43954+ u32 tx_controlframes;
43955+ u32 rsvd4[10];
43956+ u32 rxpp_fifo_overflow_drop;
43957+ u32 rx_input_fifo_overflow_drop;
43958+ u32 pmem_fifo_overflow_drop;
43959+ u32 jabber_events;
43960+ u32 rsvd5[3];
43961+};
43962+
43963+
43964+struct be_rxf_stats_v1 {
43965+ struct be_port_rxf_stats_v1 port[4];
43966+ u32 rsvd0[2];
43967+ u32 rx_drops_no_pbuf;
43968+ u32 rx_drops_no_txpb;
43969+ u32 rx_drops_no_erx_descr;
43970+ u32 rx_drops_no_tpre_descr;
43971+ u32 rsvd1[6];
43972+ u32 rx_drops_too_many_frags;
43973+ u32 rx_drops_invalid_ring;
43974+ u32 forwarded_packets;
43975+ u32 rx_drops_mtu;
43976+ u32 rsvd2[14];
43977+};
43978+
43979+struct be_erx_stats_v1 {
43980+ u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
43981+ u32 rsvd[4];
43982+};
43983+
43984+struct be_hw_stats_v1 {
43985+ struct be_rxf_stats_v1 rxf;
43986+ u32 rsvd0[BE_TXP_SW_SZ];
43987+ struct be_erx_stats_v1 erx;
43988+ struct be_pmem_stats pmem;
43989+ u32 rsvd1[3];
43990+};
43991+
43992+struct be_cmd_req_get_stats_v1 {
43993+ struct be_cmd_req_hdr hdr;
43994+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
43995+};
43996+
43997+struct be_cmd_resp_get_stats_v1 {
43998+ struct be_cmd_resp_hdr hdr;
43999+ struct be_hw_stats_v1 hw_stats;
44000+};
44001+
44002+static inline void *
44003+hw_stats_from_cmd(struct be_adapter *adapter)
44004+{
44005+ if (adapter->generation == BE_GEN3) {
44006+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
44007+
44008+ return &cmd->hw_stats;
44009+ } else {
44010+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
44011+
44012+ return &cmd->hw_stats;
44013+ }
44014+}
44015+
44016+static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
44017+{
44018+ if (adapter->generation == BE_GEN3) {
44019+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44020+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
44021+
44022+ return &rxf_stats->port[adapter->port_num];
44023+ } else {
44024+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44025+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
44026+
44027+ return &rxf_stats->port[adapter->port_num];
44028+ }
44029+}
44030+
44031+static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
44032+{
44033+ if (adapter->generation == BE_GEN3) {
44034+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44035+
44036+ return &hw_stats->rxf;
44037+ } else {
44038+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44039+
44040+ return &hw_stats->rxf;
44041+ }
44042+}
44043+
44044+static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
44045+{
44046+ if (adapter->generation == BE_GEN3) {
44047+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44048+
44049+ return &hw_stats->erx;
44050+ } else {
44051+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44052+
44053+ return &hw_stats->erx;
44054+ }
44055+}
44056+
44057+static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
44058+{
44059+ if (adapter->generation == BE_GEN3) {
44060+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
44061+
44062+ return &hw_stats->pmem;
44063+ } else {
44064+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
44065+
44066+ return &hw_stats->pmem;
44067+ }
44068+}
44069+
44070 extern int be_pci_fnum_get(struct be_adapter *adapter);
44071 extern int be_cmd_POST(struct be_adapter *adapter);
44072 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
44073 u8 type, bool permanent, u32 if_handle);
44074 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
44075- u32 if_id, u32 *pmac_id);
44076-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
44077+ u32 if_id, u32 *pmac_id, u32 domain);
44078+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id,
44079+ u32 domain);
44080 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
44081 u32 en_flags, u8 *mac, bool pmac_invalid,
44082- u32 *if_handle, u32 *pmac_id);
44083-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
44084+ u32 *if_handle, u32 *pmac_id, u32 domain);
44085+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
44086+ u32 domain);
44087 extern int be_cmd_eq_create(struct be_adapter *adapter,
44088 struct be_queue_info *eq, int eq_delay);
44089 extern int be_cmd_cq_create(struct be_adapter *adapter,
44090@@ -736,36 +1415,92 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
44091 struct be_queue_info *cq);
44092 extern int be_cmd_txq_create(struct be_adapter *adapter,
44093 struct be_queue_info *txq,
44094- struct be_queue_info *cq);
44095+ struct be_queue_info *cq, u8 *tc_id);
44096 extern int be_cmd_rxq_create(struct be_adapter *adapter,
44097 struct be_queue_info *rxq, u16 cq_id,
44098 u16 frag_size, u16 max_frame_size, u32 if_id,
44099- u32 rss);
44100+ u32 rss, u8 *rss_id);
44101 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
44102 int type);
44103+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
44104+ struct be_queue_info *q);
44105 extern int be_cmd_link_status_query(struct be_adapter *adapter,
44106- bool *link_up);
44107+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom);
44108 extern int be_cmd_reset(struct be_adapter *adapter);
44109 extern int be_cmd_get_stats(struct be_adapter *adapter,
44110 struct be_dma_mem *nonemb_cmd);
44111-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
44112+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
44113+ char *fw_on_flash);
44114
44115 extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
44116 extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
44117 u16 *vtag_array, u32 num, bool untagged,
44118 bool promiscuous);
44119-extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
44120- u8 port_num, bool en);
44121-extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
44122- struct dev_mc_list *mc_list, u32 mc_count);
44123+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
44124 extern int be_cmd_set_flow_control(struct be_adapter *adapter,
44125 u32 tx_fc, u32 rx_fc);
44126 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
44127 u32 *tx_fc, u32 *rx_fc);
44128-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
44129- u32 *port_num, u32 *cap);
44130+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
44131+ u32 *function_mode, u32 *functions_caps);
44132 extern int be_cmd_reset_function(struct be_adapter *adapter);
44133-extern int be_process_mcc(struct be_adapter *adapter);
44134+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
44135+ u16 table_size);
44136+extern int be_process_mcc(struct be_adapter *adapter, int *status);
44137+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
44138+ u8 port_num, u8 beacon, u8 status, u8 state);
44139+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
44140+ u8 port_num, u32 *state);
44141+extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
44142+ u8 *connector);
44143 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
44144 struct be_dma_mem *cmd, u32 flash_oper,
44145 u32 flash_opcode, u32 buf_size);
44146+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
44147+ int offset);
44148+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
44149+ struct be_dma_mem *nonemb_cmd);
44150+extern int be_cmd_fw_init(struct be_adapter *adapter);
44151+extern int be_cmd_fw_clean(struct be_adapter *adapter);
44152+extern void be_async_mcc_enable(struct be_adapter *adapter);
44153+extern void be_async_mcc_disable(struct be_adapter *adapter);
44154+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
44155+ u32 loopback_type, u32 pkt_size,
44156+ u32 num_pkts, u64 pattern);
44157+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
44158+ u32 byte_cnt, struct be_dma_mem *cmd);
44159+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
44160+ struct be_dma_mem *nonemb_cmd);
44161+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
44162+ u8 loopback_type, u8 enable);
44163+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
44164+ struct be_phy_info *phy_info);
44165+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
44166+extern void be_detect_dump_ue(struct be_adapter *adapter);
44167+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
44168+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
44169+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
44170+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
44171+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
44172+extern int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name);
44173+extern int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name);
44174+extern int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs);
44175+
44176+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
44177+ u32 *privilege, u32 domain);
44178+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
44179+ u32 mask, u32 *prev, u32 domain);
44180+extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
44181+ u32 domain, u16 intf_id);
44182+extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
44183+ u32 domain, u16 intf_id);
44184+extern int be_cmd_set_port_speed_v1(struct be_adapter *adapter, u8 port_num,
44185+ u16 mac_speed, u16 dac_cable_len);
44186+extern int be_cmd_get_port_speed(struct be_adapter *adapter, u8 port_num,
44187+ u16 *dac_cable_len, u16 *port_speed);
44188+#ifdef CONFIG_PALAU
44189+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
44190+ int req_size, void *va);
44191+#endif
44192+
44193+#endif /* !BE_CMDS_H */
44194diff --git a/drivers/net/benet/be_compat.c b/drivers/net/benet/be_compat.c
44195new file mode 100644
44196index 0000000..bdd1dba
44197--- /dev/null
44198+++ b/drivers/net/benet/be_compat.c
44199@@ -0,0 +1,630 @@
44200+/*
44201+ * Copyright (C) 2005 - 2011 Emulex
44202+ * All rights reserved.
44203+ *
44204+ * This program is free software; you can redistribute it and/or
44205+ * modify it under the terms of the GNU General Public License version 2
44206+ * as published by the Free Software Foundation. The full GNU General
44207+ * Public License is included in this distribution in the file called COPYING.
44208+ *
44209+ * Contact Information:
44210+ * linux-drivers@emulex.com
44211+ *
44212+ * Emulex
44213+ * 3333 Susan Street
44214+ * Costa Mesa, CA 92626
44215+ */
44216+
44217+#include "be.h"
44218+
44219+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44220+void be_netdev_ops_init(struct net_device *netdev, struct net_device_ops *ops)
44221+{
44222+ netdev->open = ops->ndo_open;
44223+ netdev->stop = ops->ndo_stop;
44224+ netdev->hard_start_xmit = ops->ndo_start_xmit;
44225+ netdev->set_mac_address = ops->ndo_set_mac_address;
44226+ netdev->get_stats = ops->ndo_get_stats;
44227+ netdev->set_multicast_list = ops->ndo_set_rx_mode;
44228+ netdev->change_mtu = ops->ndo_change_mtu;
44229+ netdev->vlan_rx_register = ops->ndo_vlan_rx_register;
44230+ netdev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
44231+ netdev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
44232+ netdev->do_ioctl = ops->ndo_do_ioctl;
44233+#ifdef CONFIG_NET_POLL_CONTROLLER
44234+ netdev->poll_controller = ops->ndo_poll_controller;
44235+#endif
44236+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
44237+ netdev->select_queue = ops->ndo_select_queue;
44238+#endif
44239+}
44240+#endif
44241+
44242+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44243+int eth_validate_addr(struct net_device *netdev)
44244+{
44245+ return 0;
44246+}
44247+#endif
44248+
44249+/* New NAPI backport */
44250+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
44251+
44252+int be_poll_compat(struct net_device *netdev, int *budget)
44253+{
44254+ struct napi_struct *napi = netdev->priv;
44255+ u32 work_done, can_do;
44256+
44257+ can_do = min(*budget, netdev->quota);
44258+ work_done = napi->poll(napi, can_do);
44259+
44260+ *budget -= work_done;
44261+ netdev->quota -= work_done;
44262+ if (napi->rx)
44263+ return (work_done >= can_do);
44264+ return 0;
44265+}
44266+
44267+
44268+#endif /* New NAPI backport */
44269+
44270+int be_netif_napi_add(struct net_device *netdev,
44271+ struct napi_struct *napi,
44272+ int (*poll) (struct napi_struct *, int), int weight)
44273+{
44274+#ifdef HAVE_SIMULATED_MULTI_NAPI
44275+ struct be_adapter *adapter = netdev_priv(netdev);
44276+ struct net_device *nd;
44277+
44278+ nd = alloc_netdev(0, "", ether_setup);
44279+ if (!nd)
44280+ return -ENOMEM;
44281+ nd->priv = napi;
44282+ nd->weight = BE_NAPI_WEIGHT;
44283+ nd->poll = be_poll_compat;
44284+ set_bit(__LINK_STATE_START, &nd->state);
44285+
44286+ if (napi == &adapter->rx_obj[0].rx_eq.napi)
44287+ napi->rx = true;
44288+ napi->poll = poll;
44289+ napi->dev = nd;
44290+#ifdef RHEL_NEW_NAPI
44291+ napi->napi.dev = netdev;
44292+#endif
44293+ return 0;
44294+#else
44295+ netif_napi_add(netdev, napi, poll, weight);
44296+ return 0;
44297+#endif
44298+}
44299+void be_netif_napi_del(struct net_device *netdev)
44300+{
44301+#ifdef HAVE_SIMULATED_MULTI_NAPI
44302+ struct be_adapter *adapter = netdev_priv(netdev);
44303+ struct napi_struct *napi;
44304+ struct be_rx_obj *rxo;
44305+ int i;
44306+
44307+ for_all_rx_queues(adapter, rxo, i) {
44308+ napi = &rxo->rx_eq.napi;
44309+ if (napi->dev) {
44310+ free_netdev(napi->dev);
44311+ napi->dev = NULL;
44312+ }
44313+ }
44314+
44315+ napi = &adapter->tx_eq.napi;
44316+ if (napi->dev) {
44317+ free_netdev(napi->dev);
44318+ napi->dev = NULL;
44319+ }
44320+#endif
44321+}
44322+/* INET_LRO backport */
44323+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
44324+
44325+#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
44326+#define IP_HDR_LEN(iph) (iph->ihl << 2)
44327+#define TCP_PAYLOAD_LENGTH(iph, tcph) (ntohs(iph->tot_len) - IP_HDR_LEN(iph) \
44328+ - TCP_HDR_LEN(tcph))
44329+
44330+#define IPH_LEN_WO_OPTIONS 5
44331+#define TCPH_LEN_WO_OPTIONS 5
44332+#define TCPH_LEN_W_TIMESTAMP 8
44333+
44334+#define LRO_MAX_PG_HLEN 64
44335+#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
44336+/*
44337+ * Basic tcp checks whether packet is suitable for LRO
44338+ */
44339+static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
44340+ int len, struct net_lro_desc *lro_desc)
44341+{
44342+ /* check ip header: don't aggregate padded frames */
44343+ if (ntohs(iph->tot_len) != len)
44344+ return -1;
44345+
44346+ if (iph->ihl != IPH_LEN_WO_OPTIONS)
44347+ return -1;
44348+
44349+ if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
44350+ || tcph->rst || tcph->syn || tcph->fin)
44351+ return -1;
44352+
44353+ if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
44354+ return -1;
44355+
44356+ if (tcph->doff != TCPH_LEN_WO_OPTIONS
44357+ && tcph->doff != TCPH_LEN_W_TIMESTAMP)
44358+ return -1;
44359+
44360+ /* check tcp options (only timestamp allowed) */
44361+ if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
44362+ u32 *topt = (u32 *)(tcph + 1);
44363+
44364+ if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
44365+ | (TCPOPT_TIMESTAMP << 8)
44366+ | TCPOLEN_TIMESTAMP))
44367+ return -1;
44368+
44369+ /* timestamp should be in right order */
44370+ topt++;
44371+ if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
44372+ ntohl(*topt)))
44373+ return -1;
44374+
44375+ /* timestamp reply should not be zero */
44376+ topt++;
44377+ if (*topt == 0)
44378+ return -1;
44379+ }
44380+
44381+ return 0;
44382+}
44383+
44384+static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
44385+{
44386+ struct iphdr *iph = lro_desc->iph;
44387+ struct tcphdr *tcph = lro_desc->tcph;
44388+ u32 *p;
44389+ __wsum tcp_hdr_csum;
44390+
44391+ tcph->ack_seq = lro_desc->tcp_ack;
44392+ tcph->window = lro_desc->tcp_window;
44393+
44394+ if (lro_desc->tcp_saw_tstamp) {
44395+ p = (u32 *)(tcph + 1);
44396+ *(p+2) = lro_desc->tcp_rcv_tsecr;
44397+ }
44398+
44399+ iph->tot_len = htons(lro_desc->ip_tot_len);
44400+
44401+ iph->check = 0;
44402+ iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
44403+
44404+ tcph->check = 0;
44405+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
44406+ lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
44407+ tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
44408+ lro_desc->ip_tot_len -
44409+ IP_HDR_LEN(iph), IPPROTO_TCP,
44410+ lro_desc->data_csum);
44411+}
44412+
44413+static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
44414+{
44415+ __wsum tcp_csum;
44416+ __wsum tcp_hdr_csum;
44417+ __wsum tcp_ps_hdr_csum;
44418+
44419+ tcp_csum = ~csum_unfold(tcph->check);
44420+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
44421+
44422+ tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
44423+ len + TCP_HDR_LEN(tcph),
44424+ IPPROTO_TCP, 0);
44425+
44426+ return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
44427+ tcp_ps_hdr_csum);
44428+}
44429+
44430+static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
44431+ struct iphdr *iph, struct tcphdr *tcph,
44432+ u16 vlan_tag, struct vlan_group *vgrp)
44433+{
44434+ int nr_frags;
44435+ u32 *ptr;
44436+ u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44437+
44438+ nr_frags = skb_shinfo(skb)->nr_frags;
44439+ lro_desc->parent = skb;
44440+ lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
44441+ lro_desc->iph = iph;
44442+ lro_desc->tcph = tcph;
44443+ lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
44444+ lro_desc->tcp_ack = ntohl(tcph->ack_seq);
44445+ lro_desc->tcp_window = tcph->window;
44446+
44447+ lro_desc->pkt_aggr_cnt = 1;
44448+ lro_desc->ip_tot_len = ntohs(iph->tot_len);
44449+
44450+ if (tcph->doff == 8) {
44451+ ptr = (u32 *)(tcph+1);
44452+ lro_desc->tcp_saw_tstamp = 1;
44453+ lro_desc->tcp_rcv_tsval = *(ptr+1);
44454+ lro_desc->tcp_rcv_tsecr = *(ptr+2);
44455+ }
44456+
44457+ lro_desc->mss = tcp_data_len;
44458+ lro_desc->vgrp = vgrp;
44459+ lro_desc->vlan_tag = vlan_tag;
44460+ lro_desc->active = 1;
44461+
44462+ if (tcp_data_len)
44463+ lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
44464+ tcp_data_len);
44465+
44466+ if (!tcp_data_len)
44467+ lro_desc->ack_cnt++;
44468+}
44469+
44470+static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
44471+{
44472+ memset(lro_desc, 0, sizeof(struct net_lro_desc));
44473+}
44474+
44475+static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
44476+ struct tcphdr *tcph, int tcp_data_len)
44477+{
44478+ struct sk_buff *parent = lro_desc->parent;
44479+ u32 *topt;
44480+
44481+ lro_desc->pkt_aggr_cnt++;
44482+ lro_desc->ip_tot_len += tcp_data_len;
44483+ lro_desc->tcp_next_seq += tcp_data_len;
44484+ lro_desc->tcp_window = tcph->window;
44485+ lro_desc->tcp_ack = tcph->ack_seq;
44486+
44487+ /* don't update tcp_rcv_tsval, would not work with PAWS */
44488+ if (lro_desc->tcp_saw_tstamp) {
44489+ topt = (u32 *) (tcph + 1);
44490+ lro_desc->tcp_rcv_tsecr = *(topt + 2);
44491+ }
44492+
44493+ if (tcp_data_len)
44494+ lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
44495+ lro_tcp_data_csum(iph, tcph,
44496+ tcp_data_len),
44497+ parent->len);
44498+
44499+ parent->len += tcp_data_len;
44500+ parent->data_len += tcp_data_len;
44501+ if (tcp_data_len > lro_desc->mss)
44502+ lro_desc->mss = tcp_data_len;
44503+}
44504+
44505+static void lro_add_frags(struct net_lro_desc *lro_desc,
44506+ int len, int hlen, int truesize,
44507+ struct skb_frag_struct *skb_frags,
44508+ struct iphdr *iph, struct tcphdr *tcph)
44509+{
44510+ struct sk_buff *skb = lro_desc->parent;
44511+ int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44512+
44513+ lro_add_common(lro_desc, iph, tcph, tcp_data_len);
44514+
44515+ skb->truesize += truesize;
44516+
44517+ if (!tcp_data_len) {
44518+ put_page(skb_frags[0].page);
44519+ lro_desc->ack_cnt++;
44520+ return;
44521+ }
44522+
44523+ skb_frags[0].page_offset += hlen;
44524+ skb_frags[0].size -= hlen;
44525+
44526+ while (tcp_data_len > 0) {
44527+ *(lro_desc->next_frag) = *skb_frags;
44528+ tcp_data_len -= skb_frags->size;
44529+ lro_desc->next_frag++;
44530+ skb_frags++;
44531+ skb_shinfo(skb)->nr_frags++;
44532+ }
44533+}
44534+
44535+static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
44536+ struct iphdr *iph,
44537+ struct tcphdr *tcph)
44538+{
44539+ if ((lro_desc->iph->saddr != iph->saddr)
44540+ || (lro_desc->iph->daddr != iph->daddr)
44541+ || (lro_desc->tcph->source != tcph->source)
44542+ || (lro_desc->tcph->dest != tcph->dest))
44543+ return -1;
44544+ return 0;
44545+}
44546+
44547+static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
44548+ struct net_lro_desc *lro_arr,
44549+ struct iphdr *iph,
44550+ struct tcphdr *tcph)
44551+{
44552+ struct net_lro_desc *lro_desc = NULL;
44553+ struct net_lro_desc *tmp;
44554+ int max_desc = lro_mgr->max_desc;
44555+ int i;
44556+
44557+ for (i = 0; i < max_desc; i++) {
44558+ tmp = &lro_arr[i];
44559+ if (tmp->active)
44560+ if (!lro_check_tcp_conn(tmp, iph, tcph)) {
44561+ lro_desc = tmp;
44562+ goto out;
44563+ }
44564+ }
44565+
44566+ for (i = 0; i < max_desc; i++) {
44567+ if (!lro_arr[i].active) {
44568+ lro_desc = &lro_arr[i];
44569+ goto out;
44570+ }
44571+ }
44572+
44573+ LRO_INC_STATS(lro_mgr, no_desc);
44574+out:
44575+ return lro_desc;
44576+}
44577+
44578+static void lro_flush(struct net_lro_mgr *lro_mgr,
44579+ struct net_lro_desc *lro_desc)
44580+{
44581+ struct be_adapter *adapter = netdev_priv(lro_mgr->dev);
44582+
44583+ if (lro_desc->pkt_aggr_cnt > 1)
44584+ lro_update_tcp_ip_header(lro_desc);
44585+
44586+ skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
44587+
44588+ if (lro_desc->vgrp) {
44589+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44590+ vlan_hwaccel_receive_skb(lro_desc->parent,
44591+ lro_desc->vgrp,
44592+ lro_desc->vlan_tag);
44593+ else
44594+ vlan_hwaccel_rx(lro_desc->parent,
44595+ lro_desc->vgrp,
44596+ lro_desc->vlan_tag);
44597+
44598+ } else {
44599+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44600+ netif_receive_skb(lro_desc->parent);
44601+ else
44602+ netif_rx(lro_desc->parent);
44603+ }
44604+
44605+ LRO_INC_STATS(lro_mgr, flushed);
44606+ lro_clear_desc(lro_desc);
44607+}
44608+
44609+static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
44610+ struct skb_frag_struct *frags,
44611+ int len, int true_size,
44612+ void *mac_hdr,
44613+ int hlen, __wsum sum,
44614+ u32 ip_summed)
44615+{
44616+ struct sk_buff *skb;
44617+ struct skb_frag_struct *skb_frags;
44618+ int data_len = len;
44619+ int hdr_len = min(len, hlen);
44620+
44621+ skb = netdev_alloc_skb(lro_mgr->dev, hlen);
44622+ if (!skb)
44623+ return NULL;
44624+
44625+ skb->len = len;
44626+ skb->data_len = len - hdr_len;
44627+ skb->truesize += true_size;
44628+ skb->tail += hdr_len;
44629+
44630+ memcpy(skb->data, mac_hdr, hdr_len);
44631+
44632+ if (skb->data_len) {
44633+ skb_frags = skb_shinfo(skb)->frags;
44634+ while (data_len > 0) {
44635+ *skb_frags = *frags;
44636+ data_len -= frags->size;
44637+ skb_frags++;
44638+ frags++;
44639+ skb_shinfo(skb)->nr_frags++;
44640+ }
44641+ skb_shinfo(skb)->frags[0].page_offset += hdr_len;
44642+ skb_shinfo(skb)->frags[0].size -= hdr_len;
44643+ } else {
44644+ put_page(frags[0].page);
44645+ }
44646+
44647+
44648+ skb->ip_summed = ip_summed;
44649+ skb->csum = sum;
44650+ skb->protocol = eth_type_trans(skb, lro_mgr->dev);
44651+ return skb;
44652+}
44653+
44654+static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
44655+ struct skb_frag_struct *frags,
44656+ int len, int true_size,
44657+ struct vlan_group *vgrp,
44658+ u16 vlan_tag, void *priv, __wsum sum)
44659+{
44660+ struct net_lro_desc *lro_desc;
44661+ struct iphdr *iph;
44662+ struct tcphdr *tcph;
44663+ struct sk_buff *skb;
44664+ u64 flags;
44665+ void *mac_hdr;
44666+ int mac_hdr_len;
44667+ int hdr_len = LRO_MAX_PG_HLEN;
44668+ int vlan_hdr_len = 0;
44669+ u8 pad_bytes;
44670+
44671+ if (!lro_mgr->get_frag_header
44672+ || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
44673+ (void *)&tcph, &flags, priv)) {
44674+ mac_hdr = page_address(frags->page) + frags->page_offset;
44675+ goto out1;
44676+ }
44677+
44678+ if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
44679+ goto out1;
44680+
44681+ hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
44682+ mac_hdr_len = (int)((void *)(iph) - mac_hdr);
44683+
44684+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
44685+ if (!lro_desc)
44686+ goto out1;
44687+
44688+ pad_bytes = len - (ntohs(iph->tot_len) + mac_hdr_len);
44689+ if (!TCP_PAYLOAD_LENGTH(iph, tcph) && pad_bytes) {
44690+ len -= pad_bytes; /* trim the packet */
44691+ frags[0].size -= pad_bytes;
44692+ true_size -= pad_bytes;
44693+ }
44694+
44695+ if (!lro_desc->active) { /* start new lro session */
44696+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
44697+ goto out1;
44698+
44699+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44700+ hdr_len, 0, lro_mgr->ip_summed_aggr);
44701+ if (!skb)
44702+ goto out;
44703+
44704+ if ((skb->protocol == htons(ETH_P_8021Q))
44705+ && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
44706+ vlan_hdr_len = VLAN_HLEN;
44707+
44708+ iph = (void *)(skb->data + vlan_hdr_len);
44709+ tcph = (void *)((u8 *)skb->data + vlan_hdr_len
44710+ + IP_HDR_LEN(iph));
44711+
44712+ lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
44713+ LRO_INC_STATS(lro_mgr, aggregated);
44714+ return 0;
44715+ }
44716+
44717+ if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
44718+ goto out2;
44719+
44720+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
44721+ goto out2;
44722+
44723+ lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
44724+ LRO_INC_STATS(lro_mgr, aggregated);
44725+
44726+ if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
44727+ lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
44728+ lro_flush(lro_mgr, lro_desc);
44729+
44730+ return NULL;
44731+
44732+out2: /* send aggregated packets to the stack */
44733+ lro_flush(lro_mgr, lro_desc);
44734+
44735+out1: /* Original packet has to be posted to the stack */
44736+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44737+ hdr_len, sum, lro_mgr->ip_summed);
44738+out:
44739+ return skb;
44740+}
44741+
44742+void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44743+ struct skb_frag_struct *frags,
44744+ int len, int true_size, void *priv, __wsum sum)
44745+{
44746+ struct sk_buff *skb;
44747+
44748+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
44749+ priv, sum);
44750+ if (!skb)
44751+ return;
44752+
44753+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44754+ netif_receive_skb(skb);
44755+ else
44756+ netif_rx(skb);
44757+}
44758+
44759+void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44760+ struct skb_frag_struct *frags,
44761+ int len, int true_size,
44762+ struct vlan_group *vgrp,
44763+ u16 vlan_tag, void *priv, __wsum sum)
44764+{
44765+ struct sk_buff *skb;
44766+
44767+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
44768+ vlan_tag, priv, sum);
44769+ if (!skb)
44770+ return;
44771+
44772+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44773+ vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
44774+ else
44775+ vlan_hwaccel_rx(skb, vgrp, vlan_tag);
44776+}
44777+
44778+void lro_flush_all_compat(struct net_lro_mgr *lro_mgr)
44779+{
44780+ int i;
44781+ struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
44782+
44783+ for (i = 0; i < lro_mgr->max_desc; i++) {
44784+ if (lro_desc[i].active)
44785+ lro_flush(lro_mgr, &lro_desc[i]);
44786+ }
44787+}
44788+#endif /* INET_LRO backport */
44789+
44790+#ifndef TX_MQ
44791+struct net_device *alloc_etherdev_mq_compat(int sizeof_priv,
44792+ unsigned int queue_count)
44793+{
44794+ return alloc_etherdev(sizeof_priv);
44795+}
44796+
44797+void netif_wake_subqueue_compat(struct net_device *dev, u16 queue_index)
44798+{
44799+ netif_wake_queue(dev);
44800+}
44801+
44802+void netif_stop_subqueue_compat(struct net_device *dev, u16 queue_index)
44803+{
44804+ netif_stop_queue(dev);
44805+}
44806+
44807+int __netif_subqueue_stopped_compat(const struct net_device *dev,
44808+ u16 queue_index)
44809+{
44810+ return netif_queue_stopped(dev);
44811+}
44812+
44813+u16 skb_get_queue_mapping_compat(const struct sk_buff *skb)
44814+{
44815+ return 0;
44816+}
44817+
44818+void netif_set_real_num_tx_queues_compat(struct net_device *dev,
44819+ unsigned int txq)
44820+{
44821+ return;
44822+}
44823+
44824+u16 skb_tx_hash_compat(const struct net_device *dev,
44825+ const struct sk_buff *skb)
44826+{
44827+ return 0;
44828+}
44829+#endif
44830diff --git a/drivers/net/benet/be_compat.h b/drivers/net/benet/be_compat.h
44831new file mode 100644
44832index 0000000..8ceecc8
44833--- /dev/null
44834+++ b/drivers/net/benet/be_compat.h
44835@@ -0,0 +1,621 @@
44836+/*
44837+ * Copyright (C) 2005 - 2011 Emulex
44838+ * All rights reserved.
44839+ *
44840+ * This program is free software; you can redistribute it and/or
44841+ * modify it under the terms of the GNU General Public License version 2
44842+ * as published by the Free Software Foundation. The full GNU General
44843+ * Public License is included in this distribution in the file called COPYING.
44844+ *
44845+ * Contact Information:
44846+ * linux-drivers@emulex.com
44847+ *
44848+ * Emulex
44849+ * 3333 Susan Street
44850+ * Costa Mesa, CA 92626
44851+ */
44852+
44853+#ifndef BE_COMPAT_H
44854+#define BE_COMPAT_H
44855+
44856+/****************** RHEL5 and SLES10 backport ***************************/
44857+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
44858+
44859+#ifndef upper_32_bits
44860+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
44861+#endif
44862+
44863+#ifndef CHECKSUM_PARTIAL
44864+#define CHECKSUM_PARTIAL CHECKSUM_HW
44865+#define CHECKSUM_COMPLETE CHECKSUM_HW
44866+#endif
44867+
44868+#if !defined(ip_hdr)
44869+#define ip_hdr(skb) (skb->nh.iph)
44870+#define ipv6_hdr(skb) (skb->nh.ipv6h)
44871+#endif
44872+
44873+#if !defined(__packed)
44874+#define __packed __attribute__ ((packed))
44875+#endif
44876+
44877+#if !defined(RHEL_MINOR)
44878+/* Only for RH5U1 (Maui) and SLES10 NIC driver */
44879+enum {
44880+ false = 0,
44881+ true = 1
44882+};
44883+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
44884+/* Only for RH5U1 (Maui) NIC driver */
44885+static inline __attribute__((const))
44886+int __ilog2_u32(u32 n)
44887+{
44888+ return fls(n) - 1;
44889+}
44890+#endif
44891+#endif
44892+
44893+#define ETH_FCS_LEN 4
44894+#define bool u8
44895+#ifndef PTR_ALIGN
44896+#define PTR_ALIGN(p, a) ((typeof(p)) \
44897+ ALIGN((unsigned long)(p), (a)))
44898+#endif
44899+#define list_first_entry(ptr, type, member) \
44900+ list_entry((ptr)->next, type, member)
44901+
44902+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44903+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44904+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] \
44905+ __devinitdata
44906+#endif
44907+
44908+/* Backport of request_irq */
44909+typedef irqreturn_t(*backport_irq_handler_t) (int, void *);
44910+static inline int
44911+backport_request_irq(unsigned int irq, irqreturn_t(*handler) (int, void *),
44912+ unsigned long flags, const char *dev_name, void *dev_id)
44913+{
44914+ return request_irq(irq,
44915+ (irqreturn_t(*) (int, void *, struct pt_regs *))handler,
44916+ flags, dev_name, dev_id);
44917+}
44918+#define request_irq backport_request_irq
44919+
44920+#endif /*** RHEL5 and SLES10 backport ***/
44921+
44922+#if !defined(__packed)
44923+#define __packed __attribute__ ((packed))
44924+#endif
44925+
44926+/****************** SLES10 only backport ***************************/
44927+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
44928+
44929+#include <linux/tifm.h>
44930+
44931+#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
44932+#define IRQF_SHARED SA_SHIRQ
44933+#define CHECKSUM_PARTIAL CHECKSUM_HW
44934+#define CHECKSUM_COMPLETE CHECKSUM_HW
44935+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
44936+#define NETIF_F_IPV6_CSUM NETIF_F_IP_CSUM
44937+#define NETIF_F_TSO6 NETIF_F_TSO
44938+
44939+
44940+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
44941+ unsigned int length)
44942+{
44943+ /* 16 == NET_PAD_SKB */
44944+ struct sk_buff *skb;
44945+ skb = alloc_skb(length + 16, GFP_ATOMIC);
44946+ if (likely(skb != NULL)) {
44947+ skb_reserve(skb, 16);
44948+ skb->dev = dev;
44949+ }
44950+ return skb;
44951+}
44952+
44953+#define PCI_SAVE_STATE(x)
44954+
44955+#else /* SLES10 only backport */
44956+
44957+#define PCI_SAVE_STATE(x) pci_save_state(x)
44958+
44959+#endif /* SLES10 only backport */
44960+
44961+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
44962+#define netdev_tx_t int
44963+#endif
44964+
44965+#ifndef VLAN_PRIO_MASK
44966+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
44967+#define VLAN_PRIO_SHIFT 13
44968+#endif
44969+
44970+/*
44971+ * Backport of netdev ops struct
44972+ */
44973+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44974+struct net_device_ops {
44975+ int (*ndo_init)(struct net_device *dev);
44976+ void (*ndo_uninit)(struct net_device *dev);
44977+ int (*ndo_open)(struct net_device *dev);
44978+ int (*ndo_stop)(struct net_device *dev);
44979+ int (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev);
44980+ u16 (*ndo_select_queue)(struct net_device *dev,
44981+ struct sk_buff *skb);
44982+ void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
44983+ void (*ndo_set_rx_mode)(struct net_device *dev);
44984+ void (*ndo_set_multicast_list)(struct net_device *dev);
44985+ int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
44986+ int (*ndo_validate_addr)(struct net_device *dev);
44987+ int (*ndo_do_ioctl)(struct net_device *dev,
44988+ struct ifreq *ifr, int cmd);
44989+ int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
44990+ int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
44991+ int (*ndo_neigh_setup)(struct net_device *dev,
44992+ struct neigh_parms *);
44993+ void (*ndo_tx_timeout) (struct net_device *dev);
44994+
44995+ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
44996+
44997+ void (*ndo_vlan_rx_register)(struct net_device *dev,
44998+ struct vlan_group *grp);
44999+ void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
45000+ unsigned short vid);
45001+ void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
45002+ unsigned short vid);
45003+#ifdef CONFIG_NET_POLL_CONTROLLER
45004+#define HAVE_NETDEV_POLL
45005+ void (*ndo_poll_controller)(struct net_device *dev);
45006+#endif
45007+};
45008+extern void be_netdev_ops_init(struct net_device *netdev,
45009+ struct net_device_ops *ops);
45010+extern int eth_validate_addr(struct net_device *);
45011+
45012+#endif /* Netdev ops backport */
45013+
45014+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 29)
45015+#undef NETIF_F_GRO
45016+#endif
45017+
45018+#ifdef NO_GRO
45019+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
45020+#undef NETIF_F_GRO
45021+#endif
45022+#endif
45023+
45024+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45025+#define HAVE_ETHTOOL_FLASH
45026+#endif
45027+
45028+/*
45029+ * Backport of NAPI
45030+ */
45031+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
45032+
45033+#if defined(RHEL_MINOR) && (RHEL_MINOR > 3)
45034+#define RHEL_NEW_NAPI
45035+#endif
45036+
45037+/* We need a new struct that has some meta data beyond rhel 5.4's napi_struct
45038+ * to fix rhel5.4's half-baked new napi implementation.
45039+ * We don't want to use rhel 5.4's broken napi_complete; so
45040+ * define a new be_napi_complete that executes the logic only for Rx
45041+ */
45042+
45043+#ifdef RHEL_NEW_NAPI
45044+#define napi_complete be_napi_complete
45045+typedef struct napi_struct rhel_napi_struct;
45046+#endif
45047+#define napi_struct be_napi_struct
45048+#define napi_gro_frags(napi) napi_gro_frags((rhel_napi_struct *) napi)
45049+#define vlan_gro_frags(napi, vlan_grp, vid)\
45050+ vlan_gro_frags((rhel_napi_struct *) napi, vlan_grp, vid)
45051+#define napi_get_frags(napi) napi_get_frags((rhel_napi_struct *) napi)
45052+
45053+struct napi_struct {
45054+#ifdef RHEL_NEW_NAPI
45055+ rhel_napi_struct napi; /* must be the first member */
45056+#endif
45057+ struct net_device *dev;
45058+ int (*poll) (struct napi_struct *napi, int budget);
45059+ bool rx;
45060+};
45061+
45062+static inline void napi_complete(struct napi_struct *napi)
45063+{
45064+#ifdef NETIF_F_GRO
45065+ napi_gro_flush((rhel_napi_struct *)napi);
45066+#endif
45067+ netif_rx_complete(napi->dev);
45068+}
45069+
45070+static inline void napi_schedule(struct napi_struct *napi)
45071+{
45072+ netif_rx_schedule(napi->dev);
45073+}
45074+
45075+static inline void napi_enable(struct napi_struct *napi)
45076+{
45077+ netif_poll_enable(napi->dev);
45078+}
45079+
45080+static inline void napi_disable(struct napi_struct *napi)
45081+{
45082+ netif_poll_disable(napi->dev);
45083+}
45084+
45085+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45086+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45087+static inline void vlan_group_set_device(struct vlan_group *vg,
45088+ u16 vlan_id,
45089+ struct net_device *dev)
45090+{
45091+ struct net_device **array;
45092+ if (!vg)
45093+ return;
45094+ array = vg->vlan_devices;
45095+ array[vlan_id] = dev;
45096+}
45097+#endif
45098+
45099+#endif /* New NAPI backport */
45100+
45101+extern int be_netif_napi_add(struct net_device *netdev,
45102+ struct napi_struct *napi,
45103+ int (*poll) (struct napi_struct *, int), int weight);
45104+extern void be_netif_napi_del(struct net_device *netdev);
45105+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45106+#define HAVE_SIMULATED_MULTI_NAPI
45107+#endif
45108+
45109+/************** Backport of Delayed work queues interface ****************/
45110+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
45111+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45112+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45113+struct delayed_work {
45114+ struct work_struct work;
45115+};
45116+#endif
45117+
45118+#define INIT_DELAYED_WORK(_work, _func) \
45119+ INIT_WORK(&(_work)->work, _func, &(_work)->work)
45120+
45121+static inline int backport_cancel_delayed_work_sync(struct delayed_work *work)
45122+{
45123+ cancel_rearming_delayed_work(&work->work);
45124+ return 0;
45125+}
45126+#define cancel_delayed_work_sync backport_cancel_delayed_work_sync
45127+
45128+static inline int backport_schedule_delayed_work(struct delayed_work *work,
45129+ unsigned long delay)
45130+{
45131+ if (unlikely(!delay))
45132+ return schedule_work(&work->work);
45133+ else
45134+ return schedule_delayed_work(&work->work, delay);
45135+}
45136+#define schedule_delayed_work backport_schedule_delayed_work
45137+#endif /* backport delayed workqueue */
45138+
45139+
45140+/************** Backport of INET_LRO **********************************/
45141+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45142+
45143+#include <linux/inet_lro.h>
45144+
45145+#else
45146+
45147+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
45148+
45149+#if defined(RHEL_MINOR) && RHEL_MINOR < 6
45150+typedef __u16 __bitwise __sum16;
45151+typedef __u32 __bitwise __wsum;
45152+#endif
45153+
45154+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR <= 3)) || \
45155+ (!defined(RHEL_MINOR)))
45156+static inline __wsum csum_unfold(__sum16 n)
45157+{
45158+ return (__force __wsum)n;
45159+}
45160+#endif
45161+
45162+#endif
45163+
45164+#define lro_flush_all lro_flush_all_compat
45165+#define lro_vlan_hwaccel_receive_frags lro_vlan_hwaccel_receive_frags_compat
45166+#define lro_receive_frags lro_receive_frags_compat
45167+
45168+struct net_lro_stats {
45169+ unsigned long aggregated;
45170+ unsigned long flushed;
45171+ unsigned long no_desc;
45172+};
45173+
45174+struct net_lro_desc {
45175+ struct sk_buff *parent;
45176+ struct sk_buff *last_skb;
45177+ struct skb_frag_struct *next_frag;
45178+ struct iphdr *iph;
45179+ struct tcphdr *tcph;
45180+ struct vlan_group *vgrp;
45181+ __wsum data_csum;
45182+ u32 tcp_rcv_tsecr;
45183+ u32 tcp_rcv_tsval;
45184+ u32 tcp_ack;
45185+ u32 tcp_next_seq;
45186+ u32 skb_tot_frags_len;
45187+ u32 ack_cnt;
45188+ u16 ip_tot_len;
45189+ u16 tcp_saw_tstamp; /* timestamps enabled */
45190+ u16 tcp_window;
45191+ u16 vlan_tag;
45192+ int pkt_aggr_cnt; /* counts aggregated packets */
45193+ int vlan_packet;
45194+ int mss;
45195+ int active;
45196+};
45197+
45198+struct net_lro_mgr {
45199+ struct net_device *dev;
45200+ struct net_lro_stats stats;
45201+
45202+ /* LRO features */
45203+ unsigned long features;
45204+#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
45205+#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
45206+ from received packets and eth protocol
45207+ is still ETH_P_8021Q */
45208+
45209+ u32 ip_summed; /* Set in non generated SKBs in page mode */
45210+ u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
45211+ * or CHECKSUM_NONE */
45212+
45213+ int max_desc; /* Max number of LRO descriptors */
45214+ int max_aggr; /* Max number of LRO packets to be aggregated */
45215+
45216+ struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
45217+
45218+ /* Optimized driver functions
45219+ * get_skb_header: returns tcp and ip header for packet in SKB
45220+ */
45221+ int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
45222+ void **tcpudp_hdr, u64 *hdr_flags, void *priv);
45223+
45224+ /* hdr_flags: */
45225+#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
45226+#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
45227+
45228+ /*
45229+ * get_frag_header: returns mac, tcp and ip header for packet in SKB
45230+ *
45231+ * @hdr_flags: Indicate what kind of LRO has to be done
45232+ * (IPv4/IPv6/TCP/UDP)
45233+ */
45234+ int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
45235+ void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
45236+ void *priv);
45237+};
45238+
45239+extern void lro_receive_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
45240+ void *priv);
45241+
45242+extern void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
45243+ struct sk_buff *skb, struct vlan_group *vgrp,
45244+ u16 vlan_tag, void *priv);
45245+
45246+/* This functions aggregate fragments and generate SKBs do pass
45247+ * the packets to the stack.
45248+ *
45249+ * @lro_mgr: LRO manager to use
45250+ * @frags: Fragment to be processed. Must contain entire header in first
45251+ * element.
45252+ * @len: Length of received data
45253+ * @true_size: Actual size of memory the fragment is consuming
45254+ * @priv: Private data that may be used by driver functions
45255+ * (for example get_tcp_ip_hdr)
45256+ */
45257+extern void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45258+ struct skb_frag_struct *frags, int len, int true_size,
45259+ void *priv, __wsum sum);
45260+
45261+extern void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45262+ struct skb_frag_struct *frags, int len, int true_size,
45263+ struct vlan_group *vgrp, u16 vlan_tag, void *priv,
45264+ __wsum sum);
45265+
45266+/* Forward all aggregated SKBs held by lro_mgr to network stack */
45267+extern void lro_flush_all_compat(struct net_lro_mgr *lro_mgr);
45268+
45269+extern void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
45270+ struct tcphdr *tcph);
45271+#endif /* backport of inet_lro */
45272+
45273+#ifndef ETHTOOL_FLASH_MAX_FILENAME
45274+#define ETHTOOL_FLASH_MAX_FILENAME 128
45275+#endif
45276+
45277+#if defined(CONFIG_XEN) && !defined(NETIF_F_GRO)
45278+#define BE_INIT_FRAGS_PER_FRAME (u32) 1
45279+#else
45280+#define BE_INIT_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
45281+#endif
45282+
45283+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45284+#ifdef CONFIG_PCI_IOV
45285+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR == 6)))
45286+#undef CONFIG_PCI_IOV
45287+#endif
45288+#endif
45289+#endif
45290+
45291+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
45292+#define dev_to_node(dev) -1
45293+#endif
45294+
45295+
45296+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45297+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR > 6)))
45298+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
45299+ unsigned int length)
45300+{
45301+ struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
45302+
45303+ if (NET_IP_ALIGN && skb)
45304+ skb_reserve(skb, NET_IP_ALIGN);
45305+ return skb;
45306+}
45307+#endif
45308+#endif
45309+
45310+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
45311+#ifndef netif_set_gso_max_size
45312+#define netif_set_gso_max_size(netdev, size) do {} while (0)
45313+#endif
45314+#endif
45315+
45316+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
45317+#if defined(RHEL_MINOR) && (RHEL_MINOR <= 4)
45318+static inline int skb_is_gso_v6(const struct sk_buff *skb)
45319+{
45320+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
45321+}
45322+#endif
45323+#endif
45324+
45325+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45326+static inline int skb_is_gso_v6(const struct sk_buff *skb)
45327+{
45328+ return (ip_hdr(skb)->version == 6);
45329+}
45330+#endif
45331+
45332+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45333+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
45334+#endif
45335+
45336+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45337+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)))
45338+#define HAVE_SRIOV_CONFIG
45339+#endif
45340+#endif
45341+
45342+#ifndef NETIF_F_VLAN_SG
45343+#define NETIF_F_VLAN_SG NETIF_F_SG
45344+#endif
45345+
45346+#ifndef NETIF_F_VLAN_CSUM
45347+#define NETIF_F_VLAN_CSUM NETIF_F_HW_CSUM
45348+#endif
45349+
45350+#ifndef NETIF_F_VLAN_TSO
45351+#define NETIF_F_VLAN_TSO NETIF_F_TSO
45352+#endif
45353+
45354+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45355+#define vlan_features features
45356+#endif
45357+
45358+#ifndef DEFINE_DMA_UNMAP_ADDR
45359+#define DEFINE_DMA_UNMAP_ADDR(bus) dma_addr_t bus
45360+#endif
45361+
45362+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45363+
45364+#ifndef netdev_mc_count
45365+#define netdev_mc_count(nd) (nd->mc_count)
45366+#endif
45367+
45368+#ifndef netdev_hw_addr
45369+#define netdev_hw_addr dev_mc_list
45370+#endif
45371+
45372+#ifndef netdev_for_each_mc_addr
45373+#define netdev_for_each_mc_addr(ha, nd) \
45374+ for (ha = (nd)->mc_list; ha; ha = ha->next)
45375+#endif
45376+
45377+#define DMI_ADDR dmi_addr
45378+#else
45379+#define DMI_ADDR addr
45380+#endif
45381+
45382+#ifndef VLAN_GROUP_ARRAY_LEN
45383+#define VLAN_GROUP_ARRAY_LEN VLAN_N_VID
45384+#endif
45385+/**************************** Multi TXQ Support ******************************/
45386+
45387+/* Supported only in RHEL6 and SL11.1 (barring one execption) */
45388+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45389+#define MQ_TX
45390+#endif
45391+
45392+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45393+#define alloc_etherdev_mq(sz, cnt) alloc_etherdev(sz)
45394+#define skb_get_queue_mapping(skb) 0
45395+#define skb_tx_hash(dev, skb) 0
45396+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45397+#define netif_wake_subqueue(dev, idx) netif_wake_queue(dev)
45398+#define netif_stop_subqueue(dev, idx) netif_stop_queue(dev)
45399+#define __netif_subqueue_stopped(dev, idx) netif_queue_stopped(dev)
45400+#endif /* < 2.6.27 */
45401+
45402+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
45403+ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)))
45404+#define skb_tx_hash(dev, skb) 0
45405+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45406+#endif
45407+
45408+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45409+#define netif_set_real_num_tx_queues be_set_real_num_tx_queues
45410+static inline void be_set_real_num_tx_queues(struct net_device *dev,
45411+ unsigned int txq)
45412+{
45413+ dev->real_num_tx_queues = txq;
45414+}
45415+#endif
45416+
45417+#include <linux/if_vlan.h>
45418+static inline void be_reset_skb_tx_vlan(struct sk_buff *skb)
45419+{
45420+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45421+ skb->vlan_tci = 0;
45422+#else
45423+ struct vlan_skb_tx_cookie *cookie;
45424+
45425+ cookie = VLAN_TX_SKB_CB(skb);
45426+ cookie->magic = 0;
45427+#endif
45428+}
45429+
45430+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45431+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
45432+{
45433+ skb->nh.raw = skb->data + offset;
45434+}
45435+#endif
45436+
45437+static inline struct sk_buff *be_vlan_put_tag(struct sk_buff *skb,
45438+ unsigned short vlan_tag)
45439+{
45440+ struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag);
45441+ /* On kernel versions < 2.6.27 the __vlan_put_tag() function
45442+ * distorts the network layer hdr pointer in the skb which
45443+ * affects the detection of UDP/TCP packets down the line in
45444+ * wrb_fill_hdr().This work-around sets it right.
45445+ */
45446+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45447+ skb_set_network_header(new_skb, VLAN_ETH_HLEN);
45448+#endif
45449+ return new_skb;
45450+}
45451+
45452+#ifndef ACCESS_ONCE
45453+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
45454+#endif
45455+
45456+#endif /* BE_COMPAT_H */
45457diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
45458index f0fd95b..37bad99 100644
45459--- a/drivers/net/benet/be_ethtool.c
45460+++ b/drivers/net/benet/be_ethtool.c
45461@@ -1,18 +1,18 @@
45462 /*
45463- * Copyright (C) 2005 - 2009 ServerEngines
45464+ * Copyright (C) 2005 - 2011 Emulex
45465 * All rights reserved.
45466 *
45467 * This program is free software; you can redistribute it and/or
45468 * modify it under the terms of the GNU General Public License version 2
45469- * as published by the Free Software Foundation. The full GNU General
45470+ * as published by the Free Software Foundation. The full GNU General
45471 * Public License is included in this distribution in the file called COPYING.
45472 *
45473 * Contact Information:
45474- * linux-drivers@serverengines.com
45475+ * linux-drivers@emulex.com
45476 *
45477- * ServerEngines
45478- * 209 N. Fair Oaks Ave
45479- * Sunnyvale, CA 94085
45480+ * Emulex
45481+ * 3333 Susan Street
45482+ * Costa Mesa, CA 92626
45483 */
45484
45485 #include "be.h"
45486@@ -26,21 +26,19 @@ struct be_ethtool_stat {
45487 int offset;
45488 };
45489
45490-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
45491+enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
45492 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
45493 offsetof(_struct, field)
45494-#define NETSTAT_INFO(field) #field, NETSTAT,\
45495+#define NETSTAT_INFO(field) #field, NETSTAT,\
45496 FIELDINFO(struct net_device_stats,\
45497 field)
45498-#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45499- FIELDINFO(struct be_drvr_stats, field)
45500-#define MISCSTAT_INFO(field) #field, MISCSTAT,\
45501- FIELDINFO(struct be_rxf_stats, field)
45502-#define PORTSTAT_INFO(field) #field, PORTSTAT,\
45503- FIELDINFO(struct be_port_rxf_stats, \
45504+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
45505+ FIELDINFO(struct be_tx_stats, field)
45506+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
45507+ FIELDINFO(struct be_rx_stats, field)
45508+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45509+ FIELDINFO(struct be_drv_stats, \
45510 field)
45511-#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45512- FIELDINFO(struct be_erx_stats, field)
45513
45514 static const struct be_ethtool_stat et_stats[] = {
45515 {NETSTAT_INFO(rx_packets)},
45516@@ -51,70 +49,131 @@ static const struct be_ethtool_stat et_stats[] = {
45517 {NETSTAT_INFO(tx_errors)},
45518 {NETSTAT_INFO(rx_dropped)},
45519 {NETSTAT_INFO(tx_dropped)},
45520- {DRVSTAT_INFO(be_tx_reqs)},
45521- {DRVSTAT_INFO(be_tx_stops)},
45522- {DRVSTAT_INFO(be_fwd_reqs)},
45523- {DRVSTAT_INFO(be_tx_wrbs)},
45524- {DRVSTAT_INFO(be_polls)},
45525 {DRVSTAT_INFO(be_tx_events)},
45526- {DRVSTAT_INFO(be_rx_events)},
45527- {DRVSTAT_INFO(be_tx_compl)},
45528- {DRVSTAT_INFO(be_rx_compl)},
45529- {DRVSTAT_INFO(be_ethrx_post_fail)},
45530- {DRVSTAT_INFO(be_802_3_dropped_frames)},
45531- {DRVSTAT_INFO(be_802_3_malformed_frames)},
45532- {DRVSTAT_INFO(be_tx_rate)},
45533- {DRVSTAT_INFO(be_rx_rate)},
45534- {PORTSTAT_INFO(rx_unicast_frames)},
45535- {PORTSTAT_INFO(rx_multicast_frames)},
45536- {PORTSTAT_INFO(rx_broadcast_frames)},
45537- {PORTSTAT_INFO(rx_crc_errors)},
45538- {PORTSTAT_INFO(rx_alignment_symbol_errors)},
45539- {PORTSTAT_INFO(rx_pause_frames)},
45540- {PORTSTAT_INFO(rx_control_frames)},
45541- {PORTSTAT_INFO(rx_in_range_errors)},
45542- {PORTSTAT_INFO(rx_out_range_errors)},
45543- {PORTSTAT_INFO(rx_frame_too_long)},
45544- {PORTSTAT_INFO(rx_address_match_errors)},
45545- {PORTSTAT_INFO(rx_vlan_mismatch)},
45546- {PORTSTAT_INFO(rx_dropped_too_small)},
45547- {PORTSTAT_INFO(rx_dropped_too_short)},
45548- {PORTSTAT_INFO(rx_dropped_header_too_small)},
45549- {PORTSTAT_INFO(rx_dropped_tcp_length)},
45550- {PORTSTAT_INFO(rx_dropped_runt)},
45551- {PORTSTAT_INFO(rx_fifo_overflow)},
45552- {PORTSTAT_INFO(rx_input_fifo_overflow)},
45553- {PORTSTAT_INFO(rx_ip_checksum_errs)},
45554- {PORTSTAT_INFO(rx_tcp_checksum_errs)},
45555- {PORTSTAT_INFO(rx_udp_checksum_errs)},
45556- {PORTSTAT_INFO(rx_non_rss_packets)},
45557- {PORTSTAT_INFO(rx_ipv4_packets)},
45558- {PORTSTAT_INFO(rx_ipv6_packets)},
45559- {PORTSTAT_INFO(tx_unicastframes)},
45560- {PORTSTAT_INFO(tx_multicastframes)},
45561- {PORTSTAT_INFO(tx_broadcastframes)},
45562- {PORTSTAT_INFO(tx_pauseframes)},
45563- {PORTSTAT_INFO(tx_controlframes)},
45564- {MISCSTAT_INFO(rx_drops_no_pbuf)},
45565- {MISCSTAT_INFO(rx_drops_no_txpb)},
45566- {MISCSTAT_INFO(rx_drops_no_erx_descr)},
45567- {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
45568- {MISCSTAT_INFO(rx_drops_too_many_frags)},
45569- {MISCSTAT_INFO(rx_drops_invalid_ring)},
45570- {MISCSTAT_INFO(forwarded_packets)},
45571- {MISCSTAT_INFO(rx_drops_mtu)},
45572- {ERXSTAT_INFO(rx_drops_no_fragments)},
45573+ {DRVSTAT_INFO(rx_crc_errors)},
45574+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
45575+ {DRVSTAT_INFO(rx_pause_frames)},
45576+ {DRVSTAT_INFO(rx_control_frames)},
45577+ {DRVSTAT_INFO(rx_in_range_errors)},
45578+ {DRVSTAT_INFO(rx_out_range_errors)},
45579+ {DRVSTAT_INFO(rx_frame_too_long)},
45580+ {DRVSTAT_INFO(rx_address_match_errors)},
45581+ {DRVSTAT_INFO(rx_dropped_too_small)},
45582+ {DRVSTAT_INFO(rx_dropped_too_short)},
45583+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
45584+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
45585+ {DRVSTAT_INFO(rx_dropped_runt)},
45586+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
45587+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
45588+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
45589+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
45590+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
45591+ {DRVSTAT_INFO(rx_switched_unicast_packets)},
45592+ {DRVSTAT_INFO(rx_switched_multicast_packets)},
45593+ {DRVSTAT_INFO(rx_switched_broadcast_packets)},
45594+ {DRVSTAT_INFO(tx_pauseframes)},
45595+ {DRVSTAT_INFO(tx_controlframes)},
45596+ {DRVSTAT_INFO(rx_priority_pause_frames)},
45597+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
45598+ {DRVSTAT_INFO(jabber_events)},
45599+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
45600+ {DRVSTAT_INFO(rx_drops_no_txpb)},
45601+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
45602+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
45603+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
45604+ {DRVSTAT_INFO(rx_drops_invalid_ring)},
45605+ {DRVSTAT_INFO(forwarded_packets)},
45606+ {DRVSTAT_INFO(rx_drops_mtu)},
45607+ {DRVSTAT_INFO(eth_red_drops)},
45608+ {DRVSTAT_INFO(be_on_die_temperature)}
45609 };
45610 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
45611
45612+/* Stats related to multi RX queues */
45613+static const struct be_ethtool_stat et_rx_stats[] = {
45614+ {DRVSTAT_RX_INFO(rx_bytes)},
45615+ {DRVSTAT_RX_INFO(rx_pkts)},
45616+ {DRVSTAT_RX_INFO(rx_rate)},
45617+ {DRVSTAT_RX_INFO(rx_polls)},
45618+ {DRVSTAT_RX_INFO(rx_events)},
45619+ {DRVSTAT_RX_INFO(rx_compl)},
45620+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
45621+ {DRVSTAT_RX_INFO(rx_post_fail)},
45622+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
45623+};
45624+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
45625+
45626+/* Stats related to multi TX queues */
45627+static const struct be_ethtool_stat et_tx_stats[] = {
45628+ {DRVSTAT_TX_INFO(be_tx_rate)},
45629+ {DRVSTAT_TX_INFO(be_tx_reqs)},
45630+ {DRVSTAT_TX_INFO(be_tx_wrbs)},
45631+ {DRVSTAT_TX_INFO(be_tx_stops)},
45632+ {DRVSTAT_TX_INFO(be_tx_compl)},
45633+ {DRVSTAT_TX_INFO(be_ipv6_ext_hdr_tx_drop)}
45634+};
45635+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
45636+
45637+static const char et_self_tests[][ETH_GSTRING_LEN] = {
45638+ "MAC Loopback test",
45639+ "PHY Loopback test",
45640+ "External Loopback test",
45641+ "DDR DMA test",
45642+ "Link test"
45643+};
45644+
45645+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
45646+#define BE_MAC_LOOPBACK 0x0
45647+#define BE_PHY_LOOPBACK 0x1
45648+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
45649+#define BE_NO_LOOPBACK 0xff
45650+
45651+/* MAC speed valid values */
45652+#define SPEED_DEFAULT 0x0
45653+#define SPEED_FORCED_10GB 0x1
45654+#define SPEED_FORCED_1GB 0x2
45655+#define SPEED_AUTONEG_10GB 0x3
45656+#define SPEED_AUTONEG_1GB 0x4
45657+#define SPEED_AUTONEG_100MB 0x5
45658+#define SPEED_AUTONEG_10GB_1GB 0x6
45659+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
45660+#define SPEED_AUTONEG_1GB_100MB 0x8
45661+#define SPEED_AUTONEG_10MB 0x9
45662+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
45663+#define SPEED_AUTONEG_100MB_10MB 0xb
45664+#define SPEED_FORCED_100MB 0xc
45665+#define SPEED_FORCED_10MB 0xd
45666+
45667+
45668+
45669 static void
45670 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45671 {
45672 struct be_adapter *adapter = netdev_priv(netdev);
45673+ int len;
45674+ char fw_on_flash[FW_VER_LEN];
45675+
45676+ memset(fw_on_flash, 0 , sizeof(fw_on_flash));
45677+
45678+ be_cmd_get_fw_ver(adapter, adapter->fw_ver,
45679+ fw_on_flash);
45680
45681 strcpy(drvinfo->driver, DRV_NAME);
45682 strcpy(drvinfo->version, DRV_VER);
45683+
45684 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
45685+ if (memcmp(adapter->fw_ver, fw_on_flash,
45686+ FW_VER_LEN) != 0) {
45687+ len = strlen(drvinfo->fw_version);
45688+ strncpy(drvinfo->fw_version+len, " [",
45689+ FW_VER_LEN-len-1);
45690+ len = strlen(drvinfo->fw_version);
45691+ strncpy(drvinfo->fw_version+len, fw_on_flash,
45692+ FW_VER_LEN-len-1);
45693+ len = strlen(drvinfo->fw_version);
45694+ strncpy(drvinfo->fw_version+len, "]", FW_VER_LEN-len-1);
45695+ }
45696+
45697 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
45698 drvinfo->testinfo_len = 0;
45699 drvinfo->regdump_len = 0;
45700@@ -122,12 +181,37 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45701 }
45702
45703 static int
45704+be_get_reg_len(struct net_device *netdev)
45705+{
45706+ struct be_adapter *adapter = netdev_priv(netdev);
45707+ u32 log_size = 0;
45708+
45709+ if (be_physfn(adapter))
45710+ be_cmd_get_reg_len(adapter, &log_size);
45711+
45712+ return log_size;
45713+}
45714+
45715+static void
45716+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
45717+{
45718+ struct be_adapter *adapter = netdev_priv(netdev);
45719+
45720+ if (be_physfn(adapter)) {
45721+ memset(buf, 0, regs->len);
45722+ be_cmd_get_regs(adapter, regs->len, buf);
45723+ }
45724+}
45725+
45726+static int
45727 be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45728 {
45729 struct be_adapter *adapter = netdev_priv(netdev);
45730- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45731+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
45732 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45733
45734+ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
45735+
45736 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
45737 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
45738 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
45739@@ -149,25 +233,52 @@ static int
45740 be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45741 {
45742 struct be_adapter *adapter = netdev_priv(netdev);
45743- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45744+ struct be_rx_obj *rxo;
45745+ struct be_eq_obj *rx_eq;
45746 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45747 u32 tx_max, tx_min, tx_cur;
45748 u32 rx_max, rx_min, rx_cur;
45749- int status = 0;
45750+ int status = 0, i;
45751
45752 if (coalesce->use_adaptive_tx_coalesce == 1)
45753 return -EINVAL;
45754+ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
45755+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
45756+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
45757
45758- /* if AIC is being turned on now, start with an EQD of 0 */
45759- if (rx_eq->enable_aic == 0 &&
45760- coalesce->use_adaptive_rx_coalesce == 1) {
45761- rx_eq->cur_eqd = 0;
45762+ for_all_rx_queues(adapter, rxo, i) {
45763+ rx_eq = &rxo->rx_eq;
45764+
45765+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
45766+ rx_eq->cur_eqd = 0;
45767+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45768+
45769+ rx_max = coalesce->rx_coalesce_usecs_high;
45770+ rx_min = coalesce->rx_coalesce_usecs_low;
45771+ rx_cur = coalesce->rx_coalesce_usecs;
45772+
45773+ if (rx_eq->enable_aic) {
45774+ if (rx_max > BE_MAX_EQD)
45775+ rx_max = BE_MAX_EQD;
45776+ if (rx_min > rx_max)
45777+ rx_min = rx_max;
45778+ rx_eq->max_eqd = rx_max;
45779+ rx_eq->min_eqd = rx_min;
45780+ if (rx_eq->cur_eqd > rx_max)
45781+ rx_eq->cur_eqd = rx_max;
45782+ if (rx_eq->cur_eqd < rx_min)
45783+ rx_eq->cur_eqd = rx_min;
45784+ } else {
45785+ if (rx_cur > BE_MAX_EQD)
45786+ rx_cur = BE_MAX_EQD;
45787+ if (rx_eq->cur_eqd != rx_cur) {
45788+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45789+ rx_cur);
45790+ if (!status)
45791+ rx_eq->cur_eqd = rx_cur;
45792+ }
45793+ }
45794 }
45795- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45796-
45797- rx_max = coalesce->rx_coalesce_usecs_high;
45798- rx_min = coalesce->rx_coalesce_usecs_low;
45799- rx_cur = coalesce->rx_coalesce_usecs;
45800
45801 tx_max = coalesce->tx_coalesce_usecs_high;
45802 tx_min = coalesce->tx_coalesce_usecs_low;
45803@@ -181,27 +292,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45804 tx_eq->cur_eqd = tx_cur;
45805 }
45806
45807- if (rx_eq->enable_aic) {
45808- if (rx_max > BE_MAX_EQD)
45809- rx_max = BE_MAX_EQD;
45810- if (rx_min > rx_max)
45811- rx_min = rx_max;
45812- rx_eq->max_eqd = rx_max;
45813- rx_eq->min_eqd = rx_min;
45814- if (rx_eq->cur_eqd > rx_max)
45815- rx_eq->cur_eqd = rx_max;
45816- if (rx_eq->cur_eqd < rx_min)
45817- rx_eq->cur_eqd = rx_min;
45818- } else {
45819- if (rx_cur > BE_MAX_EQD)
45820- rx_cur = BE_MAX_EQD;
45821- if (rx_eq->cur_eqd != rx_cur) {
45822- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45823- rx_cur);
45824- if (!status)
45825- rx_eq->cur_eqd = rx_cur;
45826- }
45827- }
45828 return 0;
45829 }
45830
45831@@ -229,81 +319,294 @@ be_get_ethtool_stats(struct net_device *netdev,
45832 struct ethtool_stats *stats, uint64_t *data)
45833 {
45834 struct be_adapter *adapter = netdev_priv(netdev);
45835- struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
45836- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
45837- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
45838- struct be_port_rxf_stats *port_stats =
45839- &rxf_stats->port[adapter->port_num];
45840- struct net_device_stats *net_stats = &adapter->stats.net_stats;
45841- struct be_erx_stats *erx_stats = &hw_stats->erx;
45842+ struct be_rx_obj *rxo;
45843+ struct be_tx_obj *txo;
45844 void *p = NULL;
45845- int i;
45846+ int i, j, base;
45847
45848 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45849 switch (et_stats[i].type) {
45850 case NETSTAT:
45851- p = net_stats;
45852+ p = &adapter->net_stats;
45853 break;
45854 case DRVSTAT:
45855- p = drvr_stats;
45856- break;
45857- case PORTSTAT:
45858- p = port_stats;
45859- break;
45860- case MISCSTAT:
45861- p = rxf_stats;
45862- break;
45863- case ERXSTAT: /* Currently only one ERX stat is provided */
45864- p = (u32 *)erx_stats + adapter->rx_obj.q.id;
45865+ p = &adapter->drv_stats;
45866 break;
45867 }
45868
45869 p = (u8 *)p + et_stats[i].offset;
45870 data[i] = (et_stats[i].size == sizeof(u64)) ?
45871- *(u64 *)p: *(u32 *)p;
45872+ *(u64 *)p:(*(u32 *)p);
45873 }
45874
45875- return;
45876+ base = ETHTOOL_STATS_NUM;
45877+ for_all_rx_queues(adapter, rxo, j) {
45878+ for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
45879+ p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
45880+ data[base + j * ETHTOOL_RXSTATS_NUM + i] =
45881+ (et_rx_stats[i].size == sizeof(u64)) ?
45882+ *(u64 *)p: *(u32 *)p;
45883+ }
45884+ }
45885+
45886+ base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
45887+ for_all_tx_queues(adapter, txo, j) {
45888+ for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
45889+ p = (u8 *)&txo->stats + et_tx_stats[i].offset;
45890+ data[base + j * ETHTOOL_TXSTATS_NUM + i] =
45891+ (et_tx_stats[i].size == sizeof(u64)) ?
45892+ *(u64 *)p: *(u32 *)p;
45893+ }
45894+ }
45895 }
45896
45897 static void
45898 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
45899 uint8_t *data)
45900 {
45901- int i;
45902+ struct be_adapter *adapter = netdev_priv(netdev);
45903+ int i, j;
45904+
45905 switch (stringset) {
45906 case ETH_SS_STATS:
45907 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45908 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
45909 data += ETH_GSTRING_LEN;
45910 }
45911+ for (i = 0; i < adapter->num_rx_qs; i++) {
45912+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
45913+ sprintf(data, "rxq%d: %s", i,
45914+ et_rx_stats[j].desc);
45915+ data += ETH_GSTRING_LEN;
45916+ }
45917+ }
45918+ for (i = 0; i < adapter->num_tx_qs; i++) {
45919+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
45920+ sprintf(data, "txq%d: %s", i,
45921+ et_tx_stats[j].desc);
45922+ data += ETH_GSTRING_LEN;
45923+ }
45924+ }
45925+ break;
45926+ case ETH_SS_TEST:
45927+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
45928+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
45929+ data += ETH_GSTRING_LEN;
45930+ }
45931 break;
45932 }
45933 }
45934
45935+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45936 static int be_get_stats_count(struct net_device *netdev)
45937 {
45938- return ETHTOOL_STATS_NUM;
45939+ struct be_adapter *adapter = netdev_priv(netdev);
45940+
45941+ return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM
45942+ + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45943 }
45944+static int
45945+be_self_test_count(struct net_device *dev)
45946+{
45947+ return ETHTOOL_TESTS_NUM;
45948+}
45949+#else
45950+
45951+static int be_get_sset_count(struct net_device *netdev, int stringset)
45952+{
45953+ struct be_adapter *adapter = netdev_priv(netdev);
45954+
45955+ switch (stringset) {
45956+ case ETH_SS_TEST:
45957+ return ETHTOOL_TESTS_NUM;
45958+ case ETH_SS_STATS:
45959+ return ETHTOOL_STATS_NUM +
45960+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
45961+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45962+ default:
45963+ return -EINVAL;
45964+ }
45965+}
45966+#endif
45967
45968 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
45969 {
45970- ecmd->speed = SPEED_10000;
45971+ struct be_adapter *adapter = netdev_priv(netdev);
45972+ struct be_phy_info phy_info;
45973+ u8 mac_speed = 0;
45974+ u16 link_speed = 0;
45975+ int link_status = LINK_DOWN;
45976+ int status;
45977+
45978+ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
45979+ status = be_cmd_link_status_query(adapter, &link_status,
45980+ &mac_speed, &link_speed, 0);
45981+
45982+ be_link_status_update(adapter, link_status);
45983+ /* link_speed is in units of 10 Mbps */
45984+ if (link_speed) {
45985+ ecmd->speed = link_speed*10;
45986+ } else {
45987+ switch (mac_speed) {
45988+ case PHY_LINK_SPEED_10MBPS:
45989+ ecmd->speed = SPEED_10;
45990+ break;
45991+ case PHY_LINK_SPEED_100MBPS:
45992+ ecmd->speed = SPEED_100;
45993+ break;
45994+ case PHY_LINK_SPEED_1GBPS:
45995+ ecmd->speed = SPEED_1000;
45996+ break;
45997+ case PHY_LINK_SPEED_10GBPS:
45998+ ecmd->speed = SPEED_10000;
45999+ break;
46000+ case PHY_LINK_SPEED_ZERO:
46001+ ecmd->speed = 0;
46002+ break;
46003+ }
46004+ }
46005+
46006+ status = be_cmd_get_phy_info(adapter, &phy_info);
46007+ if (!status) {
46008+ switch (phy_info.interface_type) {
46009+ case PHY_TYPE_XFP_10GB:
46010+ case PHY_TYPE_SFP_1GB:
46011+ case PHY_TYPE_SFP_PLUS_10GB:
46012+ ecmd->port = PORT_FIBRE;
46013+ break;
46014+ default:
46015+ ecmd->port = PORT_TP;
46016+ break;
46017+ }
46018+
46019+ switch (phy_info.interface_type) {
46020+ case PHY_TYPE_KR_10GB:
46021+ case PHY_TYPE_KX4_10GB:
46022+ ecmd->transceiver = XCVR_INTERNAL;
46023+ break;
46024+ default:
46025+ ecmd->transceiver = XCVR_EXTERNAL;
46026+ break;
46027+ }
46028+
46029+ if (phy_info.auto_speeds_supported) {
46030+ ecmd->supported |= SUPPORTED_Autoneg;
46031+ ecmd->autoneg = AUTONEG_ENABLE;
46032+ ecmd->advertising |= ADVERTISED_Autoneg;
46033+ }
46034+
46035+ if (phy_info.misc_params & BE_PAUSE_SYM_EN) {
46036+ ecmd->supported |= SUPPORTED_Pause;
46037+ ecmd->advertising |= ADVERTISED_Pause;
46038+ }
46039+
46040+ }
46041+
46042+ /* Save for future use */
46043+ adapter->link_speed = ecmd->speed;
46044+ adapter->port_type = ecmd->port;
46045+ adapter->transceiver = ecmd->transceiver;
46046+ adapter->autoneg = ecmd->autoneg;
46047+ } else {
46048+ ecmd->speed = adapter->link_speed;
46049+ ecmd->port = adapter->port_type;
46050+ ecmd->transceiver = adapter->transceiver;
46051+ ecmd->autoneg = adapter->autoneg;
46052+ }
46053+
46054 ecmd->duplex = DUPLEX_FULL;
46055- ecmd->autoneg = AUTONEG_DISABLE;
46056+ ecmd->phy_address = (adapter->hba_port_num << 4) |
46057+ (adapter->port_name[adapter->hba_port_num]);
46058+ switch (ecmd->port) {
46059+ case PORT_FIBRE:
46060+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
46061+ break;
46062+ case PORT_TP:
46063+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
46064+ break;
46065+ }
46066+
46067+ if (ecmd->autoneg) {
46068+ ecmd->supported |= SUPPORTED_1000baseT_Full;
46069+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
46070+ ADVERTISED_1000baseT_Full);
46071+ }
46072+
46073 return 0;
46074 }
46075
46076+static int be_set_settings(struct net_device *netdev,
46077+ struct ethtool_cmd *ecmd)
46078+{
46079+ struct be_adapter *adapter = netdev_priv(netdev);
46080+ struct be_phy_info phy_info;
46081+ u16 mac_speed=0;
46082+ u16 dac_cable_len=0;
46083+ u16 port_speed = 0;
46084+ int status;
46085+
46086+ status = be_cmd_get_phy_info(adapter, &phy_info);
46087+ if (status) {
46088+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46089+ return status;
46090+ }
46091+
46092+ if (ecmd->autoneg == AUTONEG_ENABLE) {
46093+ switch(phy_info.interface_type) {
46094+ case PHY_TYPE_SFP_1GB:
46095+ case PHY_TYPE_BASET_1GB:
46096+ case PHY_TYPE_BASEX_1GB:
46097+ case PHY_TYPE_SGMII:
46098+ mac_speed = SPEED_AUTONEG_1GB_100MB_10MB;
46099+ break;
46100+ case PHY_TYPE_SFP_PLUS_10GB:
46101+ dev_warn(&adapter->pdev->dev,
46102+ "Autoneg not supported on this module. \n");
46103+ return -EINVAL;
46104+ case PHY_TYPE_KR_10GB:
46105+ case PHY_TYPE_KX4_10GB:
46106+ mac_speed = SPEED_AUTONEG_10GB_1GB;
46107+ break;
46108+ case PHY_TYPE_BASET_10GB:
46109+ mac_speed = SPEED_AUTONEG_10GB_1GB_100MB;
46110+ break;
46111+ }
46112+ } else if(ecmd->autoneg == AUTONEG_DISABLE) {
46113+ if(ecmd->speed == SPEED_10) {
46114+ mac_speed = SPEED_FORCED_10MB;
46115+ } else if(ecmd->speed == SPEED_100) {
46116+ mac_speed = SPEED_FORCED_100MB;
46117+ } else if(ecmd->speed == SPEED_1000) {
46118+ mac_speed = SPEED_FORCED_1GB;
46119+ } else if(ecmd->speed == SPEED_10000) {
46120+ mac_speed = SPEED_FORCED_10GB;
46121+ }
46122+ }
46123+
46124+ status = be_cmd_get_port_speed(adapter, adapter->hba_port_num,
46125+ &dac_cable_len, &port_speed);
46126+
46127+ if (!status && port_speed != mac_speed)
46128+ status = be_cmd_set_port_speed_v1(adapter,
46129+ adapter->hba_port_num, mac_speed,
46130+ dac_cable_len);
46131+ if (status)
46132+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46133+
46134+ return status;
46135+
46136+}
46137+
46138 static void
46139 be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
46140 {
46141 struct be_adapter *adapter = netdev_priv(netdev);
46142
46143- ring->rx_max_pending = adapter->rx_obj.q.len;
46144- ring->tx_max_pending = adapter->tx_obj.q.len;
46145+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
46146+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
46147
46148- ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
46149- ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
46150+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
46151+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
46152 }
46153
46154 static void
46155@@ -312,7 +615,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46156 struct be_adapter *adapter = netdev_priv(netdev);
46157
46158 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
46159- ecmd->autoneg = 0;
46160+ ecmd->autoneg = adapter->autoneg;
46161 }
46162
46163 static int
46164@@ -334,6 +637,203 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46165 return status;
46166 }
46167
46168+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46169+static int
46170+be_phys_id(struct net_device *netdev, u32 data)
46171+{
46172+ struct be_adapter *adapter = netdev_priv(netdev);
46173+ int status;
46174+ u32 cur;
46175+
46176+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
46177+
46178+ if (cur == BEACON_STATE_ENABLED)
46179+ return 0;
46180+
46181+ if (data < 2)
46182+ data = 2;
46183+
46184+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46185+ BEACON_STATE_ENABLED);
46186+ set_current_state(TASK_INTERRUPTIBLE);
46187+ schedule_timeout(data*HZ);
46188+
46189+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46190+ BEACON_STATE_DISABLED);
46191+
46192+ return status;
46193+}
46194+#else
46195+static int
46196+be_set_phys_id(struct net_device *netdev,
46197+ enum ethtool_phys_id_state state)
46198+{
46199+ struct be_adapter *adapter = netdev_priv(netdev);
46200+
46201+ switch (state) {
46202+ case ETHTOOL_ID_ACTIVE:
46203+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
46204+ &adapter->beacon_state);
46205+ return 1; /* cycle on/off once per second */
46206+
46207+ case ETHTOOL_ID_ON:
46208+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46209+ BEACON_STATE_ENABLED);
46210+ break;
46211+
46212+ case ETHTOOL_ID_OFF:
46213+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46214+ BEACON_STATE_DISABLED);
46215+ break;
46216+
46217+ case ETHTOOL_ID_INACTIVE:
46218+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46219+ adapter->beacon_state);
46220+ }
46221+
46222+ return 0;
46223+}
46224+#endif
46225+
46226+static bool
46227+be_is_wol_supported(struct be_adapter *adapter)
46228+{
46229+ struct pci_dev *pdev = adapter->pdev;
46230+
46231+ if (!be_physfn(adapter))
46232+ return false;
46233+
46234+ switch (pdev->subsystem_device) {
46235+ case OC_SUBSYS_DEVICE_ID1:
46236+ case OC_SUBSYS_DEVICE_ID2:
46237+ case OC_SUBSYS_DEVICE_ID3:
46238+ case OC_SUBSYS_DEVICE_ID4:
46239+ return false;
46240+ default:
46241+ return true;
46242+ }
46243+}
46244+
46245+static void
46246+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46247+{
46248+ struct be_adapter *adapter = netdev_priv(netdev);
46249+
46250+ if (be_is_wol_supported(adapter))
46251+ wol->supported = WAKE_MAGIC;
46252+ if (adapter->wol)
46253+ wol->wolopts = WAKE_MAGIC;
46254+ else
46255+ wol->wolopts = 0;
46256+ memset(&wol->sopass, 0, sizeof(wol->sopass));
46257+}
46258+
46259+static int
46260+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46261+{
46262+ struct be_adapter *adapter = netdev_priv(netdev);
46263+
46264+ if (wol->wolopts & ~WAKE_MAGIC)
46265+ return -EOPNOTSUPP;
46266+
46267+ if (!be_is_wol_supported(adapter)) {
46268+ dev_warn(&adapter->pdev->dev,
46269+ "WOL not supported for this subsystemid: %x\n",
46270+ adapter->pdev->subsystem_device);
46271+ return -EOPNOTSUPP;
46272+ }
46273+
46274+ if (wol->wolopts & WAKE_MAGIC)
46275+ adapter->wol = true;
46276+ else
46277+ adapter->wol = false;
46278+
46279+ return 0;
46280+}
46281+
46282+static int
46283+be_test_ddr_dma(struct be_adapter *adapter)
46284+{
46285+ int ret, i;
46286+ struct be_dma_mem ddrdma_cmd;
46287+ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
46288+
46289+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
46290+ ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
46291+ &ddrdma_cmd.dma);
46292+ if (!ddrdma_cmd.va) {
46293+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
46294+ return -ENOMEM;
46295+ }
46296+
46297+ for (i = 0; i < 2; i++) {
46298+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
46299+ 4096, &ddrdma_cmd);
46300+ if (ret != 0)
46301+ goto err;
46302+ }
46303+
46304+err:
46305+ pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
46306+ ddrdma_cmd.va, ddrdma_cmd.dma);
46307+ return ret;
46308+}
46309+
46310+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
46311+ u64 *status)
46312+{
46313+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
46314+ loopback_type, 1);
46315+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
46316+ loopback_type, 1500,
46317+ 2, 0xabc);
46318+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
46319+ BE_NO_LOOPBACK, 1);
46320+ return *status;
46321+}
46322+
46323+static void
46324+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
46325+{
46326+ struct be_adapter *adapter = netdev_priv(netdev);
46327+ int link_status;
46328+ u8 mac_speed = 0;
46329+ u16 qos_link_speed = 0;
46330+
46331+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
46332+
46333+ if (test->flags & ETH_TEST_FL_OFFLINE) {
46334+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
46335+ &data[0]) != 0) {
46336+ test->flags |= ETH_TEST_FL_FAILED;
46337+ }
46338+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
46339+ &data[1]) != 0) {
46340+ test->flags |= ETH_TEST_FL_FAILED;
46341+ }
46342+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
46343+ &data[2]) != 0) {
46344+ test->flags |= ETH_TEST_FL_FAILED;
46345+ }
46346+ }
46347+
46348+ if (be_test_ddr_dma(adapter) != 0) {
46349+ data[3] = 1;
46350+ test->flags |= ETH_TEST_FL_FAILED;
46351+ }
46352+
46353+ if (be_cmd_link_status_query(adapter, &link_status, &mac_speed,
46354+ &qos_link_speed, 0) != 0) {
46355+ test->flags |= ETH_TEST_FL_FAILED;
46356+ data[4] = -1;
46357+ } else if (!mac_speed) {
46358+ test->flags |= ETH_TEST_FL_FAILED;
46359+ data[4] = 1;
46360+ }
46361+
46362+}
46363+
46364+#ifdef HAVE_ETHTOOL_FLASH
46365 static int
46366 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46367 {
46368@@ -347,11 +847,73 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46369
46370 return be_load_fw(adapter, file_name);
46371 }
46372+#endif
46373
46374-const struct ethtool_ops be_ethtool_ops = {
46375+static int
46376+be_get_eeprom_len(struct net_device *netdev)
46377+{
46378+ return BE_READ_SEEPROM_LEN;
46379+}
46380+
46381+static int
46382+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
46383+ uint8_t *data)
46384+{
46385+ struct be_adapter *adapter = netdev_priv(netdev);
46386+ struct be_dma_mem eeprom_cmd;
46387+ struct be_cmd_resp_seeprom_read *resp;
46388+ int status;
46389+
46390+ if (!eeprom->len)
46391+ return -EINVAL;
46392+
46393+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
46394+
46395+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
46396+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
46397+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
46398+ &eeprom_cmd.dma);
46399+
46400+ if (!eeprom_cmd.va) {
46401+ dev_err(&adapter->pdev->dev,
46402+ "Memory allocation failure. Could not read eeprom\n");
46403+ return -ENOMEM;
46404+ }
46405+
46406+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
46407+
46408+ if (!status) {
46409+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
46410+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
46411+ }
46412+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
46413+ eeprom_cmd.dma);
46414+
46415+ return status;
46416+}
46417+
46418+static int be_set_tso(struct net_device *netdev, uint32_t data)
46419+{
46420+ if (data) {
46421+ netdev->features |= NETIF_F_TSO;
46422+ netdev->features |= NETIF_F_TSO6;
46423+ } else {
46424+ netdev->features &= ~NETIF_F_TSO;
46425+ netdev->features &= ~NETIF_F_TSO6;
46426+ }
46427+ return 0;
46428+}
46429+
46430+
46431+struct ethtool_ops be_ethtool_ops = {
46432 .get_settings = be_get_settings,
46433+ .set_settings = be_set_settings,
46434 .get_drvinfo = be_get_drvinfo,
46435+ .get_wol = be_get_wol,
46436+ .set_wol = be_set_wol,
46437 .get_link = ethtool_op_get_link,
46438+ .get_eeprom_len = be_get_eeprom_len,
46439+ .get_eeprom = be_read_eeprom,
46440 .get_coalesce = be_get_coalesce,
46441 .set_coalesce = be_set_coalesce,
46442 .get_ringparam = be_get_ringparam,
46443@@ -364,9 +926,21 @@ const struct ethtool_ops be_ethtool_ops = {
46444 .get_sg = ethtool_op_get_sg,
46445 .set_sg = ethtool_op_set_sg,
46446 .get_tso = ethtool_op_get_tso,
46447- .set_tso = ethtool_op_set_tso,
46448+ .set_tso = be_set_tso,
46449 .get_strings = be_get_stat_strings,
46450+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46451+ .phys_id = be_phys_id,
46452 .get_stats_count = be_get_stats_count,
46453+ .self_test_count = be_self_test_count,
46454+#else
46455+ .set_phys_id = be_set_phys_id,
46456+ .get_sset_count = be_get_sset_count,
46457+#endif
46458 .get_ethtool_stats = be_get_ethtool_stats,
46459+ .get_regs_len = be_get_reg_len,
46460+ .get_regs = be_get_regs,
46461+#ifdef HAVE_ETHTOOL_FLASH
46462 .flash_device = be_do_flash,
46463+#endif
46464+ .self_test = be_self_test
46465 };
46466diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
46467index a3394b4..f871d8c 100644
46468--- a/drivers/net/benet/be_hw.h
46469+++ b/drivers/net/benet/be_hw.h
46470@@ -1,18 +1,18 @@
46471 /*
46472- * Copyright (C) 2005 - 2009 ServerEngines
46473+ * Copyright (C) 2005 - 2011 Emulex
46474 * All rights reserved.
46475 *
46476 * This program is free software; you can redistribute it and/or
46477 * modify it under the terms of the GNU General Public License version 2
46478- * as published by the Free Software Foundation. The full GNU General
46479+ * as published by the Free Software Foundation. The full GNU General
46480 * Public License is included in this distribution in the file called COPYING.
46481 *
46482 * Contact Information:
46483- * linux-drivers@serverengines.com
46484+ * linux-drivers@emulex.com
46485 *
46486- * ServerEngines
46487- * 209 N. Fair Oaks Ave
46488- * Sunnyvale, CA 94085
46489+ * Emulex
46490+ * 3333 Susan Street
46491+ * Costa Mesa, CA 92626
46492 */
46493
46494 /********* Mailbox door bell *************/
46495@@ -26,24 +26,34 @@
46496 * queue entry.
46497 */
46498 #define MPU_MAILBOX_DB_OFFSET 0x160
46499-#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46500+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46501 #define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
46502
46503-#define MPU_EP_CONTROL 0
46504+#define MPU_EP_CONTROL 0
46505
46506 /********** MPU semphore ******************/
46507-#define MPU_EP_SEMAPHORE_OFFSET 0xac
46508+#define MPU_EP_SEMAPHORE_OFFSET 0xac
46509+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
46510 #define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
46511 #define EP_SEMAPHORE_POST_ERR_MASK 0x1
46512 #define EP_SEMAPHORE_POST_ERR_SHIFT 31
46513 /* MPU semphore POST stage values */
46514-#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46515-#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46516+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46517+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46518 #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
46519 #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46520
46521+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
46522+#define SLIPORT_STATUS_OFFSET 0x404
46523+#define SLIPORT_CONTROL_OFFSET 0x408
46524+
46525+#define SLIPORT_STATUS_ERR_MASK 0x80000000
46526+#define SLIPORT_STATUS_RN_MASK 0x01000000
46527+#define SLIPORT_STATUS_RDY_MASK 0x00800000
46528+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
46529+
46530 /********* Memory BAR register ************/
46531-#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46532+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46533 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
46534 * Disable" may still globally block interrupts in addition to individual
46535 * interrupt masks; a mechanism for the device driver to block all interrupts
46536@@ -52,13 +62,70 @@
46537 */
46538 #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
46539
46540+/********* Link Status CSR ****************/
46541+#define PCICFG_PCIE_LINK_STATUS_OFFSET 0xd0
46542+#define PCIE_LINK_STATUS_SPEED_MASK 0xFF /* bits 16 - 19 */
46543+#define PCIE_LINK_STATUS_SPEED_SHIFT 16
46544+#define PCIE_LINK_STATUS_NEG_WIDTH_MASK 0x3F /* bits 20 - 25 */
46545+#define PCIE_LINK_STATUS_NEG_WIDTH_SHIFT 20
46546+
46547+/********* Link Capability CSR ************/
46548+#define PCICFG_PCIE_LINK_CAP_OFFSET 0xcc
46549+#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xFF /* bits 0 - 3 */
46550+#define PCIE_LINK_CAP_MAX_SPEED_SHIFT 0
46551+#define PCIE_LINK_CAP_MAX_WIDTH_MASK 0x3F /* bits 4 - 9 */
46552+#define PCIE_LINK_CAP_MAX_WIDTH_SHIFT 4
46553+
46554+/********* PCI Function Capability ************/
46555+#define BE_FUNCTION_CAPS_UNCLASSIFIED_STATS 0x1
46556+#define BE_FUNCTION_CAPS_RSS 0x2
46557+#define BE_FUNCTION_CAPS_PROMISCUOUS 0x4
46558+#define BE_FUNCTION_CAPS_LEGACY_MODE 0x8
46559+
46560+/********* Power managment (WOL) **********/
46561+#define PCICFG_PM_CONTROL_OFFSET 0x44
46562+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
46563+
46564+/********* Online Control Registers *******/
46565+#define PCICFG_ONLINE0 0xB0
46566+#define PCICFG_ONLINE1 0xB4
46567+
46568+/********* UE Status and Mask Registers ***/
46569+#define PCICFG_UE_STATUS_LOW 0xA0
46570+#define PCICFG_UE_STATUS_HIGH 0xA4
46571+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
46572+#define PCICFG_UE_STATUS_HI_MASK 0xAC
46573+
46574+/******** SLI_INTF ***********************/
46575+#define SLI_INTF_REG_OFFSET 0x58
46576+#define SLI_INTF_VALID_MASK 0xE0000000
46577+#define SLI_INTF_VALID 0xC0000000
46578+#define SLI_INTF_HINT2_MASK 0x1F000000
46579+#define SLI_INTF_HINT2_SHIFT 24
46580+#define SLI_INTF_HINT1_MASK 0x00FF0000
46581+#define SLI_INTF_HINT1_SHIFT 16
46582+#define SLI_INTF_FAMILY_MASK 0x00000F00
46583+#define SLI_INTF_FAMILY_SHIFT 8
46584+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
46585+#define SLI_INTF_IF_TYPE_SHIFT 12
46586+#define SLI_INTF_REV_MASK 0x000000F0
46587+#define SLI_INTF_REV_SHIFT 4
46588+#define SLI_INTF_FT_MASK 0x00000001
46589+
46590+/* SLI family */
46591+#define BE_SLI_FAMILY 0x0
46592+#define LANCER_A0_SLI_FAMILY 0xA
46593+
46594 /********* ISR0 Register offset **********/
46595-#define CEV_ISR0_OFFSET 0xC18
46596+#define CEV_ISR0_OFFSET 0xC18
46597 #define CEV_ISR_SIZE 4
46598
46599 /********* Event Q door bell *************/
46600 #define DB_EQ_OFFSET DB_CQ_OFFSET
46601 #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
46602+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
46603+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
46604+
46605 /* Clear the interrupt for this eq */
46606 #define DB_EQ_CLR_SHIFT (9) /* bit 9 */
46607 /* Must be 1 */
46608@@ -69,12 +136,16 @@
46609 #define DB_EQ_REARM_SHIFT (29) /* bit 29 */
46610
46611 /********* Compl Q door bell *************/
46612-#define DB_CQ_OFFSET 0x120
46613+#define DB_CQ_OFFSET 0x120
46614 #define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46615+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
46616+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
46617+ placing at 11-15 */
46618+
46619 /* Number of event entries processed */
46620-#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46621+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46622 /* Rearm bit */
46623-#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46624+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46625
46626 /********** TX ULP door bell *************/
46627 #define DB_TXULP1_OFFSET 0x60
46628@@ -84,25 +155,103 @@
46629 #define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
46630
46631 /********** RQ(erx) door bell ************/
46632-#define DB_RQ_OFFSET 0x100
46633+#define DB_RQ_OFFSET 0x100
46634 #define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46635 /* Number of rx frags posted */
46636 #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
46637
46638 /********** MCC door bell ************/
46639-#define DB_MCCQ_OFFSET 0x140
46640+#define DB_MCCQ_OFFSET 0x140
46641 #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
46642 /* Number of entries posted */
46643 #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
46644
46645+/********** SRIOV VF PCICFG OFFSET ********/
46646+#define SRIOV_VF_PCICFG_OFFSET (4096)
46647+
46648+/********** FAT TABLE ********/
46649+#define RETRIEVE_FAT 0
46650+#define QUERY_FAT 1
46651+
46652+/* Flashrom related descriptors */
46653+#define IMAGE_TYPE_FIRMWARE 160
46654+#define IMAGE_TYPE_BOOTCODE 224
46655+#define IMAGE_TYPE_OPTIONROM 32
46656+
46657+#define NUM_FLASHDIR_ENTRIES 32
46658+
46659+#define IMG_TYPE_ISCSI_ACTIVE 0
46660+#define IMG_TYPE_REDBOOT 1
46661+#define IMG_TYPE_BIOS 2
46662+#define IMG_TYPE_PXE_BIOS 3
46663+#define IMG_TYPE_FCOE_BIOS 8
46664+#define IMG_TYPE_ISCSI_BACKUP 9
46665+#define IMG_TYPE_FCOE_FW_ACTIVE 10
46666+#define IMG_TYPE_FCOE_FW_BACKUP 11
46667+#define IMG_TYPE_NCSI_FW 13
46668+#define IMG_TYPE_PHY_FW 99
46669+#define TN_8022 13
46670+
46671+#define ILLEGAL_IOCTL_REQ 2
46672+#define FLASHROM_OPER_PHY_FLASH 9
46673+#define FLASHROM_OPER_PHY_SAVE 10
46674+#define FLASHROM_OPER_FLASH 1
46675+#define FLASHROM_OPER_SAVE 2
46676+#define FLASHROM_OPER_REPORT 4
46677+
46678+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
46679+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
46680+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
46681+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
46682+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
46683+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
46684+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
46685+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 (262144)
46686+
46687+#define FLASH_NCSI_MAGIC (0x16032009)
46688+#define FLASH_NCSI_DISABLED (0)
46689+#define FLASH_NCSI_ENABLED (1)
46690+
46691+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
46692+
46693+/* Offsets for components on Flash. */
46694+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
46695+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
46696+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
46697+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
46698+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
46699+#define FLASH_PXE_BIOS_START_g2 (7864320)
46700+#define FLASH_FCoE_BIOS_START_g2 (524288)
46701+#define FLASH_REDBOOT_START_g2 (0)
46702+
46703+#define FLASH_NCSI_START_g3 (15990784)
46704+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
46705+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
46706+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
46707+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
46708+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
46709+#define FLASH_PXE_BIOS_START_g3 (13107200)
46710+#define FLASH_FCoE_BIOS_START_g3 (13631488)
46711+#define FLASH_REDBOOT_START_g3 (262144)
46712+#define FLASH_PHY_FW_START_g3 (1310720)
46713+
46714+/************* Rx Packet Type Encoding **************/
46715+#define BE_UNICAST_PACKET 0
46716+#define BE_MULTICAST_PACKET 1
46717+#define BE_BROADCAST_PACKET 2
46718+#define BE_RSVD_PACKET 3
46719+
46720 /*
46721 * BE descriptors: host memory data structures whose formats
46722 * are hardwired in BE silicon.
46723 */
46724 /* Event Queue Descriptor */
46725-#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46726-#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46727-#define EQ_ENTRY_RES_ID_SHIFT 16
46728+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46729+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46730+#define EQ_ENTRY_RES_ID_SHIFT 16
46731+
46732+#define BE_MAC_PROMISCUOUS 62 /* Promiscuous mode */
46733+
46734 struct be_eq_entry {
46735 u32 evt;
46736 };
46737@@ -126,7 +275,7 @@ struct amap_eth_hdr_wrb {
46738 u8 event;
46739 u8 crc;
46740 u8 forward;
46741- u8 ipsec;
46742+ u8 lso6;
46743 u8 mgmt;
46744 u8 ipcs;
46745 u8 udpcs;
46746@@ -151,7 +300,7 @@ struct be_eth_hdr_wrb {
46747 * offset/shift/mask of each field */
46748 struct amap_eth_tx_compl {
46749 u8 wrb_index[16]; /* dword 0 */
46750- u8 ct[2]; /* dword 0 */
46751+ u8 ct[2]; /* dword 0 */
46752 u8 port[2]; /* dword 0 */
46753 u8 rsvd0[8]; /* dword 0 */
46754 u8 status[4]; /* dword 0 */
46755@@ -179,10 +328,10 @@ struct be_eth_rx_d {
46756
46757 /* RX Compl Queue Descriptor */
46758
46759-/* Pseudo amap definition for eth_rx_compl in which each bit of the
46760- * actual structure is defined as a byte: used to calculate
46761+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
46762+ * each bit of the actual structure is defined as a byte: used to calculate
46763 * offset/shift/mask of each field */
46764-struct amap_eth_rx_compl {
46765+struct amap_eth_rx_compl_v0 {
46766 u8 vlan_tag[16]; /* dword 0 */
46767 u8 pktsize[14]; /* dword 0 */
46768 u8 port; /* dword 0 */
46769@@ -213,39 +362,91 @@ struct amap_eth_rx_compl {
46770 u8 rsshash[32]; /* dword 3 */
46771 } __packed;
46772
46773+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
46774+ * each bit of the actual structure is defined as a byte: used to calculate
46775+ * offset/shift/mask of each field */
46776+struct amap_eth_rx_compl_v1 {
46777+ u8 vlan_tag[16]; /* dword 0 */
46778+ u8 pktsize[14]; /* dword 0 */
46779+ u8 vtp; /* dword 0 */
46780+ u8 ip_opt; /* dword 0 */
46781+ u8 err; /* dword 1 */
46782+ u8 rsshp; /* dword 1 */
46783+ u8 ipf; /* dword 1 */
46784+ u8 tcpf; /* dword 1 */
46785+ u8 udpf; /* dword 1 */
46786+ u8 ipcksm; /* dword 1 */
46787+ u8 l4_cksm; /* dword 1 */
46788+ u8 ip_version; /* dword 1 */
46789+ u8 macdst[7]; /* dword 1 */
46790+ u8 rsvd0; /* dword 1 */
46791+ u8 fragndx[10]; /* dword 1 */
46792+ u8 ct[2]; /* dword 1 */
46793+ u8 sw; /* dword 1 */
46794+ u8 numfrags[3]; /* dword 1 */
46795+ u8 rss_flush; /* dword 2 */
46796+ u8 cast_enc[2]; /* dword 2 */
46797+ u8 vtm; /* dword 2 */
46798+ u8 rss_bank; /* dword 2 */
46799+ u8 port[2]; /* dword 2 */
46800+ u8 vntagp; /* dword 2 */
46801+ u8 header_len[8]; /* dword 2 */
46802+ u8 header_split[2]; /* dword 2 */
46803+ u8 rsvd1[13]; /* dword 2 */
46804+ u8 valid; /* dword 2 */
46805+ u8 rsshash[32]; /* dword 3 */
46806+} __packed;
46807+
46808 struct be_eth_rx_compl {
46809 u32 dw[4];
46810 };
46811
46812-/* Flashrom related descriptors */
46813-#define IMAGE_TYPE_FIRMWARE 160
46814-#define IMAGE_TYPE_BOOTCODE 224
46815-#define IMAGE_TYPE_OPTIONROM 32
46816+struct mgmt_hba_attribs {
46817+ u8 flashrom_version_string[32];
46818+ u8 manufacturer_name[32];
46819+ u32 supported_modes;
46820+ u32 rsvd0[3];
46821+ u8 ncsi_ver_string[12];
46822+ u32 default_extended_timeout;
46823+ u8 controller_model_number[32];
46824+ u8 controller_description[64];
46825+ u8 controller_serial_number[32];
46826+ u8 ip_version_string[32];
46827+ u8 firmware_version_string[32];
46828+ u8 bios_version_string[32];
46829+ u8 redboot_version_string[32];
46830+ u8 driver_version_string[32];
46831+ u8 fw_on_flash_version_string[32];
46832+ u32 functionalities_supported;
46833+ u16 max_cdblength;
46834+ u8 asic_revision;
46835+ u8 generational_guid[16];
46836+ u8 hba_port_count;
46837+ u16 default_link_down_timeout;
46838+ u8 iscsi_ver_min_max;
46839+ u8 multifunction_device;
46840+ u8 cache_valid;
46841+ u8 hba_status;
46842+ u8 max_domains_supported;
46843+ u8 phy_port;
46844+ u32 firmware_post_status;
46845+ u32 hba_mtu[8];
46846+ u32 rsvd1[4];
46847+};
46848
46849-#define NUM_FLASHDIR_ENTRIES 32
46850-
46851-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
46852-#define FLASHROM_TYPE_BIOS 2
46853-#define FLASHROM_TYPE_PXE_BIOS 3
46854-#define FLASHROM_TYPE_FCOE_BIOS 8
46855-#define FLASHROM_TYPE_ISCSI_BACKUP 9
46856-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
46857-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
46858-
46859-#define FLASHROM_OPER_FLASH 1
46860-#define FLASHROM_OPER_SAVE 2
46861-
46862-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
46863-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
46864-
46865-/* Offsets for components on Flash. */
46866-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
46867-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
46868-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
46869-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
46870-#define FLASH_iSCSI_BIOS_START (7340032)
46871-#define FLASH_PXE_BIOS_START (7864320)
46872-#define FLASH_FCoE_BIOS_START (524288)
46873+struct mgmt_controller_attrib {
46874+ struct mgmt_hba_attribs hba_attribs;
46875+ u16 pci_vendor_id;
46876+ u16 pci_device_id;
46877+ u16 pci_sub_vendor_id;
46878+ u16 pci_sub_system_id;
46879+ u8 pci_bus_number;
46880+ u8 pci_device_number;
46881+ u8 pci_function_number;
46882+ u8 interface_type;
46883+ u64 unique_identifier;
46884+ u32 rsvd0[5];
46885+};
46886
46887 struct controller_id {
46888 u32 vendor;
46889@@ -254,7 +455,20 @@ struct controller_id {
46890 u32 subdevice;
46891 };
46892
46893-struct flash_file_hdr {
46894+struct flash_comp {
46895+ unsigned long offset;
46896+ int optype;
46897+ int size;
46898+};
46899+
46900+struct image_hdr {
46901+ u32 imageid;
46902+ u32 imageoffset;
46903+ u32 imagelength;
46904+ u32 image_checksum;
46905+ u8 image_version[32];
46906+};
46907+struct flash_file_hdr_g2 {
46908 u8 sign[32];
46909 u32 cksum;
46910 u32 antidote;
46911@@ -266,6 +480,17 @@ struct flash_file_hdr {
46912 u8 build[24];
46913 };
46914
46915+struct flash_file_hdr_g3 {
46916+ u8 sign[52];
46917+ u8 ufi_version[4];
46918+ u32 file_len;
46919+ u32 cksum;
46920+ u32 antidote;
46921+ u32 num_imgs;
46922+ u8 build[24];
46923+ u8 rsvd[32];
46924+};
46925+
46926 struct flash_section_hdr {
46927 u32 format_rev;
46928 u32 cksum;
46929@@ -299,3 +524,19 @@ struct flash_section_info {
46930 struct flash_section_hdr fsec_hdr;
46931 struct flash_section_entry fsec_entry[32];
46932 };
46933+
46934+struct flash_ncsi_image_hdr {
46935+ u32 magic;
46936+ u8 hdr_len;
46937+ u8 type;
46938+ u16 hdr_ver;
46939+ u8 rsvd0[2];
46940+ u16 load_offset;
46941+ u32 len;
46942+ u32 flash_offset;
46943+ u8 ver[16];
46944+ u8 name[24];
46945+ u32 img_cksum;
46946+ u8 rsvd1[4];
46947+ u32 hdr_cksum;
46948+};
46949diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
46950index 000e377..f501aa3 100644
46951--- a/drivers/net/benet/be_main.c
46952+++ b/drivers/net/benet/be_main.c
46953@@ -1,18 +1,18 @@
46954 /*
46955- * Copyright (C) 2005 - 2009 ServerEngines
46956+ * Copyright (C) 2005 - 2011 Emulex
46957 * All rights reserved.
46958 *
46959 * This program is free software; you can redistribute it and/or
46960 * modify it under the terms of the GNU General Public License version 2
46961- * as published by the Free Software Foundation. The full GNU General
46962+ * as published by the Free Software Foundation. The full GNU General
46963 * Public License is included in this distribution in the file called COPYING.
46964 *
46965 * Contact Information:
46966- * linux-drivers@serverengines.com
46967+ * linux-drivers@emulex.com
46968 *
46969- * ServerEngines
46970- * 209 N. Fair Oaks Ave
46971- * Sunnyvale, CA 94085
46972+ * Emulex
46973+ * 3333 Susan Street
46974+ * Costa Mesa, CA 92626
46975 */
46976
46977 #include "be.h"
46978@@ -22,23 +22,119 @@
46979 MODULE_VERSION(DRV_VER);
46980 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46981 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
46982-MODULE_AUTHOR("ServerEngines Corporation");
46983+MODULE_AUTHOR("Emulex Corporation");
46984 MODULE_LICENSE("GPL");
46985+MODULE_INFO(supported, "external");
46986
46987-static unsigned int rx_frag_size = 2048;
46988-module_param(rx_frag_size, uint, S_IRUGO);
46989-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
46990+static ushort rx_frag_size = 2048;
46991+static unsigned int num_vfs;
46992+static unsigned int msix = 1;
46993+module_param(rx_frag_size, ushort, S_IRUGO);
46994+module_param(num_vfs, uint, S_IRUGO);
46995+module_param(msix, uint, S_IRUGO);
46996+MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer"
46997+ " - 2048 (default), 4096 or 8192");
46998+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
46999+MODULE_PARM_DESC(msix, "Enable and disable the MSI"
47000+ "x (By default MSIx is enabled)");
47001+static unsigned int gro = 1;
47002+module_param(gro, uint, S_IRUGO);
47003+MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default");
47004+
47005+static unsigned int multi_rxq = true;
47006+module_param(multi_rxq, uint, S_IRUGO);
47007+MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
47008
47009 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
47010 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
47011 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
47012 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
47013 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
47014- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
47015+ /*
47016+ * Lancer is not part of Palau 4.0
47017+ * { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47018+ */
47019 { 0 }
47020 };
47021 MODULE_DEVICE_TABLE(pci, be_dev_ids);
47022
47023+/* UE Status Low CSR */
47024+static char *ue_status_low_desc[] = {
47025+ "CEV",
47026+ "CTX",
47027+ "DBUF",
47028+ "ERX",
47029+ "Host",
47030+ "MPU",
47031+ "NDMA",
47032+ "PTC ",
47033+ "RDMA ",
47034+ "RXF ",
47035+ "RXIPS ",
47036+ "RXULP0 ",
47037+ "RXULP1 ",
47038+ "RXULP2 ",
47039+ "TIM ",
47040+ "TPOST ",
47041+ "TPRE ",
47042+ "TXIPS ",
47043+ "TXULP0 ",
47044+ "TXULP1 ",
47045+ "UC ",
47046+ "WDMA ",
47047+ "TXULP2 ",
47048+ "HOST1 ",
47049+ "P0_OB_LINK ",
47050+ "P1_OB_LINK ",
47051+ "HOST_GPIO ",
47052+ "MBOX ",
47053+ "AXGMAC0",
47054+ "AXGMAC1",
47055+ "JTAG",
47056+ "MPU_INTPEND"
47057+};
47058+
47059+/* UE Status High CSR */
47060+static char *ue_status_hi_desc[] = {
47061+ "LPCMEMHOST",
47062+ "MGMT_MAC",
47063+ "PCS0ONLINE",
47064+ "MPU_IRAM",
47065+ "PCS1ONLINE",
47066+ "PCTL0",
47067+ "PCTL1",
47068+ "PMEM",
47069+ "RR",
47070+ "TXPB",
47071+ "RXPP",
47072+ "XAUI",
47073+ "TXP",
47074+ "ARM",
47075+ "IPC",
47076+ "HOST2",
47077+ "HOST3",
47078+ "HOST4",
47079+ "HOST5",
47080+ "HOST6",
47081+ "HOST7",
47082+ "HOST8",
47083+ "HOST9",
47084+ "NETC",
47085+ "Unknown",
47086+ "Unknown",
47087+ "Unknown",
47088+ "Unknown",
47089+ "Unknown",
47090+ "Unknown",
47091+ "Unknown",
47092+ "Unknown"
47093+};
47094+
47095+static inline bool be_multi_rxq(struct be_adapter *adapter)
47096+{
47097+ return (adapter->num_rx_qs > 1);
47098+}
47099+
47100 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
47101 {
47102 struct be_dma_mem *mem = &q->dma_mem;
47103@@ -69,6 +165,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
47104 u32 reg = ioread32(addr);
47105 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47106
47107+ if (adapter->eeh_err)
47108+ return;
47109+
47110 if (!enabled && enable)
47111 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47112 else if (enabled && !enable)
47113@@ -84,6 +183,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47114 u32 val = 0;
47115 val |= qid & DB_RQ_RING_ID_MASK;
47116 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
47117+
47118+ wmb();
47119 iowrite32(val, adapter->db + DB_RQ_OFFSET);
47120 }
47121
47122@@ -92,6 +193,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47123 u32 val = 0;
47124 val |= qid & DB_TXULP_RING_ID_MASK;
47125 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
47126+
47127+ wmb();
47128 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
47129 }
47130
47131@@ -100,6 +203,12 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
47132 {
47133 u32 val = 0;
47134 val |= qid & DB_EQ_RING_ID_MASK;
47135+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
47136+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
47137+
47138+ if (adapter->eeh_err)
47139+ return;
47140+
47141 if (arm)
47142 val |= 1 << DB_EQ_REARM_SHIFT;
47143 if (clear_int)
47144@@ -113,6 +222,12 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
47145 {
47146 u32 val = 0;
47147 val |= qid & DB_CQ_RING_ID_MASK;
47148+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
47149+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
47150+
47151+ if (adapter->eeh_err)
47152+ return;
47153+
47154 if (arm)
47155 val |= 1 << DB_CQ_REARM_SHIFT;
47156 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
47157@@ -124,96 +239,250 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
47158 struct be_adapter *adapter = netdev_priv(netdev);
47159 struct sockaddr *addr = p;
47160 int status = 0;
47161+ u8 current_mac[ETH_ALEN];
47162+ u32 pmac_id = adapter->pmac_id;
47163
47164- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
47165+ if (!is_valid_ether_addr(addr->sa_data))
47166+ return -EADDRNOTAVAIL;
47167+
47168+ status = be_cmd_mac_addr_query(adapter, current_mac,
47169+ MAC_ADDRESS_TYPE_NETWORK, false,
47170+ adapter->if_handle);
47171 if (status)
47172- return status;
47173+ goto err;
47174+
47175+ if (!memcmp(addr->sa_data, current_mac, ETH_ALEN))
47176+ goto done;
47177
47178 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
47179- adapter->if_handle, &adapter->pmac_id);
47180- if (!status)
47181+ adapter->if_handle, &adapter->pmac_id, 0);
47182+
47183+ if (!status) {
47184+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
47185+ pmac_id, 0);
47186 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
47187+ goto done;
47188+ }
47189
47190- return status;
47191+err:
47192+ if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
47193+ return -EPERM;
47194+ else
47195+ dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n",
47196+ addr->sa_data);
47197+done:
47198+ return status;
47199+}
47200+
47201+static void populate_be2_stats(struct be_adapter *adapter)
47202+{
47203+
47204+ struct be_drv_stats *drvs = &adapter->drv_stats;
47205+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47206+ struct be_port_rxf_stats_v0 *port_stats =
47207+ be_port_rxf_stats_from_cmd(adapter);
47208+ struct be_rxf_stats_v0 *rxf_stats =
47209+ be_rxf_stats_from_cmd(adapter);
47210+
47211+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
47212+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
47213+ drvs->rx_control_frames = port_stats->rx_control_frames;
47214+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47215+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47216+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47217+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47218+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47219+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47220+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
47221+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47222+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47223+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47224+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47225+ drvs->rx_input_fifo_overflow_drop =
47226+ port_stats->rx_input_fifo_overflow;
47227+ drvs->rx_dropped_header_too_small =
47228+ port_stats->rx_dropped_header_too_small;
47229+ drvs->rx_address_match_errors =
47230+ port_stats->rx_address_match_errors;
47231+ drvs->rx_alignment_symbol_errors =
47232+ port_stats->rx_alignment_symbol_errors;
47233+
47234+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
47235+ drvs->tx_controlframes = port_stats->tx_controlframes;
47236+
47237+ if (adapter->port_num)
47238+ drvs->jabber_events =
47239+ rxf_stats->port1_jabber_events;
47240+ else
47241+ drvs->jabber_events =
47242+ rxf_stats->port0_jabber_events;
47243+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47244+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47245+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47246+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47247+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
47248+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47249+ drvs->rx_drops_no_tpre_descr =
47250+ rxf_stats->rx_drops_no_tpre_descr;
47251+ drvs->rx_drops_too_many_frags =
47252+ rxf_stats->rx_drops_too_many_frags;
47253+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47254+}
47255+
47256+static void populate_be3_stats(struct be_adapter *adapter)
47257+{
47258+ struct be_drv_stats *drvs = &adapter->drv_stats;
47259+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47260+
47261+ struct be_rxf_stats_v1 *rxf_stats =
47262+ be_rxf_stats_from_cmd(adapter);
47263+ struct be_port_rxf_stats_v1 *port_stats =
47264+ be_port_rxf_stats_from_cmd(adapter);
47265+
47266+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
47267+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
47268+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
47269+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
47270+ drvs->rx_control_frames = port_stats->rx_control_frames;
47271+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47272+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47273+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47274+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47275+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47276+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47277+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47278+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47279+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47280+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47281+ drvs->rx_dropped_header_too_small =
47282+ port_stats->rx_dropped_header_too_small;
47283+ drvs->rx_input_fifo_overflow_drop =
47284+ port_stats->rx_input_fifo_overflow_drop;
47285+ drvs->rx_address_match_errors =
47286+ port_stats->rx_address_match_errors;
47287+ drvs->rx_alignment_symbol_errors =
47288+ port_stats->rx_alignment_symbol_errors;
47289+ drvs->rxpp_fifo_overflow_drop =
47290+ port_stats->rxpp_fifo_overflow_drop;
47291+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
47292+ drvs->tx_controlframes = port_stats->tx_controlframes;
47293+ drvs->jabber_events = port_stats->jabber_events;
47294+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47295+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47296+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47297+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47298+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
47299+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47300+ drvs->rx_drops_no_tpre_descr =
47301+ rxf_stats->rx_drops_no_tpre_descr;
47302+ drvs->rx_drops_too_many_frags =
47303+ rxf_stats->rx_drops_too_many_frags;
47304+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47305+}
47306+
47307+
47308+static void accumulate_16bit_val(u32 *acc, u16 val)
47309+{
47310+#define lo(x) (x & 0xFFFF)
47311+#define hi(x) (x & 0xFFFF0000)
47312+ bool wrapped = val < lo(*acc);
47313+ u32 newacc = hi(*acc) + val;
47314+
47315+ if (wrapped)
47316+ newacc += 65536;
47317+ ACCESS_ONCE_RW(*acc) = newacc;
47318+}
47319+
47320+void be_parse_stats(struct be_adapter *adapter)
47321+{
47322+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
47323+ struct be_rx_obj *rxo;
47324+ int i;
47325+
47326+ if (adapter->generation == BE_GEN3) {
47327+ populate_be3_stats(adapter);
47328+ } else {
47329+ populate_be2_stats(adapter);
47330+ }
47331+
47332+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
47333+ for_all_rx_queues(adapter, rxo, i) {
47334+ /* below erx HW counter can actually wrap around after
47335+ * 65535. Driver accumulates a 32-bit value
47336+ */
47337+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
47338+ (u16)erx->rx_drops_no_fragments[rxo->q.id]);
47339+ }
47340 }
47341
47342 void netdev_stats_update(struct be_adapter *adapter)
47343 {
47344- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
47345- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
47346- struct be_port_rxf_stats *port_stats =
47347- &rxf_stats->port[adapter->port_num];
47348- struct net_device_stats *dev_stats = &adapter->stats.net_stats;
47349- struct be_erx_stats *erx_stats = &hw_stats->erx;
47350+ struct be_drv_stats *drvs = &adapter->drv_stats;
47351+ struct net_device_stats *dev_stats = &adapter->net_stats;
47352+ struct be_rx_obj *rxo;
47353+ struct be_tx_obj *txo;
47354+ unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
47355+ int i;
47356
47357- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
47358- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
47359- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
47360- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
47361+ for_all_rx_queues(adapter, rxo, i) {
47362+ pkts += rx_stats(rxo)->rx_pkts;
47363+ bytes += rx_stats(rxo)->rx_bytes;
47364+ mcast += rx_stats(rxo)->rx_mcast_pkts;
47365+ drops += rx_stats(rxo)->rx_drops_no_frags;
47366+ }
47367+ dev_stats->rx_packets = pkts;
47368+ dev_stats->rx_bytes = bytes;
47369+ dev_stats->multicast = mcast;
47370+ dev_stats->rx_dropped = drops;
47371+
47372+ pkts = bytes = 0;
47373+ for_all_tx_queues(adapter, txo, i) {
47374+ pkts += tx_stats(txo)->be_tx_pkts;
47375+ bytes += tx_stats(txo)->be_tx_bytes;
47376+ }
47377+ dev_stats->tx_packets = pkts;
47378+ dev_stats->tx_bytes = bytes;
47379
47380 /* bad pkts received */
47381- dev_stats->rx_errors = port_stats->rx_crc_errors +
47382- port_stats->rx_alignment_symbol_errors +
47383- port_stats->rx_in_range_errors +
47384- port_stats->rx_out_range_errors +
47385- port_stats->rx_frame_too_long +
47386- port_stats->rx_dropped_too_small +
47387- port_stats->rx_dropped_too_short +
47388- port_stats->rx_dropped_header_too_small +
47389- port_stats->rx_dropped_tcp_length +
47390- port_stats->rx_dropped_runt +
47391- port_stats->rx_tcp_checksum_errs +
47392- port_stats->rx_ip_checksum_errs +
47393- port_stats->rx_udp_checksum_errs;
47394-
47395- /* no space in linux buffers: best possible approximation */
47396- dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
47397+ dev_stats->rx_errors = drvs->rx_crc_errors +
47398+ drvs->rx_alignment_symbol_errors +
47399+ drvs->rx_in_range_errors +
47400+ drvs->rx_out_range_errors +
47401+ drvs->rx_frame_too_long +
47402+ drvs->rx_dropped_too_small +
47403+ drvs->rx_dropped_too_short +
47404+ drvs->rx_dropped_header_too_small +
47405+ drvs->rx_dropped_tcp_length +
47406+ drvs->rx_dropped_runt +
47407+ drvs->rx_tcp_checksum_errs +
47408+ drvs->rx_ip_checksum_errs +
47409+ drvs->rx_udp_checksum_errs;
47410
47411 /* detailed rx errors */
47412- dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
47413- port_stats->rx_out_range_errors +
47414- port_stats->rx_frame_too_long;
47415+ dev_stats->rx_length_errors = drvs->rx_in_range_errors +
47416+ drvs->rx_out_range_errors +
47417+ drvs->rx_frame_too_long;
47418
47419- /* receive ring buffer overflow */
47420- dev_stats->rx_over_errors = 0;
47421-
47422- dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
47423+ dev_stats->rx_crc_errors = drvs->rx_crc_errors;
47424
47425 /* frame alignment errors */
47426- dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
47427+ dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
47428
47429 /* receiver fifo overrun */
47430 /* drops_no_pbuf is no per i/f, it's per BE card */
47431- dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
47432- port_stats->rx_input_fifo_overflow +
47433- rxf_stats->rx_drops_no_pbuf;
47434- /* receiver missed packetd */
47435- dev_stats->rx_missed_errors = 0;
47436-
47437- /* packet transmit problems */
47438- dev_stats->tx_errors = 0;
47439-
47440- /* no space available in linux */
47441- dev_stats->tx_dropped = 0;
47442-
47443- dev_stats->multicast = port_stats->rx_multicast_frames;
47444- dev_stats->collisions = 0;
47445-
47446- /* detailed tx_errors */
47447- dev_stats->tx_aborted_errors = 0;
47448- dev_stats->tx_carrier_errors = 0;
47449- dev_stats->tx_fifo_errors = 0;
47450- dev_stats->tx_heartbeat_errors = 0;
47451- dev_stats->tx_window_errors = 0;
47452+ dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
47453+ drvs->rx_input_fifo_overflow_drop +
47454+ drvs->rx_drops_no_pbuf;
47455 }
47456
47457-void be_link_status_update(struct be_adapter *adapter, bool link_up)
47458+void be_link_status_update(struct be_adapter *adapter, int link_status)
47459 {
47460 struct net_device *netdev = adapter->netdev;
47461
47462 /* If link came up or went down */
47463- if (adapter->link_up != link_up) {
47464- if (link_up) {
47465+ if (adapter->link_status != link_status) {
47466+ adapter->link_speed = -1;
47467+ if (link_status == LINK_UP) {
47468 netif_start_queue(netdev);
47469 netif_carrier_on(netdev);
47470 printk(KERN_INFO "%s: Link up\n", netdev->name);
47471@@ -222,15 +491,15 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
47472 netif_carrier_off(netdev);
47473 printk(KERN_INFO "%s: Link down\n", netdev->name);
47474 }
47475- adapter->link_up = link_up;
47476+ adapter->link_status = link_status;
47477 }
47478 }
47479
47480 /* Update the EQ delay n BE based on the RX frags consumed / sec */
47481-static void be_rx_eqd_update(struct be_adapter *adapter)
47482+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
47483 {
47484- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47485- struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
47486+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
47487+ struct be_rx_stats *stats = &rxo->stats;
47488 ulong now = jiffies;
47489 u32 eqd;
47490
47491@@ -247,19 +516,17 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
47492 if ((now - stats->rx_fps_jiffies) < HZ)
47493 return;
47494
47495- stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
47496+ stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
47497 ((now - stats->rx_fps_jiffies) / HZ);
47498
47499 stats->rx_fps_jiffies = now;
47500- stats->be_prev_rx_frags = stats->be_rx_frags;
47501- eqd = stats->be_rx_fps / 110000;
47502+ stats->prev_rx_frags = stats->rx_frags;
47503+ eqd = stats->rx_fps / 110000;
47504 eqd = eqd << 3;
47505 if (eqd > rx_eq->max_eqd)
47506 eqd = rx_eq->max_eqd;
47507 if (eqd < rx_eq->min_eqd)
47508 eqd = rx_eq->min_eqd;
47509- if (eqd < 10)
47510- eqd = 0;
47511 if (eqd != rx_eq->cur_eqd)
47512 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
47513
47514@@ -270,7 +537,7 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
47515 {
47516 struct be_adapter *adapter = netdev_priv(dev);
47517
47518- return &adapter->stats.net_stats;
47519+ return &adapter->net_stats;
47520 }
47521
47522 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47523@@ -284,9 +551,9 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47524 return rate;
47525 }
47526
47527-static void be_tx_rate_update(struct be_adapter *adapter)
47528+static void be_tx_rate_update(struct be_tx_obj *txo)
47529 {
47530- struct be_drvr_stats *stats = drvr_stats(adapter);
47531+ struct be_tx_stats *stats = tx_stats(txo);
47532 ulong now = jiffies;
47533
47534 /* Wrapped around? */
47535@@ -305,10 +572,11 @@ static void be_tx_rate_update(struct be_adapter *adapter)
47536 }
47537 }
47538
47539-static void be_tx_stats_update(struct be_adapter *adapter,
47540+static void be_tx_stats_update(struct be_tx_obj *txo,
47541 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
47542 {
47543- struct be_drvr_stats *stats = drvr_stats(adapter);
47544+ struct be_tx_stats *stats = tx_stats(txo);
47545+
47546 stats->be_tx_reqs++;
47547 stats->be_tx_wrbs += wrb_cnt;
47548 stats->be_tx_bytes += copied;
47549@@ -318,7 +586,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
47550 }
47551
47552 /* Determine number of WRB entries needed to xmit data in an skb */
47553-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47554+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
47555+ bool *dummy)
47556 {
47557 int cnt = (skb->len > skb->data_len);
47558
47559@@ -326,12 +595,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47560
47561 /* to account for hdr wrb */
47562 cnt++;
47563- if (cnt & 1) {
47564+ if (lancer_chip(adapter) || !(cnt & 1)) {
47565+ *dummy = false;
47566+ } else {
47567 /* add a dummy to make it an even num */
47568 cnt++;
47569 *dummy = true;
47570- } else
47571- *dummy = false;
47572+ }
47573 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
47574 return cnt;
47575 }
47576@@ -343,17 +613,31 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
47577 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
47578 }
47579
47580-static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47581- bool vlan, u32 wrb_cnt, u32 len)
47582+static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
47583+ struct sk_buff *skb, u32 wrb_cnt, u32 len)
47584 {
47585+ u16 vlan_tag = 0;
47586+
47587 memset(hdr, 0, sizeof(*hdr));
47588
47589 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
47590
47591- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
47592+ if (skb_is_gso(skb)) {
47593 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
47594 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
47595 hdr, skb_shinfo(skb)->gso_size);
47596+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
47597+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
47598+
47599+ if (lancer_A0_chip(adapter)) {
47600+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
47601+ if (is_tcp_pkt(skb))
47602+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47603+ tcpcs, hdr, 1);
47604+ else if (is_udp_pkt(skb))
47605+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47606+ udpcs, hdr, 1);
47607+ }
47608 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
47609 if (is_tcp_pkt(skb))
47610 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
47611@@ -361,10 +645,10 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
47613 }
47614
47615- if (vlan && vlan_tx_tag_present(skb)) {
47616+ if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
47617 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
47618- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
47619- hdr, vlan_tx_tag_get(skb));
47620+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
47621+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
47622 }
47623
47624 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
47625@@ -374,14 +658,13 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47626 }
47627
47628
47629-static int make_tx_wrbs(struct be_adapter *adapter,
47630+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
47631 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
47632 {
47633- u64 busaddr;
47634- u32 i, copied = 0;
47635+ dma_addr_t busaddr;
47636+ int i, copied = 0;
47637 struct pci_dev *pdev = adapter->pdev;
47638 struct sk_buff *first_skb = skb;
47639- struct be_queue_info *txq = &adapter->tx_obj.q;
47640 struct be_eth_wrb *wrb;
47641 struct be_eth_hdr_wrb *hdr;
47642
47643@@ -389,15 +672,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47644 atomic_add(wrb_cnt, &txq->used);
47645 queue_head_inc(txq);
47646
47647- if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
47648- dev_err(&pdev->dev, "TX DMA mapping failed\n");
47649- return 0;
47650- }
47651-
47652 if (skb->len > skb->data_len) {
47653- int len = skb->len - skb->data_len;
47654+ int len = skb_headlen(skb);
47655+ busaddr = pci_map_single(pdev, skb->data, len,
47656+ PCI_DMA_TODEVICE);
47657 wrb = queue_head_node(txq);
47658- busaddr = skb_shinfo(skb)->dma_head;
47659 wrb_fill(wrb, busaddr, len);
47660 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47661 queue_head_inc(txq);
47662@@ -407,8 +686,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47663 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
47664 struct skb_frag_struct *frag =
47665 &skb_shinfo(skb)->frags[i];
47666-
47667- busaddr = skb_shinfo(skb)->dma_maps[i];
47668+ busaddr = pci_map_page(pdev, frag->page,
47669+ frag->page_offset,
47670+ frag->size, PCI_DMA_TODEVICE);
47671 wrb = queue_head_node(txq);
47672 wrb_fill(wrb, busaddr, frag->size);
47673 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47674@@ -423,8 +703,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47675 queue_head_inc(txq);
47676 }
47677
47678- wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
47679- wrb_cnt, copied);
47680+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
47681 be_dws_cpu_to_le(hdr, sizeof(*hdr));
47682
47683 return copied;
47684@@ -434,19 +713,70 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47685 struct net_device *netdev)
47686 {
47687 struct be_adapter *adapter = netdev_priv(netdev);
47688- struct be_tx_obj *tx_obj = &adapter->tx_obj;
47689- struct be_queue_info *txq = &tx_obj->q;
47690+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
47691+ struct be_queue_info *txq = &txo->q;
47692 u32 wrb_cnt = 0, copied = 0;
47693 u32 start = txq->head;
47694 bool dummy_wrb, stopped = false;
47695
47696- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
47697+ if (unlikely((skb_shinfo(skb)->gso_segs > 1) &&
47698+ skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb))) {
47699+ tx_stats(txo)->be_ipv6_ext_hdr_tx_drop++;
47700+ goto tx_drop;
47701+ }
47702
47703- copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
47704+ /* If the skb is a large pkt forwarded to this interface
47705+ * after being LRO'd on another interface, drop the pkt.
47706+ * HW cannot handle such pkts. LRO must be disabled when
47707+ * using the server as a router.
47708+ */
47709+ if (!skb_is_gso(skb)) {
47710+ int eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
47711+ VLAN_ETH_HLEN : ETH_HLEN;
47712+
47713+ if ((skb->len - eth_hdr_len) > adapter->netdev->mtu)
47714+ goto tx_drop;
47715+ }
47716+
47717+ /* The ASIC is calculating checksum for Vlan tagged pkts
47718+ * though CSO is disabled.
47719+ * To work around this, insert the Vlan tag in the driver
47720+ * and donot set the vlan bit, cso bit in the Tx WRB.
47721+ */
47722+ if (unlikely(vlan_tx_tag_present(skb) &&
47723+ ((skb->ip_summed != CHECKSUM_PARTIAL) || (skb->len <= 60)))) {
47724+ /* Bug 28694: Don't embed the host VLAN tag in SKB
47725+ * when UMC mode enabled on that interface
47726+ */
47727+ if (!(adapter->function_mode & UMC_ENABLED)) {
47728+ skb = skb_share_check(skb, GFP_ATOMIC);
47729+ if (unlikely(!skb))
47730+ goto tx_drop;
47731+
47732+ skb = be_vlan_put_tag(skb,
47733+ be_get_tx_vlan_tag(adapter, skb));
47734+ if (unlikely(!skb))
47735+ goto tx_drop;
47736+
47737+ be_reset_skb_tx_vlan(skb);
47738+ }
47739+ }
47740+
47741+ /* Bug 12422: the stack can send us skbs with length more than 65535
47742+ * BE cannot handle such requests. Hack the extra data out and drop it.
47743+ */
47744+ if (skb->len > 65535) {
47745+ int err = __pskb_trim(skb, 65535);
47746+ BUG_ON(err);
47747+ }
47748+
47749+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
47750+
47751+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
47752 if (copied) {
47753 /* record the sent skb in the sent_skb table */
47754- BUG_ON(tx_obj->sent_skb_list[start]);
47755- tx_obj->sent_skb_list[start] = skb;
47756+ BUG_ON(txo->sent_skb_list[start]);
47757+ txo->sent_skb_list[start] = skb;
47758
47759 /* Ensure txq has space for the next skb; Else stop the queue
47760 * *BEFORE* ringing the tx doorbell, so that we serialze the
47761@@ -454,16 +784,21 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47762 */
47763 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
47764 txq->len) {
47765- netif_stop_queue(netdev);
47766+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
47767 stopped = true;
47768 }
47769
47770 be_txq_notify(adapter, txq->id, wrb_cnt);
47771
47772- be_tx_stats_update(adapter, wrb_cnt, copied,
47773+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
47774+ netdev->trans_start = jiffies;
47775+#endif
47776+
47777+ be_tx_stats_update(txo, wrb_cnt, copied,
47778 skb_shinfo(skb)->gso_segs, stopped);
47779 } else {
47780 txq->head = start;
47781+tx_drop:
47782 dev_kfree_skb_any(skb);
47783 }
47784 return NETDEV_TX_OK;
47785@@ -473,10 +808,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47786 {
47787 struct be_adapter *adapter = netdev_priv(netdev);
47788 if (new_mtu < BE_MIN_MTU ||
47789- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
47790+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
47791+ (ETH_HLEN + ETH_FCS_LEN))) {
47792 dev_info(&adapter->pdev->dev,
47793 "MTU must be between %d and %d bytes\n",
47794- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
47795+ BE_MIN_MTU,
47796+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
47797 return -EINVAL;
47798 }
47799 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
47800@@ -486,17 +823,19 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47801 }
47802
47803 /*
47804- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
47805- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
47806- * set the BE in promiscuous VLAN mode.
47807+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
47808+ * If the user configures more, place BE in vlan promiscuous mode.
47809 */
47810-static int be_vid_config(struct be_adapter *adapter)
47811+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
47812 {
47813 u16 vtag[BE_NUM_VLANS_SUPPORTED];
47814 u16 ntags = 0, i;
47815- int status;
47816+ int status = 0;
47817
47818- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
47819+ /* No need to change the VLAN state if the I/F is in promiscous */
47820+ if (adapter->promiscuous)
47821+ return 0;
47822+ if (adapter->vlans_added <= adapter->max_vlans) {
47823 /* Construct VLAN Table to give to HW */
47824 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
47825 if (adapter->vlan_tag[i]) {
47826@@ -504,47 +843,46 @@ static int be_vid_config(struct be_adapter *adapter)
47827 ntags++;
47828 }
47829 }
47830- status = be_cmd_vlan_config(adapter, adapter->if_handle,
47831- vtag, ntags, 1, 0);
47832+ /* Send command only if there is something to be programmed */
47833+ if (ntags)
47834+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
47835+ vtag, ntags, 1, 0);
47836 } else {
47837 status = be_cmd_vlan_config(adapter, adapter->if_handle,
47838- NULL, 0, 1, 1);
47839+ NULL, 0, 1, 1);
47840 }
47841+
47842 return status;
47843 }
47844
47845 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
47846 {
47847 struct be_adapter *adapter = netdev_priv(netdev);
47848- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47849- struct be_eq_obj *tx_eq = &adapter->tx_eq;
47850
47851- be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
47852- be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
47853 adapter->vlan_grp = grp;
47854- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
47855- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
47856 }
47857
47858 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
47859 {
47860 struct be_adapter *adapter = netdev_priv(netdev);
47861
47862- adapter->num_vlans++;
47863+ adapter->vlans_added++;
47864+
47865 adapter->vlan_tag[vid] = 1;
47866-
47867- be_vid_config(adapter);
47868+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
47869+ be_vid_config(adapter, false, 0);
47870 }
47871
47872 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
47873 {
47874 struct be_adapter *adapter = netdev_priv(netdev);
47875
47876- adapter->num_vlans--;
47877- adapter->vlan_tag[vid] = 0;
47878-
47879+ adapter->vlans_added--;
47880 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
47881- be_vid_config(adapter);
47882+
47883+ adapter->vlan_tag[vid] = 0;
47884+ if (adapter->vlans_added <= adapter->max_vlans)
47885+ be_vid_config(adapter, false, 0);
47886 }
47887
47888 static void be_set_multicast_list(struct net_device *netdev)
47889@@ -552,7 +890,7 @@ static void be_set_multicast_list(struct net_device *netdev)
47890 struct be_adapter *adapter = netdev_priv(netdev);
47891
47892 if (netdev->flags & IFF_PROMISC) {
47893- be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
47894+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
47895 adapter->promiscuous = true;
47896 goto done;
47897 }
47898@@ -560,81 +898,244 @@ static void be_set_multicast_list(struct net_device *netdev)
47899 /* BE was previously in promiscous mode; disable it */
47900 if (adapter->promiscuous) {
47901 adapter->promiscuous = false;
47902- be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
47903+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
47904+
47905+ if (adapter->vlans_added)
47906+ be_vid_config(adapter, false, 0);
47907 }
47908
47909- if (netdev->flags & IFF_ALLMULTI) {
47910- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
47911+ /* Enable multicast promisc if num configured exceeds what we support */
47912+ if (netdev->flags & IFF_ALLMULTI ||
47913+ netdev_mc_count(netdev) > BE_MAX_MC) {
47914+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
47915 goto done;
47916 }
47917
47918- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
47919- netdev->mc_count);
47920+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
47921 done:
47922 return;
47923 }
47924
47925-static void be_rx_rate_update(struct be_adapter *adapter)
47926+#ifdef HAVE_SRIOV_CONFIG
47927+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
47928 {
47929- struct be_drvr_stats *stats = drvr_stats(adapter);
47930+ struct be_adapter *adapter = netdev_priv(netdev);
47931+ int status;
47932+
47933+ if (adapter->num_vfs == 0)
47934+ return -EPERM;
47935+
47936+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
47937+ return -EINVAL;
47938+
47939+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
47940+ status = be_cmd_pmac_del(adapter,
47941+ adapter->vf_cfg[vf].vf_if_handle,
47942+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47943+
47944+ status = be_cmd_pmac_add(adapter, mac,
47945+ adapter->vf_cfg[vf].vf_if_handle,
47946+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47947+
47948+ if (status)
47949+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
47950+ mac, vf);
47951+ else
47952+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
47953+
47954+ return status;
47955+}
47956+
47957+static int be_get_vf_config(struct net_device *netdev, int vf,
47958+ struct ifla_vf_info *vi)
47959+{
47960+ struct be_adapter *adapter = netdev_priv(netdev);
47961+
47962+ if (adapter->num_vfs == 0)
47963+ return -EPERM;
47964+
47965+ if (vf >= adapter->num_vfs)
47966+ return -EINVAL;
47967+
47968+ vi->vf = vf;
47969+ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
47970+ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag & VLAN_VID_MASK;
47971+ vi->qos = adapter->vf_cfg[vf].vf_vlan_tag >> VLAN_PRIO_SHIFT;
47972+ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
47973+
47974+ return 0;
47975+}
47976+
47977+/*
47978+ * Entry point to configure vlan behavior for a VF.
47979+ * 1. By default a VF is vlan Challenged.
47980+ * 2. It may or may not have Transparent Tagging enabled.
47981+ * 3. Vlan privilege for a VF can be toggled using special VID 4095.
47982+ * 4. When removing the Vlan privilege for a VF there is no need set default vid
47983+ * 5. Transparent Tagging configured for a VF resets its Vlan privilege
47984+ * 6. To disable the current Transparet Tagging for a VF:
47985+ * 6a. run the last iproute command with vlan set to 0.
47986+ * 6b. programing the default vid will disable Transparent Tagging in ARM/ASIC
47987+ */
47988+static int be_set_vf_vlan(struct net_device *netdev,
47989+ int vf, u16 vlan, u8 qos)
47990+{
47991+ struct be_adapter *adapter = netdev_priv(netdev);
47992+ int status = 0;
47993+ u32 en = 0;
47994+
47995+ if (adapter->num_vfs == 0)
47996+ return -EPERM;
47997+
47998+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
47999+ return -EINVAL;
48000+
48001+ status = be_cmd_get_fn_privileges(adapter, &en, vf + 1);
48002+ if (status)
48003+ goto sts;
48004+
48005+ if (vlan == 4095) {
48006+ if (en & BE_PRIV_FILTMGMT) {
48007+ /* Knock off filtering privileges */
48008+ en &= ~BE_PRIV_FILTMGMT;
48009+ } else {
48010+ en |= BE_PRIV_FILTMGMT;
48011+ /* Transparent Tagging is currently enabled, Reset it */
48012+ if (adapter->vf_cfg[vf].vf_vlan_tag) {
48013+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
48014+ vlan = adapter->vf_cfg[vf].vf_def_vid;
48015+ be_cmd_set_hsw_config(adapter, vlan, vf + 1,
48016+ adapter->vf_cfg[vf].vf_if_handle);
48017+ }
48018+ }
48019+
48020+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
48021+ status = be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
48022+
48023+ goto sts;
48024+ }
48025+
48026+ if (vlan || qos) {
48027+ if (en & BE_PRIV_FILTMGMT) {
48028+ /* Check privilege and reset it to default */
48029+ en &= ~BE_PRIV_FILTMGMT;
48030+ be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
48031+ }
48032+
48033+ vlan |= qos << VLAN_PRIO_SHIFT;
48034+ if (adapter->vf_cfg[vf].vf_vlan_tag != vlan) {
48035+ /* If this is new value, program it. Else skip. */
48036+ adapter->vf_cfg[vf].vf_vlan_tag = vlan;
48037+
48038+ status = be_cmd_set_hsw_config(adapter, vlan,
48039+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
48040+ }
48041+
48042+ } else {
48043+ /* Reset Transparent Vlan Tagging. */
48044+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
48045+ vlan = adapter->vf_cfg[vf].vf_def_vid;
48046+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
48047+ adapter->vf_cfg[vf].vf_if_handle);
48048+ }
48049+
48050+sts:
48051+ if (status)
48052+ dev_info(&adapter->pdev->dev,
48053+ "VLAN %d config on VF %d failed\n", vlan, vf);
48054+ return status;
48055+}
48056+
48057+static int be_set_vf_tx_rate(struct net_device *netdev,
48058+ int vf, int rate)
48059+{
48060+ struct be_adapter *adapter = netdev_priv(netdev);
48061+ int status = 0;
48062+
48063+ if (adapter->num_vfs == 0)
48064+ return -EPERM;
48065+
48066+ if ((vf >= adapter->num_vfs) || (rate < 0))
48067+ return -EINVAL;
48068+
48069+ if (rate > 10000)
48070+ rate = 10000;
48071+
48072+ adapter->vf_cfg[vf].vf_tx_rate = rate;
48073+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
48074+
48075+ if (status)
48076+ dev_info(&adapter->pdev->dev,
48077+ "tx rate %d on VF %d failed\n", rate, vf);
48078+ return status;
48079+}
48080+#endif /* HAVE_SRIOV_CONFIG */
48081+
48082+static void be_rx_rate_update(struct be_rx_obj *rxo)
48083+{
48084+ struct be_rx_stats *stats = &rxo->stats;
48085 ulong now = jiffies;
48086
48087 /* Wrapped around */
48088- if (time_before(now, stats->be_rx_jiffies)) {
48089- stats->be_rx_jiffies = now;
48090+ if (time_before(now, stats->rx_jiffies)) {
48091+ stats->rx_jiffies = now;
48092 return;
48093 }
48094
48095 /* Update the rate once in two seconds */
48096- if ((now - stats->be_rx_jiffies) < 2 * HZ)
48097+ if ((now - stats->rx_jiffies) < 2 * HZ)
48098 return;
48099
48100- stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
48101- - stats->be_rx_bytes_prev,
48102- now - stats->be_rx_jiffies);
48103- stats->be_rx_jiffies = now;
48104- stats->be_rx_bytes_prev = stats->be_rx_bytes;
48105+ stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
48106+ now - stats->rx_jiffies);
48107+ stats->rx_jiffies = now;
48108+ stats->rx_bytes_prev = stats->rx_bytes;
48109 }
48110
48111-static void be_rx_stats_update(struct be_adapter *adapter,
48112- u32 pktsize, u16 numfrags)
48113+static void be_rx_stats_update(struct be_rx_obj *rxo,
48114+ struct be_rx_compl_info *rxcp)
48115 {
48116- struct be_drvr_stats *stats = drvr_stats(adapter);
48117+ struct be_rx_stats *stats = &rxo->stats;
48118
48119- stats->be_rx_compl++;
48120- stats->be_rx_frags += numfrags;
48121- stats->be_rx_bytes += pktsize;
48122- stats->be_rx_pkts++;
48123+ stats->rx_compl++;
48124+ stats->rx_frags += rxcp->num_rcvd;
48125+ stats->rx_bytes += rxcp->pkt_size;
48126+ stats->rx_pkts++;
48127+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
48128+ stats->rx_mcast_pkts++;
48129+ if (rxcp->err)
48130+ stats->rxcp_err++;
48131 }
48132
48133-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
48134+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
48135 {
48136- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
48137-
48138- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
48139- ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
48140- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
48141- if (ip_version) {
48142- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
48143- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
48144- }
48145- ipv6_chk = (ip_version && (tcpf || udpf));
48146-
48147- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
48148+ /* L4 checksum is not reliable for non TCP/UDP packets.
48149+ * Also ignore ipcksm for ipv6 pkts */
48150+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
48151+ (rxcp->ip_csum || rxcp->ipv6);
48152 }
48153
48154 static struct be_rx_page_info *
48155-get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48156+get_rx_page_info(struct be_adapter *adapter, struct be_rx_obj *rxo,
48157+ u16 frag_idx)
48158 {
48159 struct be_rx_page_info *rx_page_info;
48160- struct be_queue_info *rxq = &adapter->rx_obj.q;
48161+ struct be_queue_info *rxq = &rxo->q;
48162
48163- rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
48164- BUG_ON(!rx_page_info->page);
48165+ rx_page_info = &rxo->page_info_tbl[frag_idx];
48166+ if (!rx_page_info->page) {
48167+ printk(KERN_EMERG "curr_idx=%d prev_dix=%d rxq->head=%d\n",
48168+ frag_idx, rxo->prev_frag_idx, rxq->head);
48169+ BUG_ON(!rx_page_info->page);
48170+ }
48171
48172- if (rx_page_info->last_page_user)
48173+ if (rx_page_info->last_page_user) {
48174 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
48175 adapter->big_page_size, PCI_DMA_FROMDEVICE);
48176+ rx_page_info->last_page_user = false;
48177+ }
48178+
48179+ rxo->prev_frag_idx = frag_idx;
48180
48181 atomic_dec(&rxq->used);
48182 return rx_page_info;
48183@@ -642,20 +1143,26 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48184
48185 /* Throwaway the data in the Rx completion */
48186 static void be_rx_compl_discard(struct be_adapter *adapter,
48187- struct be_eth_rx_compl *rxcp)
48188+ struct be_rx_obj *rxo,
48189+ struct be_rx_compl_info *rxcp)
48190 {
48191- struct be_queue_info *rxq = &adapter->rx_obj.q;
48192+ struct be_queue_info *rxq = &rxo->q;
48193 struct be_rx_page_info *page_info;
48194- u16 rxq_idx, i, num_rcvd;
48195+ u16 i;
48196+ bool oob_error;
48197+ u16 num_rcvd = rxcp->num_rcvd;
48198
48199- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48200- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48201+ oob_error = lancer_A0_chip(adapter) && rxcp->err;
48202+
48203+ /* In case of OOB error num_rcvd will be 1 more than actual */
48204+ if (oob_error && num_rcvd)
48205+ num_rcvd -= 1;
48206
48207 for (i = 0; i < num_rcvd; i++) {
48208- page_info = get_rx_page_info(adapter, rxq_idx);
48209+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48210 put_page(page_info->page);
48211 memset(page_info, 0, sizeof(*page_info));
48212- index_inc(&rxq_idx, rxq->len);
48213+ index_inc(&rxcp->rxq_idx, rxq->len);
48214 }
48215 }
48216
48217@@ -663,29 +1170,24 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
48218 * skb_fill_rx_data forms a complete skb for an ether frame
48219 * indicated by rxcp.
48220 */
48221-static void skb_fill_rx_data(struct be_adapter *adapter,
48222- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
48223+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
48224+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
48225 {
48226- struct be_queue_info *rxq = &adapter->rx_obj.q;
48227+ struct be_queue_info *rxq = &rxo->q;
48228 struct be_rx_page_info *page_info;
48229- u16 rxq_idx, i, num_rcvd, j;
48230- u32 pktsize, hdr_len, curr_frag_len, size;
48231+ u16 i, j;
48232+ u16 hdr_len, curr_frag_len, remaining;
48233 u8 *start;
48234
48235- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48236- pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48237- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48238-
48239- page_info = get_rx_page_info(adapter, rxq_idx);
48240-
48241+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48242 start = page_address(page_info->page) + page_info->page_offset;
48243 prefetch(start);
48244
48245 /* Copy data in the first descriptor of this completion */
48246- curr_frag_len = min(pktsize, rx_frag_size);
48247+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
48248
48249 /* Copy the header portion into skb_data */
48250- hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
48251+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
48252 memcpy(skb->data, start, hdr_len);
48253 skb->len = curr_frag_len;
48254 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
48255@@ -702,21 +1204,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48256 skb->data_len = curr_frag_len - hdr_len;
48257 skb->tail += hdr_len;
48258 }
48259- memset(page_info, 0, sizeof(*page_info));
48260+ page_info->page = NULL;
48261
48262- if (pktsize <= rx_frag_size) {
48263- BUG_ON(num_rcvd != 1);
48264- goto done;
48265+ if (rxcp->pkt_size <= rx_frag_size) {
48266+ BUG_ON(rxcp->num_rcvd != 1);
48267+ return;
48268 }
48269
48270 /* More frags present for this completion */
48271- size = pktsize;
48272- for (i = 1, j = 0; i < num_rcvd; i++) {
48273- size -= curr_frag_len;
48274- index_inc(&rxq_idx, rxq->len);
48275- page_info = get_rx_page_info(adapter, rxq_idx);
48276-
48277- curr_frag_len = min(size, rx_frag_size);
48278+ index_inc(&rxcp->rxq_idx, rxq->len);
48279+ remaining = rxcp->pkt_size - curr_frag_len;
48280+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
48281+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48282+ curr_frag_len = min(remaining, rx_frag_size);
48283
48284 /* Coalesce all frags from the same physical page in one slot */
48285 if (page_info->page_offset == 0) {
48286@@ -735,99 +1235,122 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48287 skb->len += curr_frag_len;
48288 skb->data_len += curr_frag_len;
48289
48290- memset(page_info, 0, sizeof(*page_info));
48291+ remaining -= curr_frag_len;
48292+ index_inc(&rxcp->rxq_idx, rxq->len);
48293+ page_info->page = NULL;
48294 }
48295 BUG_ON(j > MAX_SKB_FRAGS);
48296-
48297-done:
48298- be_rx_stats_update(adapter, pktsize, num_rcvd);
48299- return;
48300 }
48301
48302-/* Process the RX completion indicated by rxcp when GRO is disabled */
48303+/* Process the RX completion indicated by rxcp when LRO is disabled */
48304 static void be_rx_compl_process(struct be_adapter *adapter,
48305- struct be_eth_rx_compl *rxcp)
48306+ struct be_rx_obj *rxo,
48307+ struct be_rx_compl_info *rxcp)
48308 {
48309 struct sk_buff *skb;
48310- u32 vlanf, vid;
48311- u8 vtm;
48312
48313- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48314- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48315-
48316- /* vlanf could be wrongly set in some cards.
48317- * ignore if vtm is not set */
48318- if ((adapter->cap == 0x400) && !vtm)
48319- vlanf = 0;
48320-
48321- skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
48322- if (!skb) {
48323+ skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
48324+ if (unlikely(!skb)) {
48325 if (net_ratelimit())
48326 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
48327- be_rx_compl_discard(adapter, rxcp);
48328+ be_rx_compl_discard(adapter, rxo, rxcp);
48329 return;
48330 }
48331
48332- skb_reserve(skb, NET_IP_ALIGN);
48333+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
48334
48335- skb_fill_rx_data(adapter, skb, rxcp);
48336-
48337- if (do_pkt_csum(rxcp, adapter->rx_csum))
48338- skb->ip_summed = CHECKSUM_NONE;
48339- else
48340+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
48341 skb->ip_summed = CHECKSUM_UNNECESSARY;
48342+ else
48343+ skb->ip_summed = CHECKSUM_NONE;
48344
48345 skb->truesize = skb->len + sizeof(struct sk_buff);
48346+ if (unlikely(rxcp->vlanf) &&
48347+ unlikely(!vlan_configured(adapter))) {
48348+ __vlan_put_tag(skb, rxcp->vlan_tag);
48349+ }
48350 skb->protocol = eth_type_trans(skb, adapter->netdev);
48351 skb->dev = adapter->netdev;
48352
48353- if (vlanf) {
48354- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
48355- kfree_skb(skb);
48356- return;
48357- }
48358- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48359- vid = be16_to_cpu(vid);
48360- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
48361- } else {
48362+ if (unlikely(rxcp->vlanf) &&
48363+ vlan_configured(adapter))
48364+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
48365+ rxcp->vlan_tag);
48366+ else
48367 netif_receive_skb(skb);
48368+
48369+ return;
48370+}
48371+
48372+/* Process the RX completion indicated by rxcp when LRO is enabled */
48373+static void be_rx_compl_process_lro(struct be_adapter *adapter,
48374+ struct be_rx_obj *rxo,
48375+ struct be_rx_compl_info *rxcp)
48376+{
48377+ struct be_rx_page_info *page_info;
48378+ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
48379+ struct be_queue_info *rxq = &rxo->q;
48380+ u16 remaining, curr_frag_len;
48381+ u16 i, j;
48382+
48383+ remaining = rxcp->pkt_size;
48384+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48385+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48386+
48387+ curr_frag_len = min(remaining, rx_frag_size);
48388+
48389+ /* Coalesce all frags from the same physical page in one slot */
48390+ if (i == 0 || page_info->page_offset == 0) {
48391+ /* First frag or Fresh page */
48392+ j++;
48393+ rx_frags[j].page = page_info->page;
48394+ rx_frags[j].page_offset = page_info->page_offset;
48395+ rx_frags[j].size = 0;
48396+ } else {
48397+ put_page(page_info->page);
48398+ }
48399+ rx_frags[j].size += curr_frag_len;
48400+
48401+ remaining -= curr_frag_len;
48402+ index_inc(&rxcp->rxq_idx, rxq->len);
48403+ memset(page_info, 0, sizeof(*page_info));
48404+ }
48405+ BUG_ON(j > MAX_SKB_FRAGS);
48406+
48407+ if (likely(!rxcp->vlanf)) {
48408+ lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size,
48409+ rxcp->pkt_size, NULL, 0);
48410+ } else {
48411+ lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags,
48412+ rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp,
48413+ rxcp->vlan_tag, NULL, 0);
48414 }
48415
48416 return;
48417 }
48418
48419 /* Process the RX completion indicated by rxcp when GRO is enabled */
48420-static void be_rx_compl_process_gro(struct be_adapter *adapter,
48421- struct be_eth_rx_compl *rxcp)
48422+void be_rx_compl_process_gro(struct be_adapter *adapter,
48423+ struct be_rx_obj *rxo,
48424+ struct be_rx_compl_info *rxcp)
48425 {
48426+#ifdef NETIF_F_GRO
48427 struct be_rx_page_info *page_info;
48428 struct sk_buff *skb = NULL;
48429- struct be_queue_info *rxq = &adapter->rx_obj.q;
48430- struct be_eq_obj *eq_obj = &adapter->rx_eq;
48431- u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
48432- u16 i, rxq_idx = 0, vid, j;
48433- u8 vtm;
48434-
48435- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48436- pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48437- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48438- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48439- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48440-
48441- /* vlanf could be wrongly set in some cards.
48442- * ignore if vtm is not set */
48443- if ((adapter->cap == 0x400) && !vtm)
48444- vlanf = 0;
48445+ struct be_queue_info *rxq = &rxo->q;
48446+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
48447+ u16 remaining, curr_frag_len;
48448+ u16 i, j;
48449
48450 skb = napi_get_frags(&eq_obj->napi);
48451 if (!skb) {
48452- be_rx_compl_discard(adapter, rxcp);
48453+ be_rx_compl_discard(adapter, rxo, rxcp);
48454 return;
48455 }
48456
48457- remaining = pkt_size;
48458- for (i = 0, j = -1; i < num_rcvd; i++) {
48459- page_info = get_rx_page_info(adapter, rxq_idx);
48460+ remaining = rxcp->pkt_size;
48461+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48462+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48463
48464 curr_frag_len = min(remaining, rx_frag_size);
48465
48466@@ -845,55 +1368,129 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
48467 skb_shinfo(skb)->frags[j].size += curr_frag_len;
48468
48469 remaining -= curr_frag_len;
48470- index_inc(&rxq_idx, rxq->len);
48471+ index_inc(&rxcp->rxq_idx, rxq->len);
48472 memset(page_info, 0, sizeof(*page_info));
48473 }
48474 BUG_ON(j > MAX_SKB_FRAGS);
48475
48476 skb_shinfo(skb)->nr_frags = j + 1;
48477- skb->len = pkt_size;
48478- skb->data_len = pkt_size;
48479- skb->truesize += pkt_size;
48480+ skb->len = rxcp->pkt_size;
48481+ skb->data_len = rxcp->pkt_size;
48482+ skb->truesize += rxcp->pkt_size;
48483 skb->ip_summed = CHECKSUM_UNNECESSARY;
48484
48485- if (likely(!vlanf)) {
48486+ if (likely(!rxcp->vlanf))
48487 napi_gro_frags(&eq_obj->napi);
48488- } else {
48489- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48490- vid = be16_to_cpu(vid);
48491+ else
48492+ vlan_gro_frags(&eq_obj->napi,
48493+ adapter->vlan_grp, rxcp->vlan_tag);
48494+#endif
48495
48496- if (!adapter->vlan_grp || adapter->num_vlans == 0)
48497- return;
48498-
48499- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
48500- }
48501-
48502- be_rx_stats_update(adapter, pkt_size, num_rcvd);
48503 return;
48504 }
48505
48506-static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
48507+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
48508+ struct be_eth_rx_compl *compl,
48509+ struct be_rx_compl_info *rxcp)
48510 {
48511- struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
48512+ rxcp->pkt_size =
48513+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
48514+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
48515+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
48516+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
48517+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
48518+ rxcp->ip_csum =
48519+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
48520+ rxcp->l4_csum =
48521+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
48522+ rxcp->ipv6 =
48523+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
48524+ rxcp->rxq_idx =
48525+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
48526+ rxcp->num_rcvd =
48527+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
48528+ rxcp->pkt_type =
48529+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
48530+ if (rxcp->vlanf) {
48531+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
48532+ compl);
48533+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
48534+ vlan_tag, compl);
48535+ }
48536+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
48537+}
48538
48539- if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
48540+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
48541+ struct be_eth_rx_compl *compl,
48542+ struct be_rx_compl_info *rxcp)
48543+{
48544+ rxcp->pkt_size =
48545+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
48546+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
48547+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
48548+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
48549+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
48550+ rxcp->ip_csum =
48551+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
48552+ rxcp->l4_csum =
48553+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
48554+ rxcp->ipv6 =
48555+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
48556+ rxcp->rxq_idx =
48557+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
48558+ rxcp->num_rcvd =
48559+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
48560+ rxcp->pkt_type =
48561+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
48562+ if (rxcp->vlanf) {
48563+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
48564+ compl);
48565+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
48566+ vlan_tag, compl);
48567+ }
48568+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
48569+}
48570+
48571+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
48572+{
48573+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
48574+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
48575+ struct be_adapter *adapter = rxo->adapter;
48576+
48577+ /* For checking the valid bit it is Ok to use either definition as the
48578+ * valid bit is at the same position in both v0 and v1 Rx compl */
48579+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
48580 return NULL;
48581
48582- be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
48583+ rmb();
48584+ be_dws_le_to_cpu(compl, sizeof(*compl));
48585
48586- queue_tail_inc(&adapter->rx_obj.cq);
48587+ if (adapter->be3_native)
48588+ be_parse_rx_compl_v1(adapter, compl, rxcp);
48589+ else
48590+ be_parse_rx_compl_v0(adapter, compl, rxcp);
48591+
48592+ if (rxcp->vlanf) {
48593+ /* vlanf could be wrongly set in some cards.
48594+ * ignore if vtm is not set */
48595+ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
48596+ rxcp->vlanf = 0;
48597+
48598+ if (!lancer_chip(adapter))
48599+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
48600+
48601+ if ((adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK)) &&
48602+ !adapter->vlan_tag[rxcp->vlan_tag])
48603+ rxcp->vlanf = 0;
48604+ }
48605+
48606+ /* As the compl has been parsed, reset it; we wont touch it again */
48607+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
48608+
48609+ queue_tail_inc(&rxo->cq);
48610 return rxcp;
48611 }
48612
48613-/* To reset the valid bit, we need to reset the whole word as
48614- * when walking the queue the valid entries are little-endian
48615- * and invalid entries are host endian
48616- */
48617-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
48618-{
48619- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
48620-}
48621-
48622 static inline struct page *be_alloc_pages(u32 size)
48623 {
48624 gfp_t alloc_flags = GFP_ATOMIC;
48625@@ -907,11 +1504,12 @@ static inline struct page *be_alloc_pages(u32 size)
48626 * Allocate a page, split it to fragments of size rx_frag_size and post as
48627 * receive buffers to BE
48628 */
48629-static void be_post_rx_frags(struct be_adapter *adapter)
48630+static void be_post_rx_frags(struct be_rx_obj *rxo)
48631 {
48632- struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
48633- struct be_rx_page_info *page_info = NULL;
48634- struct be_queue_info *rxq = &adapter->rx_obj.q;
48635+ struct be_adapter *adapter = rxo->adapter;
48636+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
48637+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
48638+ struct be_queue_info *rxq = &rxo->q;
48639 struct page *pagep = NULL;
48640 struct be_eth_rx_d *rxd;
48641 u64 page_dmaaddr = 0, frag_dmaaddr;
48642@@ -922,7 +1520,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48643 if (!pagep) {
48644 pagep = be_alloc_pages(adapter->big_page_size);
48645 if (unlikely(!pagep)) {
48646- drvr_stats(adapter)->be_ethrx_post_fail++;
48647+ rxo->stats.rx_post_fail++;
48648 break;
48649 }
48650 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
48651@@ -941,7 +1539,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48652 rxd = queue_head_node(rxq);
48653 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
48654 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
48655- queue_head_inc(rxq);
48656
48657 /* Any space left in the current big page for another frag? */
48658 if ((page_offset + rx_frag_size + rx_frag_size) >
48659@@ -949,17 +1546,24 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48660 pagep = NULL;
48661 page_info->last_page_user = true;
48662 }
48663+
48664+ prev_page_info = page_info;
48665+ queue_head_inc(rxq);
48666 page_info = &page_info_tbl[rxq->head];
48667 }
48668 if (pagep)
48669- page_info->last_page_user = true;
48670+ prev_page_info->last_page_user = true;
48671
48672+ /* Ensure that posting buffers is the last thing done by this
48673+ * routine to avoid racing between rx bottom-half and
48674+ * be_worker (process) contexts.
48675+ */
48676 if (posted) {
48677 atomic_add(posted, &rxq->used);
48678 be_rxq_notify(adapter, rxq->id, posted);
48679 } else if (atomic_read(&rxq->used) == 0) {
48680 /* Let be_worker replenish when memory is available */
48681- adapter->rx_post_starved = true;
48682+ rxo->rx_post_starved = true;
48683 }
48684
48685 return;
48686@@ -972,6 +1576,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48687 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
48688 return NULL;
48689
48690+ rmb();
48691 be_dws_le_to_cpu(txcp, sizeof(*txcp));
48692
48693 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
48694@@ -980,11 +1585,14 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48695 return txcp;
48696 }
48697
48698-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48699+static u16 be_tx_compl_process(struct be_adapter *adapter,
48700+ struct be_tx_obj *txo, u16 last_index)
48701 {
48702- struct be_queue_info *txq = &adapter->tx_obj.q;
48703- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
48704+ struct be_queue_info *txq = &txo->q;
48705+ struct be_eth_wrb *wrb;
48706+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48707 struct sk_buff *sent_skb;
48708+ u64 busaddr;
48709 u16 cur_index, num_wrbs = 0;
48710
48711 cur_index = txq->tail;
48712@@ -992,15 +1600,31 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48713 BUG_ON(!sent_skb);
48714 sent_skbs[cur_index] = NULL;
48715
48716- do {
48717+ wrb = queue_tail_node(txq);
48718+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48719+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48720+ if (busaddr != 0) {
48721+ pci_unmap_single(adapter->pdev, busaddr,
48722+ wrb->frag_len, PCI_DMA_TODEVICE);
48723+ }
48724+ num_wrbs++;
48725+ queue_tail_inc(txq);
48726+
48727+ while (cur_index != last_index) {
48728 cur_index = txq->tail;
48729+ wrb = queue_tail_node(txq);
48730+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48731+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48732+ if (busaddr != 0) {
48733+ pci_unmap_page(adapter->pdev, busaddr,
48734+ wrb->frag_len, PCI_DMA_TODEVICE);
48735+ }
48736 num_wrbs++;
48737 queue_tail_inc(txq);
48738- } while (cur_index != last_index);
48739+ }
48740
48741- atomic_sub(num_wrbs, &txq->used);
48742- skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
48743 kfree_skb(sent_skb);
48744+ return num_wrbs;
48745 }
48746
48747 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48748@@ -1010,13 +1634,15 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48749 if (!eqe->evt)
48750 return NULL;
48751
48752+ rmb();
48753 eqe->evt = le32_to_cpu(eqe->evt);
48754 queue_tail_inc(&eq_obj->q);
48755 return eqe;
48756 }
48757
48758 static int event_handle(struct be_adapter *adapter,
48759- struct be_eq_obj *eq_obj)
48760+ struct be_eq_obj *eq_obj,
48761+ bool rearm)
48762 {
48763 struct be_eq_entry *eqe;
48764 u16 num = 0;
48765@@ -1029,7 +1655,10 @@ static int event_handle(struct be_adapter *adapter,
48766 /* Deal with any spurious interrupts that come
48767 * without events
48768 */
48769- be_eq_notify(adapter, eq_obj->q.id, true, true, num);
48770+ if (!num)
48771+ rearm = true;
48772+
48773+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
48774 if (num)
48775 napi_schedule(&eq_obj->napi);
48776
48777@@ -1053,49 +1682,55 @@ static void be_eq_clean(struct be_adapter *adapter,
48778 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
48779 }
48780
48781-static void be_rx_q_clean(struct be_adapter *adapter)
48782+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
48783 {
48784 struct be_rx_page_info *page_info;
48785- struct be_queue_info *rxq = &adapter->rx_obj.q;
48786- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
48787- struct be_eth_rx_compl *rxcp;
48788+ struct be_queue_info *rxq = &rxo->q;
48789+ struct be_queue_info *rx_cq = &rxo->cq;
48790+ struct be_rx_compl_info *rxcp;
48791 u16 tail;
48792
48793 /* First cleanup pending rx completions */
48794- while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
48795- be_rx_compl_discard(adapter, rxcp);
48796- be_rx_compl_reset(rxcp);
48797+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
48798+ be_rx_compl_discard(adapter, rxo, rxcp);
48799 be_cq_notify(adapter, rx_cq->id, true, 1);
48800 }
48801
48802 /* Then free posted rx buffer that were not used */
48803 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
48804 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
48805- page_info = get_rx_page_info(adapter, tail);
48806+ page_info = get_rx_page_info(adapter, rxo, tail);
48807 put_page(page_info->page);
48808 memset(page_info, 0, sizeof(*page_info));
48809 }
48810 BUG_ON(atomic_read(&rxq->used));
48811+ rxq->tail = rxq->head = 0;
48812 }
48813
48814-static void be_tx_compl_clean(struct be_adapter *adapter)
48815+static void be_tx_compl_clean(struct be_adapter *adapter,
48816+ struct be_tx_obj *txo)
48817 {
48818- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
48819- struct be_queue_info *txq = &adapter->tx_obj.q;
48820+ struct be_queue_info *tx_cq = &txo->cq;
48821+ struct be_queue_info *txq = &txo->q;
48822 struct be_eth_tx_compl *txcp;
48823- u16 end_idx, cmpl = 0, timeo = 0;
48824+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
48825+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48826+ struct sk_buff *sent_skb;
48827+ bool dummy_wrb;
48828
48829 /* Wait for a max of 200ms for all the tx-completions to arrive. */
48830 do {
48831 while ((txcp = be_tx_compl_get(tx_cq))) {
48832 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
48833 wrb_index, txcp);
48834- be_tx_compl_process(adapter, end_idx);
48835+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
48836 cmpl++;
48837 }
48838 if (cmpl) {
48839 be_cq_notify(adapter, tx_cq->id, false, cmpl);
48840+ atomic_sub(num_wrbs, &txq->used);
48841 cmpl = 0;
48842+ num_wrbs = 0;
48843 }
48844
48845 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
48846@@ -1107,6 +1742,17 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
48847 if (atomic_read(&txq->used))
48848 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
48849 atomic_read(&txq->used));
48850+
48851+ /* free posted tx for which compls will never arrive */
48852+ while (atomic_read(&txq->used)) {
48853+ sent_skb = sent_skbs[txq->tail];
48854+ end_idx = txq->tail;
48855+ index_adv(&end_idx,
48856+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
48857+ txq->len);
48858+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
48859+ atomic_sub(num_wrbs, &txq->used);
48860+ }
48861 }
48862
48863 static void be_mcc_queues_destroy(struct be_adapter *adapter)
48864@@ -1145,8 +1791,9 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
48865 goto mcc_cq_destroy;
48866
48867 /* Ask BE to create MCC queue */
48868- if (be_cmd_mccq_create(adapter, q, cq))
48869+ if (be_cmd_mccq_create(adapter, q, cq)) {
48870 goto mcc_q_free;
48871+ }
48872
48873 return 0;
48874
48875@@ -1163,16 +1810,20 @@ err:
48876 static void be_tx_queues_destroy(struct be_adapter *adapter)
48877 {
48878 struct be_queue_info *q;
48879+ struct be_tx_obj *txo;
48880+ u8 i;
48881
48882- q = &adapter->tx_obj.q;
48883- if (q->created)
48884- be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48885- be_queue_free(adapter, q);
48886+ for_all_tx_queues(adapter, txo, i) {
48887+ q = &txo->q;
48888+ if (q->created)
48889+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48890+ be_queue_free(adapter, q);
48891
48892- q = &adapter->tx_obj.cq;
48893- if (q->created)
48894- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48895- be_queue_free(adapter, q);
48896+ q = &txo->cq;
48897+ if (q->created)
48898+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48899+ be_queue_free(adapter, q);
48900+ }
48901
48902 /* Clear any residual events */
48903 be_eq_clean(adapter, &adapter->tx_eq);
48904@@ -1183,168 +1834,210 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
48905 be_queue_free(adapter, q);
48906 }
48907
48908+/* One TX event queue is shared by all TX compl qs */
48909 static int be_tx_queues_create(struct be_adapter *adapter)
48910 {
48911 struct be_queue_info *eq, *q, *cq;
48912+ struct be_tx_obj *txo;
48913+ u8 i, tc_id;
48914
48915 adapter->tx_eq.max_eqd = 0;
48916 adapter->tx_eq.min_eqd = 0;
48917 adapter->tx_eq.cur_eqd = 96;
48918 adapter->tx_eq.enable_aic = false;
48919- /* Alloc Tx Event queue */
48920+
48921 eq = &adapter->tx_eq.q;
48922- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
48923+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48924+ sizeof(struct be_eq_entry)))
48925 return -1;
48926
48927- /* Ask BE to create Tx Event queue */
48928 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
48929- goto tx_eq_free;
48930- /* Alloc TX eth compl queue */
48931- cq = &adapter->tx_obj.cq;
48932- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48933+ goto err;
48934+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
48935+
48936+ for_all_tx_queues(adapter, txo, i) {
48937+ cq = &txo->cq;
48938+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48939 sizeof(struct be_eth_tx_compl)))
48940- goto tx_eq_destroy;
48941+ goto err;
48942
48943- /* Ask BE to create Tx eth compl queue */
48944- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48945- goto tx_cq_free;
48946+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48947+ goto err;
48948
48949- /* Alloc TX eth queue */
48950- q = &adapter->tx_obj.q;
48951- if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
48952- goto tx_cq_destroy;
48953+ q = &txo->q;
48954+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
48955+ sizeof(struct be_eth_wrb)))
48956+ goto err;
48957
48958- /* Ask BE to create Tx eth queue */
48959- if (be_cmd_txq_create(adapter, q, cq))
48960- goto tx_q_free;
48961+ if (be_cmd_txq_create(adapter, q, cq, &tc_id))
48962+ goto err;
48963+
48964+ if (adapter->flags & BE_FLAGS_DCBX)
48965+ adapter->tc_txq_map[tc_id] = i;
48966+ }
48967 return 0;
48968
48969-tx_q_free:
48970- be_queue_free(adapter, q);
48971-tx_cq_destroy:
48972- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
48973-tx_cq_free:
48974- be_queue_free(adapter, cq);
48975-tx_eq_destroy:
48976- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
48977-tx_eq_free:
48978- be_queue_free(adapter, eq);
48979+err:
48980+ be_tx_queues_destroy(adapter);
48981 return -1;
48982 }
48983
48984 static void be_rx_queues_destroy(struct be_adapter *adapter)
48985 {
48986 struct be_queue_info *q;
48987+ struct be_rx_obj *rxo;
48988+ int i;
48989
48990- q = &adapter->rx_obj.q;
48991- if (q->created) {
48992- be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
48993- be_rx_q_clean(adapter);
48994- }
48995- be_queue_free(adapter, q);
48996+ for_all_rx_queues(adapter, rxo, i) {
48997+ be_queue_free(adapter, &rxo->q);
48998+
48999+ q = &rxo->cq;
49000+ if (q->created)
49001+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
49002+ be_queue_free(adapter, q);
49003
49004- q = &adapter->rx_obj.cq;
49005- if (q->created)
49006- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
49007- be_queue_free(adapter, q);
49008+ q = &rxo->rx_eq.q;
49009+ if (q->created)
49010+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
49011+ be_queue_free(adapter, q);
49012
49013- /* Clear any residual events */
49014- be_eq_clean(adapter, &adapter->rx_eq);
49015+ kfree(rxo->page_info_tbl);
49016+ }
49017+}
49018
49019- q = &adapter->rx_eq.q;
49020- if (q->created)
49021- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
49022- be_queue_free(adapter, q);
49023+/* Is BE in a multi-channel mode */
49024+static inline bool be_is_mc(struct be_adapter *adapter) {
49025+ return (adapter->function_mode & FLEX10_MODE ||
49026+ adapter->function_mode & VNIC_MODE ||
49027+ adapter->function_mode & UMC_ENABLED);
49028+}
49029+
49030+static u32 be_num_rxqs_want(struct be_adapter *adapter)
49031+{
49032+ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
49033+ adapter->num_vfs == 0 && be_physfn(adapter) &&
49034+ !be_is_mc(adapter)) {
49035+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
49036+ } else {
49037+ dev_warn(&adapter->pdev->dev,
49038+ "No support for multiple RX queues\n");
49039+ return 1;
49040+ }
49041 }
49042
49043 static int be_rx_queues_create(struct be_adapter *adapter)
49044 {
49045 struct be_queue_info *eq, *q, *cq;
49046- int rc;
49047+ struct be_rx_obj *rxo;
49048+ int rc, i;
49049
49050+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
49051+ msix_enabled(adapter) ?
49052+ adapter->num_msix_vec - 1 : 1);
49053+ if (adapter->num_rx_qs != MAX_RX_QS)
49054+ dev_warn(&adapter->pdev->dev,
49055+ "Could create only %d receive queues",
49056+ adapter->num_rx_qs);
49057+
49058+ adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1;
49059 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
49060- adapter->rx_eq.max_eqd = BE_MAX_EQD;
49061- adapter->rx_eq.min_eqd = 0;
49062- adapter->rx_eq.cur_eqd = 0;
49063- adapter->rx_eq.enable_aic = true;
49064+ for_all_rx_queues(adapter, rxo, i) {
49065+ rxo->adapter = adapter;
49066+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
49067+ rxo->rx_eq.enable_aic = true;
49068
49069- /* Alloc Rx Event queue */
49070- eq = &adapter->rx_eq.q;
49071- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49072- sizeof(struct be_eq_entry));
49073- if (rc)
49074- return rc;
49075+ /* EQ */
49076+ eq = &rxo->rx_eq.q;
49077+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49078+ sizeof(struct be_eq_entry));
49079+ if (rc)
49080+ goto err;
49081
49082- /* Ask BE to create Rx Event queue */
49083- rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
49084- if (rc)
49085- goto rx_eq_free;
49086+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
49087+ if (rc)
49088+ goto err;
49089
49090- /* Alloc RX eth compl queue */
49091- cq = &adapter->rx_obj.cq;
49092- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49093- sizeof(struct be_eth_rx_compl));
49094- if (rc)
49095- goto rx_eq_destroy;
49096+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
49097
49098- /* Ask BE to create Rx eth compl queue */
49099- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49100- if (rc)
49101- goto rx_cq_free;
49102+ /* CQ */
49103+ cq = &rxo->cq;
49104+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49105+ sizeof(struct be_eth_rx_compl));
49106+ if (rc)
49107+ goto err;
49108
49109- /* Alloc RX eth queue */
49110- q = &adapter->rx_obj.q;
49111- rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
49112- if (rc)
49113- goto rx_cq_destroy;
49114+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49115+ if (rc)
49116+ goto err;
49117
49118- /* Ask BE to create Rx eth queue */
49119- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
49120- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
49121- if (rc)
49122- goto rx_q_free;
49123+ /* Rx Q - will be created in be_open() */
49124+ q = &rxo->q;
49125+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
49126+ sizeof(struct be_eth_rx_d));
49127+ if (rc)
49128+ goto err;
49129+
49130+ rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) *
49131+ RX_Q_LEN, GFP_KERNEL);
49132+ if (!rxo->page_info_tbl)
49133+ goto err;
49134+ }
49135
49136 return 0;
49137-rx_q_free:
49138- be_queue_free(adapter, q);
49139-rx_cq_destroy:
49140- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
49141-rx_cq_free:
49142- be_queue_free(adapter, cq);
49143-rx_eq_destroy:
49144- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
49145-rx_eq_free:
49146- be_queue_free(adapter, eq);
49147- return rc;
49148+err:
49149+ be_rx_queues_destroy(adapter);
49150+ return -1;
49151 }
49152
49153-/* There are 8 evt ids per func. Retruns the evt id's bit number */
49154-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
49155+static bool event_peek(struct be_eq_obj *eq_obj)
49156 {
49157- return eq_id - 8 * be_pci_func(adapter);
49158+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
49159+ if (!eqe->evt)
49160+ return false;
49161+ else
49162+ return true;
49163 }
49164
49165 static irqreturn_t be_intx(int irq, void *dev)
49166 {
49167 struct be_adapter *adapter = dev;
49168- int isr;
49169+ struct be_rx_obj *rxo;
49170+ int isr, i, tx = 0 , rx = 0;
49171
49172- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49173- be_pci_func(adapter) * CEV_ISR_SIZE);
49174- if (!isr)
49175- return IRQ_NONE;
49176+ if (lancer_chip(adapter)) {
49177+ if (event_peek(&adapter->tx_eq))
49178+ tx = event_handle(adapter, &adapter->tx_eq, false);
49179+ for_all_rx_queues(adapter, rxo, i) {
49180+ if (event_peek(&rxo->rx_eq))
49181+ rx |= event_handle(adapter, &rxo->rx_eq, true);
49182+ }
49183
49184- event_handle(adapter, &adapter->tx_eq);
49185- event_handle(adapter, &adapter->rx_eq);
49186+ if (!(tx || rx))
49187+ return IRQ_NONE;
49188+ } else {
49189+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49190+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
49191+ if (!isr)
49192+ return IRQ_NONE;
49193+
49194+ if ((1 << adapter->tx_eq.eq_idx & isr))
49195+ event_handle(adapter, &adapter->tx_eq, false);
49196+
49197+ for_all_rx_queues(adapter, rxo, i) {
49198+ if ((1 << rxo->rx_eq.eq_idx & isr))
49199+ event_handle(adapter, &rxo->rx_eq, true);
49200+ }
49201+ }
49202
49203 return IRQ_HANDLED;
49204 }
49205
49206 static irqreturn_t be_msix_rx(int irq, void *dev)
49207 {
49208- struct be_adapter *adapter = dev;
49209+ struct be_rx_obj *rxo = dev;
49210+ struct be_adapter *adapter = rxo->adapter;
49211
49212- event_handle(adapter, &adapter->rx_eq);
49213+ event_handle(adapter, &rxo->rx_eq, true);
49214
49215 return IRQ_HANDLED;
49216 }
49217@@ -1353,48 +2046,72 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
49218 {
49219 struct be_adapter *adapter = dev;
49220
49221- event_handle(adapter, &adapter->tx_eq);
49222+ event_handle(adapter, &adapter->tx_eq, false);
49223
49224 return IRQ_HANDLED;
49225 }
49226
49227 static inline bool do_gro(struct be_adapter *adapter,
49228- struct be_eth_rx_compl *rxcp)
49229+ struct be_rx_compl_info *rxcp)
49230 {
49231- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
49232- int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
49233-
49234- if (err)
49235- drvr_stats(adapter)->be_rxcp_err++;
49236-
49237- return (tcp_frame && !err) ? true : false;
49238+ return (!rxcp->tcpf || rxcp->err || adapter->max_rx_coal <= 1 ||
49239+ (rxcp->vlanf && !vlan_configured(adapter))) ?
49240+ false : true;
49241 }
49242
49243 int be_poll_rx(struct napi_struct *napi, int budget)
49244 {
49245 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
49246- struct be_adapter *adapter =
49247- container_of(rx_eq, struct be_adapter, rx_eq);
49248- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
49249- struct be_eth_rx_compl *rxcp;
49250+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
49251+ struct be_adapter *adapter = rxo->adapter;
49252+ struct be_queue_info *rx_cq = &rxo->cq;
49253+ struct be_rx_compl_info *rxcp;
49254 u32 work_done;
49255+ bool flush_lro = false;
49256
49257+ rxo->stats.rx_polls++;
49258 for (work_done = 0; work_done < budget; work_done++) {
49259- rxcp = be_rx_compl_get(adapter);
49260+ rxcp = be_rx_compl_get(rxo);
49261 if (!rxcp)
49262 break;
49263
49264- if (do_gro(adapter, rxcp))
49265- be_rx_compl_process_gro(adapter, rxcp);
49266- else
49267- be_rx_compl_process(adapter, rxcp);
49268+ /* Is it a flush compl that has no data */
49269+ if (unlikely(rxcp->num_rcvd == 0))
49270+ continue;
49271
49272- be_rx_compl_reset(rxcp);
49273+ if (unlikely(rxcp->port != adapter->port_num)) {
49274+ be_rx_compl_discard(adapter, rxo, rxcp);
49275+ be_rx_stats_update(rxo, rxcp);
49276+ continue;
49277+ }
49278+
49279+ if (likely((lancer_A0_chip(adapter) && !rxcp->err) ||
49280+ !lancer_A0_chip(adapter))) {
49281+ if (do_gro(adapter, rxcp)) {
49282+ if (adapter->gro_supported) {
49283+ be_rx_compl_process_gro(adapter, rxo,
49284+ rxcp);
49285+ } else {
49286+ be_rx_compl_process_lro(adapter, rxo,
49287+ rxcp);
49288+ flush_lro = true;
49289+ }
49290+ } else {
49291+ be_rx_compl_process(adapter, rxo, rxcp);
49292+ }
49293+ } else if (lancer_A0_chip(adapter) && rxcp->err) {
49294+ be_rx_compl_discard(adapter, rxo, rxcp);
49295+ }
49296+
49297+ be_rx_stats_update(rxo, rxcp);
49298 }
49299
49300+ if (flush_lro)
49301+ lro_flush_all(&rxo->lro_mgr);
49302+
49303 /* Refill the queue */
49304- if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
49305- be_post_rx_frags(adapter);
49306+ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
49307+ be_post_rx_frags(rxo);
49308
49309 /* All consumed */
49310 if (work_done < budget) {
49311@@ -1404,40 +2121,13 @@ int be_poll_rx(struct napi_struct *napi, int budget)
49312 /* More to be consumed; continue with interrupts disabled */
49313 be_cq_notify(adapter, rx_cq->id, false, work_done);
49314 }
49315+
49316+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
49317+ adapter->netdev->last_rx = jiffies;
49318+#endif
49319 return work_done;
49320 }
49321
49322-void be_process_tx(struct be_adapter *adapter)
49323-{
49324- struct be_queue_info *txq = &adapter->tx_obj.q;
49325- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
49326- struct be_eth_tx_compl *txcp;
49327- u32 num_cmpl = 0;
49328- u16 end_idx;
49329-
49330- while ((txcp = be_tx_compl_get(tx_cq))) {
49331- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
49332- wrb_index, txcp);
49333- be_tx_compl_process(adapter, end_idx);
49334- num_cmpl++;
49335- }
49336-
49337- if (num_cmpl) {
49338- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
49339-
49340- /* As Tx wrbs have been freed up, wake up netdev queue if
49341- * it was stopped due to lack of tx wrbs.
49342- */
49343- if (netif_queue_stopped(adapter->netdev) &&
49344- atomic_read(&txq->used) < txq->len / 2) {
49345- netif_wake_queue(adapter->netdev);
49346- }
49347-
49348- drvr_stats(adapter)->be_tx_events++;
49349- drvr_stats(adapter)->be_tx_compl += num_cmpl;
49350- }
49351-}
49352-
49353 /* As TX and MCC share the same EQ check for both TX and MCC completions.
49354 * For TX/MCC we don't honour budget; consume everything
49355 */
49356@@ -1446,96 +2136,264 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
49357 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
49358 struct be_adapter *adapter =
49359 container_of(tx_eq, struct be_adapter, tx_eq);
49360+ struct be_tx_obj *txo;
49361+ struct be_eth_tx_compl *txcp;
49362+ int tx_compl, mcc_compl, status = 0;
49363+ u8 i;
49364+ u16 num_wrbs;
49365+
49366+ for_all_tx_queues(adapter, txo, i) {
49367+ tx_compl = 0;
49368+ num_wrbs = 0;
49369+ while ((txcp = be_tx_compl_get(&txo->cq))) {
49370+ num_wrbs += be_tx_compl_process(adapter, txo,
49371+ AMAP_GET_BITS(struct amap_eth_tx_compl,
49372+ wrb_index, txcp));
49373+ tx_compl++;
49374+ }
49375+ if (tx_compl) {
49376+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
49377+
49378+ atomic_sub(num_wrbs, &txo->q.used);
49379+
49380+ /* As Tx wrbs have been freed up, wake up netdev queue
49381+ * if it was stopped due to lack of tx wrbs. */
49382+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
49383+ atomic_read(&txo->q.used) < txo->q.len / 2) {
49384+ netif_wake_subqueue(adapter->netdev, i);
49385+ }
49386+
49387+ adapter->drv_stats.be_tx_events++;
49388+ txo->stats.be_tx_compl += tx_compl;
49389+ }
49390+ }
49391+
49392+ mcc_compl = be_process_mcc(adapter, &status);
49393+
49394+ if (mcc_compl) {
49395+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49396+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
49397+ }
49398
49399 napi_complete(napi);
49400
49401- be_process_tx(adapter);
49402-
49403- be_process_mcc(adapter);
49404-
49405+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49406 return 1;
49407 }
49408
49409+void be_detect_dump_ue(struct be_adapter *adapter)
49410+{
49411+ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
49412+ u32 i;
49413+
49414+ pci_read_config_dword(adapter->pdev,
49415+ PCICFG_UE_STATUS_LOW, &ue_status_lo);
49416+ pci_read_config_dword(adapter->pdev,
49417+ PCICFG_UE_STATUS_HIGH, &ue_status_hi);
49418+ pci_read_config_dword(adapter->pdev,
49419+ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
49420+ pci_read_config_dword(adapter->pdev,
49421+ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
49422+
49423+ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
49424+ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
49425+
49426+ if (ue_status_lo || ue_status_hi) {
49427+ adapter->ue_detected = true;
49428+ adapter->eeh_err = true;
49429+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
49430+ }
49431+
49432+ if (ue_status_lo) {
49433+ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
49434+ if (ue_status_lo & 1)
49435+ dev_err(&adapter->pdev->dev,
49436+ "UE: %s bit set\n", ue_status_low_desc[i]);
49437+ }
49438+ }
49439+ if (ue_status_hi) {
49440+ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
49441+ if (ue_status_hi & 1)
49442+ dev_err(&adapter->pdev->dev,
49443+ "UE: %s bit set\n", ue_status_hi_desc[i]);
49444+ }
49445+ }
49446+
49447+}
49448+
49449 static void be_worker(struct work_struct *work)
49450 {
49451 struct be_adapter *adapter =
49452 container_of(work, struct be_adapter, work.work);
49453+ struct be_rx_obj *rxo;
49454+ struct be_tx_obj *txo;
49455+ int i;
49456
49457- be_cmd_get_stats(adapter, &adapter->stats.cmd);
49458+ if (!adapter->ue_detected && !lancer_chip(adapter))
49459+ be_detect_dump_ue(adapter);
49460
49461- /* Set EQ delay */
49462- be_rx_eqd_update(adapter);
49463+ /* when interrupts are not yet enabled, just reap any pending
49464+ * mcc completions */
49465+ if (!netif_running(adapter->netdev)) {
49466+ int mcc_compl, status = 0;
49467
49468- be_tx_rate_update(adapter);
49469- be_rx_rate_update(adapter);
49470+ mcc_compl = be_process_mcc(adapter, &status);
49471
49472- if (adapter->rx_post_starved) {
49473- adapter->rx_post_starved = false;
49474- be_post_rx_frags(adapter);
49475+ if (mcc_compl) {
49476+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49477+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
49478+ }
49479+
49480+ goto reschedule;
49481+ }
49482+
49483+ if (!adapter->stats_cmd_sent)
49484+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
49485+
49486+ for_all_tx_queues(adapter, txo, i)
49487+ be_tx_rate_update(txo);
49488+
49489+ for_all_rx_queues(adapter, rxo, i) {
49490+ be_rx_rate_update(rxo);
49491+ be_rx_eqd_update(adapter, rxo);
49492+
49493+ if (rxo->rx_post_starved) {
49494+ rxo->rx_post_starved = false;
49495+ be_post_rx_frags(rxo);
49496+ }
49497 }
49498
49499+reschedule:
49500+ adapter->work_counter++;
49501 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
49502 }
49503
49504+static void be_msix_disable(struct be_adapter *adapter)
49505+{
49506+ if (msix_enabled(adapter)) {
49507+ pci_disable_msix(adapter->pdev);
49508+ adapter->num_msix_vec = 0;
49509+ }
49510+}
49511+
49512 static void be_msix_enable(struct be_adapter *adapter)
49513 {
49514- int i, status;
49515+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
49516+ int i, status, num_vec;
49517
49518- for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
49519+ num_vec = be_num_rxqs_want(adapter) + 1;
49520+
49521+ for (i = 0; i < num_vec; i++)
49522 adapter->msix_entries[i].entry = i;
49523
49524- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
49525- BE_NUM_MSIX_VECTORS);
49526- if (status == 0)
49527- adapter->msix_enabled = true;
49528+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
49529+ if (status == 0) {
49530+ goto done;
49531+ } else if (status >= BE_MIN_MSIX_VECTORS) {
49532+ num_vec = status;
49533+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
49534+ num_vec) == 0)
49535+ goto done;
49536+ }
49537 return;
49538+done:
49539+ adapter->num_msix_vec = num_vec;
49540+ return;
49541+}
49542+
49543+static void be_sriov_enable(struct be_adapter *adapter)
49544+{
49545+ be_check_sriov_fn_type(adapter);
49546+#ifdef CONFIG_PCI_IOV
49547+ if (be_physfn(adapter) && num_vfs) {
49548+ int status, pos;
49549+ u16 nvfs;
49550+
49551+ pos = pci_find_ext_capability(adapter->pdev,
49552+ PCI_EXT_CAP_ID_SRIOV);
49553+ pci_read_config_word(adapter->pdev,
49554+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
49555+ adapter->num_vfs = num_vfs;
49556+ if (num_vfs > nvfs) {
49557+ dev_info(&adapter->pdev->dev,
49558+ "Device supports %d VFs and not %d\n",
49559+ nvfs, num_vfs);
49560+ adapter->num_vfs = nvfs;
49561+ }
49562+
49563+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
49564+ if (status)
49565+ adapter->num_vfs = 0;
49566+ }
49567+#endif
49568+}
49569+
49570+static void be_sriov_disable(struct be_adapter *adapter)
49571+{
49572+#ifdef CONFIG_PCI_IOV
49573+ if (adapter->num_vfs > 0) {
49574+ pci_disable_sriov(adapter->pdev);
49575+ adapter->num_vfs = 0;
49576+ }
49577+#endif
49578 }
49579
49580-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
49581+static inline int be_msix_vec_get(struct be_adapter *adapter,
49582+ struct be_eq_obj *eq_obj)
49583 {
49584- return adapter->msix_entries[
49585- be_evt_bit_get(adapter, eq_id)].vector;
49586+ return adapter->msix_entries[eq_obj->eq_idx].vector;
49587 }
49588
49589 static int be_request_irq(struct be_adapter *adapter,
49590 struct be_eq_obj *eq_obj,
49591- void *handler, char *desc)
49592+ void *handler, char *desc, void *context)
49593 {
49594 struct net_device *netdev = adapter->netdev;
49595 int vec;
49596
49597 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
49598- vec = be_msix_vec_get(adapter, eq_obj->q.id);
49599- return request_irq(vec, handler, 0, eq_obj->desc, adapter);
49600+ vec = be_msix_vec_get(adapter, eq_obj);
49601+ return request_irq(vec, handler, 0, eq_obj->desc, context);
49602 }
49603
49604-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
49605+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
49606+ void *context)
49607 {
49608- int vec = be_msix_vec_get(adapter, eq_obj->q.id);
49609- free_irq(vec, adapter);
49610+ int vec = be_msix_vec_get(adapter, eq_obj);
49611+ free_irq(vec, context);
49612 }
49613
49614 static int be_msix_register(struct be_adapter *adapter)
49615 {
49616- int status;
49617+ struct be_rx_obj *rxo;
49618+ int status, i;
49619+ char qname[10];
49620
49621- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
49622+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
49623+ adapter);
49624 if (status)
49625 goto err;
49626
49627- status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
49628- if (status)
49629- goto free_tx_irq;
49630+ for_all_rx_queues(adapter, rxo, i) {
49631+ sprintf(qname, "rxq%d", i);
49632+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
49633+ qname, rxo);
49634+ if (status)
49635+ goto err_msix;
49636+ }
49637
49638 return 0;
49639
49640-free_tx_irq:
49641- be_free_irq(adapter, &adapter->tx_eq);
49642+err_msix:
49643+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49644+
49645+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
49646+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49647+
49648 err:
49649 dev_warn(&adapter->pdev->dev,
49650 "MSIX Request IRQ failed - err %d\n", status);
49651- pci_disable_msix(adapter->pdev);
49652- adapter->msix_enabled = false;
49653+ be_msix_disable(adapter);
49654 return status;
49655 }
49656
49657@@ -1544,10 +2402,13 @@ static int be_irq_register(struct be_adapter *adapter)
49658 struct net_device *netdev = adapter->netdev;
49659 int status;
49660
49661- if (adapter->msix_enabled) {
49662+ if (msix_enabled(adapter)) {
49663 status = be_msix_register(adapter);
49664 if (status == 0)
49665 goto done;
49666+ /* INTx is not supported for VF */
49667+ if (!be_physfn(adapter))
49668+ return status;
49669 }
49670
49671 /* INTx */
49672@@ -1567,87 +2428,363 @@ done:
49673 static void be_irq_unregister(struct be_adapter *adapter)
49674 {
49675 struct net_device *netdev = adapter->netdev;
49676+ struct be_rx_obj *rxo;
49677+ int i;
49678
49679 if (!adapter->isr_registered)
49680 return;
49681
49682 /* INTx */
49683- if (!adapter->msix_enabled) {
49684+ if (!msix_enabled(adapter)) {
49685 free_irq(netdev->irq, adapter);
49686 goto done;
49687 }
49688
49689 /* MSIx */
49690- be_free_irq(adapter, &adapter->tx_eq);
49691- be_free_irq(adapter, &adapter->rx_eq);
49692+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49693+
49694+ for_all_rx_queues(adapter, rxo, i)
49695+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49696+
49697 done:
49698 adapter->isr_registered = false;
49699- return;
49700 }
49701
49702-static int be_open(struct net_device *netdev)
49703+static u16 be_select_queue(struct net_device *netdev,
49704+ struct sk_buff *skb)
49705 {
49706 struct be_adapter *adapter = netdev_priv(netdev);
49707- struct be_eq_obj *rx_eq = &adapter->rx_eq;
49708+ u8 prio;
49709+
49710+ if (adapter->num_tx_qs == 1)
49711+ return 0;
49712+
49713+ prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
49714+ return adapter->tc_txq_map[adapter->prio_tc_map[prio]];
49715+}
49716+
49717+static void be_rx_queues_clear(struct be_adapter *adapter)
49718+{
49719+ struct be_queue_info *q;
49720+ struct be_rx_obj *rxo;
49721+ int i;
49722+
49723+ for_all_rx_queues(adapter, rxo, i) {
49724+ q = &rxo->q;
49725+ if (q->created) {
49726+ be_cmd_rxq_destroy(adapter, q);
49727+ /* After the rxq is invalidated, wait for a grace time
49728+ * of 1ms for all dma to end and the flush compl to
49729+ * arrive
49730+ */
49731+ mdelay(1);
49732+ be_rx_q_clean(adapter, rxo);
49733+ }
49734+
49735+ /* Clear any residual events */
49736+ q = &rxo->rx_eq.q;
49737+ if (q->created)
49738+ be_eq_clean(adapter, &rxo->rx_eq);
49739+ }
49740+}
49741+
49742+static int be_close(struct net_device *netdev)
49743+{
49744+ struct be_adapter *adapter = netdev_priv(netdev);
49745+ struct be_rx_obj *rxo;
49746+ struct be_tx_obj *txo;
49747 struct be_eq_obj *tx_eq = &adapter->tx_eq;
49748- bool link_up;
49749- int status;
49750+ int vec, i;
49751+
49752+ be_async_mcc_disable(adapter);
49753+
49754+ netif_stop_queue(netdev);
49755+ netif_carrier_off(netdev);
49756+ adapter->link_status = LINK_DOWN;
49757+
49758+ if (!lancer_chip(adapter))
49759+ be_intr_set(adapter, false);
49760+
49761+ for_all_rx_queues(adapter, rxo, i)
49762+ napi_disable(&rxo->rx_eq.napi);
49763+
49764+ napi_disable(&tx_eq->napi);
49765+
49766+ if (lancer_chip(adapter)) {
49767+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
49768+ for_all_rx_queues(adapter, rxo, i)
49769+ be_cq_notify(adapter, rxo->cq.id, false, 0);
49770+ for_all_tx_queues(adapter, txo, i)
49771+ be_cq_notify(adapter, txo->cq.id, false, 0);
49772+ }
49773+
49774+ if (msix_enabled(adapter)) {
49775+ vec = be_msix_vec_get(adapter, tx_eq);
49776+ synchronize_irq(vec);
49777+
49778+ for_all_rx_queues(adapter, rxo, i) {
49779+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
49780+ synchronize_irq(vec);
49781+ }
49782+ } else {
49783+ synchronize_irq(netdev->irq);
49784+ }
49785+ be_irq_unregister(adapter);
49786+
49787+ /* Wait for all pending tx completions to arrive so that
49788+ * all tx skbs are freed.
49789+ */
49790+ for_all_tx_queues(adapter, txo, i)
49791+ be_tx_compl_clean(adapter, txo);
49792+
49793+ be_rx_queues_clear(adapter);
49794+ return 0;
49795+}
49796+
49797+static int be_rx_queues_setup(struct be_adapter *adapter)
49798+{
49799+ struct be_rx_obj *rxo;
49800+ int rc, i;
49801+ u8 rsstable[MAX_RSS_QS];
49802+
49803+ for_all_rx_queues(adapter, rxo, i) {
49804+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
49805+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
49806+ adapter->if_handle,
49807+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
49808+ if (rc)
49809+ return rc;
49810+ }
49811+
49812+ if (be_multi_rxq(adapter)) {
49813+ for_all_rss_queues(adapter, rxo, i)
49814+ rsstable[i] = rxo->rss_id;
49815+
49816+ rc = be_cmd_rss_config(adapter, rsstable,
49817+ adapter->num_rx_qs - 1);
49818+ if (rc)
49819+ return rc;
49820+ }
49821
49822 /* First time posting */
49823- be_post_rx_frags(adapter);
49824+ for_all_rx_queues(adapter, rxo, i) {
49825+ be_post_rx_frags(rxo);
49826+ napi_enable(&rxo->rx_eq.napi);
49827+ }
49828+ return 0;
49829+}
49830+
49831+static int be_open(struct net_device *netdev)
49832+{
49833+ struct be_adapter *adapter = netdev_priv(netdev);
49834+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
49835+ struct be_rx_obj *rxo;
49836+ int link_status;
49837+ int status, i;
49838+ u8 mac_speed;
49839+ u16 link_speed;
49840+
49841+ status = be_rx_queues_setup(adapter);
49842+ if (status)
49843+ goto err;
49844
49845- napi_enable(&rx_eq->napi);
49846 napi_enable(&tx_eq->napi);
49847
49848 be_irq_register(adapter);
49849
49850- be_intr_set(adapter, true);
49851+ if (!lancer_chip(adapter))
49852+ be_intr_set(adapter, true);
49853
49854 /* The evt queues are created in unarmed state; arm them */
49855- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
49856+ for_all_rx_queues(adapter, rxo, i) {
49857+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
49858+ be_cq_notify(adapter, rxo->cq.id, true, 0);
49859+ }
49860 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49861
49862- /* Rx compl queue may be in unarmed state; rearm it */
49863- be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
49864+ /* Now that interrupts are on we can process async mcc */
49865+ be_async_mcc_enable(adapter);
49866
49867- status = be_cmd_link_status_query(adapter, &link_up);
49868+ status = be_cmd_link_status_query(adapter, &link_status, &mac_speed,
49869+ &link_speed, 0);
49870 if (status)
49871- goto ret_sts;
49872- be_link_status_update(adapter, link_up);
49873+ goto err;
49874+ be_link_status_update(adapter, link_status);
49875
49876- status = be_vid_config(adapter);
49877+ status = be_vid_config(adapter, false, 0);
49878 if (status)
49879- goto ret_sts;
49880+ goto err;
49881
49882- status = be_cmd_set_flow_control(adapter,
49883- adapter->tx_fc, adapter->rx_fc);
49884- if (status)
49885- goto ret_sts;
49886+ if (be_physfn(adapter)) {
49887+ status = be_cmd_set_flow_control(adapter,
49888+ adapter->tx_fc, adapter->rx_fc);
49889+ if (status)
49890+ goto err;
49891+ }
49892+
49893+ return 0;
49894+err:
49895+ be_close(adapter->netdev);
49896+ return -EIO;
49897+}
49898+
49899+static int be_setup_wol(struct be_adapter *adapter, bool enable)
49900+{
49901+ struct be_dma_mem cmd;
49902+ int status = 0;
49903+ u8 mac[ETH_ALEN];
49904+
49905+ memset(mac, 0, ETH_ALEN);
49906+
49907+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
49908+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
49909+ if (cmd.va == NULL)
49910+ return -1;
49911+ memset(cmd.va, 0, cmd.size);
49912+
49913+ if (enable) {
49914+ status = pci_write_config_dword(adapter->pdev,
49915+ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
49916+ if (status) {
49917+ dev_err(&adapter->pdev->dev,
49918+ "Could not enable Wake-on-lan\n");
49919+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
49920+ cmd.dma);
49921+ return status;
49922+ }
49923+ status = be_cmd_enable_magic_wol(adapter,
49924+ adapter->netdev->dev_addr, &cmd);
49925+ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
49926+ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
49927+ } else {
49928+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
49929+ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
49930+ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
49931+ }
49932+
49933+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
49934+ return status;
49935+}
49936+
49937+/*
49938+ * Generate a seed MAC address from the PF MAC Address using jhash.
49939+ * MAC Address for VFs are assigned incrementally starting from the seed.
49940+ * These addresses are programmed in the ASIC by the PF and the VF driver
49941+ * queries for the MAC address during its probe.
49942+ */
49943+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
49944+{
49945+ u32 vf = 0;
49946+ int status = 0;
49947+ u8 mac[ETH_ALEN];
49948+
49949+ be_vf_eth_addr_generate(adapter, mac);
49950+
49951+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49952+ status = be_cmd_pmac_add(adapter, mac,
49953+ adapter->vf_cfg[vf].vf_if_handle,
49954+ &adapter->vf_cfg[vf].vf_pmac_id,
49955+ vf + 1);
49956+ if (status)
49957+ dev_err(&adapter->pdev->dev,
49958+ "Mac address add failed for VF %d\n", vf);
49959+ else
49960+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
49961
49962- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
49963-ret_sts:
49964+ mac[5] += 1;
49965+ }
49966 return status;
49967 }
49968
49969+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
49970+{
49971+ u32 vf;
49972+
49973+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49974+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
49975+ be_cmd_pmac_del(adapter,
49976+ adapter->vf_cfg[vf].vf_if_handle,
49977+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
49978+ }
49979+}
49980+
49981+static int be_num_txqs_want(struct be_adapter *adapter)
49982+{
49983+ if (adapter->num_vfs > 0 || be_is_mc(adapter) ||
49984+ lancer_chip(adapter) || !be_physfn(adapter) ||
49985+ adapter->generation == BE_GEN2)
49986+ return 1;
49987+ else
49988+ return MAX_TX_QS;
49989+}
49990+
49991 static int be_setup(struct be_adapter *adapter)
49992 {
49993 struct net_device *netdev = adapter->netdev;
49994- u32 cap_flags, en_flags;
49995- int status;
49996-
49997- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49998- BE_IF_FLAGS_MCAST_PROMISCUOUS |
49999- BE_IF_FLAGS_PROMISCUOUS |
50000- BE_IF_FLAGS_PASS_L3L4_ERRORS;
50001- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
50002- BE_IF_FLAGS_PASS_L3L4_ERRORS;
50003+ int status, fw_num_txqs, num_txqs;
50004+ u32 cap_flags, en_flags, vf = 0;
50005+ u8 mac[ETH_ALEN];
50006+
50007+ num_txqs = be_num_txqs_want(adapter);
50008+ if (num_txqs > 1) {
50009+ be_cmd_req_pg_pfc(adapter, &fw_num_txqs);
50010+ num_txqs = min(num_txqs, fw_num_txqs);
50011+ }
50012+ adapter->num_tx_qs = num_txqs;
50013+ if (adapter->num_tx_qs != MAX_TX_QS)
50014+ netif_set_real_num_tx_queues(adapter->netdev,
50015+ adapter->num_tx_qs);
50016+
50017+ be_cmd_req_native_mode(adapter);
50018+
50019+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
50020+ BE_IF_FLAGS_BROADCAST |
50021+ BE_IF_FLAGS_MULTICAST;
50022+
50023+ if (be_physfn(adapter)) {
50024+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
50025+ cap_flags |= BE_IF_FLAGS_RSS;
50026+ en_flags |= BE_IF_FLAGS_RSS;
50027+ }
50028+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
50029+ BE_IF_FLAGS_PROMISCUOUS;
50030+ if (!lancer_A0_chip(adapter)) {
50031+ cap_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
50032+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
50033+ }
50034+ }
50035
50036 status = be_cmd_if_create(adapter, cap_flags, en_flags,
50037 netdev->dev_addr, false/* pmac_invalid */,
50038- &adapter->if_handle, &adapter->pmac_id);
50039+ &adapter->if_handle, &adapter->pmac_id, 0);
50040 if (status != 0)
50041 goto do_none;
50042
50043+ if (be_physfn(adapter)) {
50044+ while (vf < adapter->num_vfs) {
50045+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
50046+ BE_IF_FLAGS_BROADCAST;
50047+ status = be_cmd_if_create(adapter, cap_flags,
50048+ en_flags, mac, true,
50049+ &adapter->vf_cfg[vf].vf_if_handle,
50050+ NULL, vf+1);
50051+ if (status) {
50052+ dev_err(&adapter->pdev->dev,
50053+ "Interface Create failed for VF %d\n", vf);
50054+ goto if_destroy;
50055+ }
50056+ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
50057+ vf++;
50058+ }
50059+ } else {
50060+ status = be_cmd_mac_addr_query(adapter, mac,
50061+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
50062+ if (!status) {
50063+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
50064+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
50065+ }
50066+ }
50067+
50068 status = be_tx_queues_create(adapter);
50069 if (status != 0)
50070 goto if_destroy;
50071@@ -1656,10 +2793,15 @@ static int be_setup(struct be_adapter *adapter)
50072 if (status != 0)
50073 goto tx_qs_destroy;
50074
50075+ /* Allow all priorities by default. A GRP5 evt may modify this */
50076+ adapter->vlan_prio_bmap = 0xff;
50077+
50078 status = be_mcc_queues_create(adapter);
50079 if (status != 0)
50080 goto rx_qs_destroy;
50081
50082+ adapter->link_speed = -1;
50083+
50084 return 0;
50085
50086 rx_qs_destroy:
50087@@ -1667,158 +2809,392 @@ rx_qs_destroy:
50088 tx_qs_destroy:
50089 be_tx_queues_destroy(adapter);
50090 if_destroy:
50091- be_cmd_if_destroy(adapter, adapter->if_handle);
50092+ if (be_physfn(adapter)) {
50093+ for (vf = 0; vf < adapter->num_vfs; vf++)
50094+ if (adapter->vf_cfg[vf].vf_if_handle)
50095+ be_cmd_if_destroy(adapter,
50096+ adapter->vf_cfg[vf].vf_if_handle,
50097+ vf + 1);
50098+ }
50099+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50100 do_none:
50101 return status;
50102 }
50103
50104 static int be_clear(struct be_adapter *adapter)
50105 {
50106+ int vf;
50107+
50108+ if (be_physfn(adapter) && adapter->num_vfs)
50109+ be_vf_eth_addr_rem(adapter);
50110+
50111 be_mcc_queues_destroy(adapter);
50112 be_rx_queues_destroy(adapter);
50113 be_tx_queues_destroy(adapter);
50114+ adapter->eq_next_idx = 0;
50115
50116- be_cmd_if_destroy(adapter, adapter->if_handle);
50117+ if (be_physfn(adapter)) {
50118+ for (vf = 0; vf < adapter->num_vfs; vf++)
50119+ if (adapter->vf_cfg[vf].vf_if_handle)
50120+ be_cmd_if_destroy(adapter,
50121+ adapter->vf_cfg[vf].vf_if_handle, vf + 1);
50122+ }
50123+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50124
50125+ /* tell fw we're done with firing cmds */
50126+ be_cmd_fw_clean(adapter);
50127 return 0;
50128 }
50129
50130-static int be_close(struct net_device *netdev)
50131+static void be_cpy_drv_ver(struct be_adapter *adapter, void *va)
50132+{
50133+ struct mgmt_controller_attrib *attrib =
50134+ (struct mgmt_controller_attrib *) ((u8*) va +
50135+ sizeof(struct be_cmd_resp_hdr));
50136+
50137+ memcpy(attrib->hba_attribs.driver_version_string,
50138+ DRV_VER, sizeof(DRV_VER));
50139+ attrib->pci_bus_number = adapter->pdev->bus->number;
50140+ attrib->pci_device_number = PCI_SLOT(adapter->pdev->devfn);
50141+ return;
50142+}
50143+
50144+#define IOCTL_COOKIE "SERVERENGINES CORP"
50145+static int be_do_ioctl(struct net_device *netdev,
50146+ struct ifreq *ifr, int cmd)
50147 {
50148 struct be_adapter *adapter = netdev_priv(netdev);
50149- struct be_eq_obj *rx_eq = &adapter->rx_eq;
50150- struct be_eq_obj *tx_eq = &adapter->tx_eq;
50151- int vec;
50152+ struct be_cmd_req_hdr req;
50153+ struct be_cmd_resp_hdr *resp;
50154+ void *data = ifr->ifr_data;
50155+ void *ioctl_ptr;
50156+ void *va;
50157+ dma_addr_t dma;
50158+ u32 req_size;
50159+ int status, ret = 0;
50160+ u8 cookie[32];
50161+
50162+ switch (cmd) {
50163+ case SIOCDEVPRIVATE:
50164+ if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE)))
50165+ return -EFAULT;
50166+
50167+ if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
50168+ return -EINVAL;
50169
50170- cancel_delayed_work_sync(&adapter->work);
50171+ ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE);
50172+ if (copy_from_user(&req, ioctl_ptr,
50173+ sizeof(struct be_cmd_req_hdr)))
50174+ return -EFAULT;
50175
50176- netif_stop_queue(netdev);
50177- netif_carrier_off(netdev);
50178- adapter->link_up = false;
50179+ req_size = le32_to_cpu(req.request_length);
50180+ if (req_size > 65536)
50181+ return -EINVAL;
50182
50183- be_intr_set(adapter, false);
50184+ req_size += sizeof(struct be_cmd_req_hdr);
50185+ va = pci_alloc_consistent(adapter->pdev, req_size, &dma);
50186+ if (!va)
50187+ return -ENOMEM;
50188+ if (copy_from_user(va, ioctl_ptr, req_size)) {
50189+ ret = -EFAULT;
50190+ break;
50191+ }
50192
50193- if (adapter->msix_enabled) {
50194- vec = be_msix_vec_get(adapter, tx_eq->q.id);
50195- synchronize_irq(vec);
50196- vec = be_msix_vec_get(adapter, rx_eq->q.id);
50197- synchronize_irq(vec);
50198- } else {
50199- synchronize_irq(netdev->irq);
50200+ status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va);
50201+ if (status == -1) {
50202+ ret = -EIO;
50203+ break;
50204+ }
50205+
50206+ resp = (struct be_cmd_resp_hdr *) va;
50207+ if (!status) {
50208+ if (req.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES)
50209+ be_cpy_drv_ver(adapter, va);
50210+ }
50211+
50212+ if (copy_to_user(ioctl_ptr, va, req_size)) {
50213+ ret = -EFAULT;
50214+ break;
50215+ }
50216+ break;
50217+ default:
50218+ return -EOPNOTSUPP;
50219 }
50220- be_irq_unregister(adapter);
50221
50222- napi_disable(&rx_eq->napi);
50223- napi_disable(&tx_eq->napi);
50224+ if (va)
50225+ pci_free_consistent(adapter->pdev, req_size, va, dma);
50226+
50227+ return ret;
50228+}
50229+
50230+#ifdef CONFIG_NET_POLL_CONTROLLER
50231+static void be_netpoll(struct net_device *netdev)
50232+{
50233+ struct be_adapter *adapter = netdev_priv(netdev);
50234+ struct be_rx_obj *rxo;
50235+ int i;
50236
50237- /* Wait for all pending tx completions to arrive so that
50238- * all tx skbs are freed.
50239- */
50240- be_tx_compl_clean(adapter);
50241+ event_handle(adapter, &adapter->tx_eq, false);
50242+ for_all_rx_queues(adapter, rxo, i)
50243+ event_handle(adapter, &rxo->rx_eq, true);
50244+
50245+ return;
50246+}
50247+#endif
50248+
50249+static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
50250+ void **ip_hdr, void **tcpudp_hdr,
50251+ u64 *hdr_flags, void *priv)
50252+{
50253+ struct ethhdr *eh;
50254+ struct vlan_ethhdr *veh;
50255+ struct iphdr *iph;
50256+ u8 *va = page_address(frag->page) + frag->page_offset;
50257+ unsigned long ll_hlen;
50258+
50259+ prefetch(va);
50260+ eh = (struct ethhdr *)va;
50261+ *mac_hdr = eh;
50262+ ll_hlen = ETH_HLEN;
50263+ if (eh->h_proto != htons(ETH_P_IP)) {
50264+ if (eh->h_proto == htons(ETH_P_8021Q)) {
50265+ veh = (struct vlan_ethhdr *)va;
50266+ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
50267+ return -1;
50268+
50269+ ll_hlen += VLAN_HLEN;
50270+ } else {
50271+ return -1;
50272+ }
50273+ }
50274+ *hdr_flags = LRO_IPV4;
50275+ iph = (struct iphdr *)(va + ll_hlen);
50276+ *ip_hdr = iph;
50277+ if (iph->protocol != IPPROTO_TCP)
50278+ return -1;
50279+ *hdr_flags |= LRO_TCP;
50280+ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
50281
50282 return 0;
50283 }
50284
50285-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50286+static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
50287+{
50288+ struct net_lro_mgr *lro_mgr;
50289+ struct be_rx_obj *rxo;
50290+ int i;
50291+
50292+ for_all_rx_queues(adapter, rxo, i) {
50293+ lro_mgr = &rxo->lro_mgr;
50294+ lro_mgr->dev = netdev;
50295+ lro_mgr->features = LRO_F_NAPI;
50296+ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
50297+ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
50298+ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
50299+ lro_mgr->lro_arr = rxo->lro_desc;
50300+ lro_mgr->get_frag_header = be_get_frag_header;
50301+ lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
50302+ }
50303+
50304+#ifdef NETIF_F_GRO
50305+ netdev->features |= NETIF_F_GRO;
50306+ adapter->gro_supported = true;
50307+#endif
50308+}
50309+
50310+#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50311 char flash_cookie[2][16] = {"*** SE FLAS",
50312 "H DIRECTORY *** "};
50313-static int be_flash_image(struct be_adapter *adapter,
50314+
50315+static bool be_flash_redboot(struct be_adapter *adapter,
50316+ const u8 *p, u32 img_start, int image_size,
50317+ int hdr_size)
50318+{
50319+ u32 crc_offset;
50320+ u8 flashed_crc[4];
50321+ int status;
50322+
50323+ crc_offset = hdr_size + img_start + image_size - 4;
50324+
50325+ p += crc_offset;
50326+
50327+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
50328+ (image_size - 4));
50329+ if (status) {
50330+ dev_err(&adapter->pdev->dev,
50331+ "could not get crc from flash, not flashing redboot\n");
50332+ return false;
50333+ }
50334+
50335+ /*update redboot only if crc does not match*/
50336+ if (!memcmp(flashed_crc, p, 4))
50337+ return false;
50338+ else
50339+ return true;
50340+}
50341+
50342+static bool phy_flashing_required(struct be_adapter *adapter)
50343+{
50344+ int status = 0;
50345+ struct be_phy_info phy_info;
50346+
50347+ status = be_cmd_get_phy_info(adapter, &phy_info);
50348+ if (status)
50349+ return false;
50350+ if ((phy_info.phy_type == TN_8022) &&
50351+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
50352+ return true;
50353+ }
50354+ return false;
50355+}
50356+
50357+static int be_flash_data(struct be_adapter *adapter,
50358 const struct firmware *fw,
50359- struct be_dma_mem *flash_cmd, u32 flash_type)
50360+ struct be_dma_mem *flash_cmd, int num_of_images)
50361+
50362 {
50363- int status;
50364- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
50365+ int status = 0, i, filehdr_size = 0;
50366+ u32 total_bytes = 0, flash_op;
50367 int num_bytes;
50368 const u8 *p = fw->data;
50369 struct be_cmd_write_flashrom *req = flash_cmd->va;
50370+ struct flash_comp *pflashcomp;
50371+ int num_comp;
50372
50373- switch (flash_type) {
50374- case FLASHROM_TYPE_ISCSI_ACTIVE:
50375- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
50376- image_size = FLASH_IMAGE_MAX_SIZE;
50377- break;
50378- case FLASHROM_TYPE_ISCSI_BACKUP:
50379- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
50380- image_size = FLASH_IMAGE_MAX_SIZE;
50381- break;
50382- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
50383- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
50384- image_size = FLASH_IMAGE_MAX_SIZE;
50385- break;
50386- case FLASHROM_TYPE_FCOE_FW_BACKUP:
50387- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
50388- image_size = FLASH_IMAGE_MAX_SIZE;
50389- break;
50390- case FLASHROM_TYPE_BIOS:
50391- image_offset = FLASH_iSCSI_BIOS_START;
50392- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50393- break;
50394- case FLASHROM_TYPE_FCOE_BIOS:
50395- image_offset = FLASH_FCoE_BIOS_START;
50396- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50397- break;
50398- case FLASHROM_TYPE_PXE_BIOS:
50399- image_offset = FLASH_PXE_BIOS_START;
50400- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50401- break;
50402- default:
50403- return 0;
50404+ struct flash_comp gen3_flash_types[10] = {
50405+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
50406+ FLASH_IMAGE_MAX_SIZE_g3},
50407+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
50408+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
50409+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
50410+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50411+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
50412+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50413+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
50414+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50415+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
50416+ FLASH_IMAGE_MAX_SIZE_g3},
50417+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
50418+ FLASH_IMAGE_MAX_SIZE_g3},
50419+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
50420+ FLASH_IMAGE_MAX_SIZE_g3},
50421+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
50422+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
50423+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
50424+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
50425+ };
50426+ struct flash_comp gen2_flash_types[8] = {
50427+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
50428+ FLASH_IMAGE_MAX_SIZE_g2},
50429+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
50430+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
50431+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
50432+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50433+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
50434+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50435+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
50436+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50437+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
50438+ FLASH_IMAGE_MAX_SIZE_g2},
50439+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
50440+ FLASH_IMAGE_MAX_SIZE_g2},
50441+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
50442+ FLASH_IMAGE_MAX_SIZE_g2}
50443+ };
50444+ if (adapter->generation == BE_GEN3) {
50445+ pflashcomp = gen3_flash_types;
50446+ filehdr_size = sizeof(struct flash_file_hdr_g3);
50447+ num_comp = ARRAY_SIZE(gen3_flash_types);
50448+ } else {
50449+ pflashcomp = gen2_flash_types;
50450+ filehdr_size = sizeof(struct flash_file_hdr_g2);
50451+ num_comp = ARRAY_SIZE(gen2_flash_types);
50452 }
50453+ for (i = 0; i < num_comp; i++) {
50454+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
50455+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
50456+ continue;
50457+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
50458+ if (!phy_flashing_required(adapter))
50459+ continue;
50460+ }
50461+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
50462+ (!be_flash_redboot(adapter, fw->data,
50463+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
50464+ (num_of_images * sizeof(struct image_hdr)))))
50465+ continue;
50466
50467- p += sizeof(struct flash_file_hdr) + image_offset;
50468- if (p + image_size > fw->data + fw->size)
50469- return -1;
50470-
50471- total_bytes = image_size;
50472-
50473- while (total_bytes) {
50474- if (total_bytes > 32*1024)
50475- num_bytes = 32*1024;
50476- else
50477- num_bytes = total_bytes;
50478- total_bytes -= num_bytes;
50479-
50480- if (!total_bytes)
50481- flash_op = FLASHROM_OPER_FLASH;
50482- else
50483- flash_op = FLASHROM_OPER_SAVE;
50484- memcpy(req->params.data_buf, p, num_bytes);
50485- p += num_bytes;
50486- status = be_cmd_write_flashrom(adapter, flash_cmd,
50487- flash_type, flash_op, num_bytes);
50488- if (status) {
50489- dev_err(&adapter->pdev->dev,
50490- "cmd to write to flash rom failed. type/op %d/%d\n",
50491- flash_type, flash_op);
50492+ p = fw->data;
50493+ p += filehdr_size + pflashcomp[i].offset
50494+ + (num_of_images * sizeof(struct image_hdr));
50495+ if (p + pflashcomp[i].size > fw->data + fw->size)
50496 return -1;
50497+ total_bytes = pflashcomp[i].size;
50498+ while (total_bytes) {
50499+ if (total_bytes > 32*1024)
50500+ num_bytes = 32*1024;
50501+ else
50502+ num_bytes = total_bytes;
50503+ total_bytes -= num_bytes;
50504+ if (!total_bytes) {
50505+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50506+ flash_op = FLASHROM_OPER_PHY_FLASH;
50507+ else
50508+ flash_op = FLASHROM_OPER_FLASH;
50509+ } else {
50510+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50511+ flash_op = FLASHROM_OPER_PHY_SAVE;
50512+ else
50513+ flash_op = FLASHROM_OPER_SAVE;
50514+ }
50515+ memcpy(req->params.data_buf, p, num_bytes);
50516+ p += num_bytes;
50517+ status = be_cmd_write_flashrom(adapter, flash_cmd,
50518+ pflashcomp[i].optype, flash_op, num_bytes);
50519+ if (status) {
50520+ if ((status == ILLEGAL_IOCTL_REQ) &&
50521+ (pflashcomp[i].optype ==
50522+ IMG_TYPE_PHY_FW))
50523+ break;
50524+ dev_err(&adapter->pdev->dev,
50525+ "cmd to write to flash rom failed.\n");
50526+ return -1;
50527+ }
50528+ yield();
50529 }
50530- yield();
50531 }
50532-
50533 return 0;
50534 }
50535
50536+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
50537+{
50538+ if (fhdr == NULL)
50539+ return 0;
50540+ if (fhdr->build[0] == '3')
50541+ return BE_GEN3;
50542+ else if (fhdr->build[0] == '2')
50543+ return BE_GEN2;
50544+ else
50545+ return 0;
50546+}
50547+
50548 int be_load_fw(struct be_adapter *adapter, u8 *func)
50549 {
50550 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
50551 const struct firmware *fw;
50552- struct flash_file_hdr *fhdr;
50553- struct flash_section_info *fsec = NULL;
50554+ struct flash_file_hdr_g2 *fhdr;
50555+ struct flash_file_hdr_g3 *fhdr3;
50556+ struct image_hdr *img_hdr_ptr = NULL;
50557 struct be_dma_mem flash_cmd;
50558- int status;
50559+ int status, i = 0, num_imgs = 0;
50560 const u8 *p;
50561- bool entry_found = false;
50562- int flash_type;
50563- char fw_ver[FW_VER_LEN];
50564- char fw_cfg;
50565
50566- status = be_cmd_get_fw_ver(adapter, fw_ver);
50567- if (status)
50568- return status;
50569+ if (!netif_running(adapter->netdev)) {
50570+ dev_err(&adapter->pdev->dev,
50571+ "Firmware load not allowed (interface is down)\n");
50572+ return -1;
50573+ }
50574
50575- fw_cfg = *(fw_ver + 2);
50576- if (fw_cfg == '0')
50577- fw_cfg = '1';
50578 strcpy(fw_file, func);
50579
50580 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
50581@@ -1826,34 +3202,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50582 goto fw_exit;
50583
50584 p = fw->data;
50585- fhdr = (struct flash_file_hdr *) p;
50586- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
50587- dev_err(&adapter->pdev->dev,
50588- "Firmware(%s) load error (signature did not match)\n",
50589- fw_file);
50590- status = -1;
50591- goto fw_exit;
50592- }
50593-
50594+ fhdr = (struct flash_file_hdr_g2 *) p;
50595 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
50596
50597- p += sizeof(struct flash_file_hdr);
50598- while (p < (fw->data + fw->size)) {
50599- fsec = (struct flash_section_info *)p;
50600- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
50601- entry_found = true;
50602- break;
50603- }
50604- p += 32;
50605- }
50606-
50607- if (!entry_found) {
50608- status = -1;
50609- dev_err(&adapter->pdev->dev,
50610- "Flash cookie not found in firmware image\n");
50611- goto fw_exit;
50612- }
50613-
50614 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
50615 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
50616 &flash_cmd.dma);
50617@@ -1864,12 +3215,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50618 goto fw_exit;
50619 }
50620
50621- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
50622- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
50623- status = be_flash_image(adapter, fw, &flash_cmd,
50624- flash_type);
50625- if (status)
50626- break;
50627+ if ((adapter->generation == BE_GEN3) &&
50628+ (get_ufigen_type(fhdr) == BE_GEN3)) {
50629+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
50630+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
50631+ for (i = 0; i < num_imgs; i++) {
50632+ img_hdr_ptr = (struct image_hdr *) (fw->data +
50633+ (sizeof(struct flash_file_hdr_g3) +
50634+ i * sizeof(struct image_hdr)));
50635+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
50636+ status = be_flash_data(adapter, fw, &flash_cmd,
50637+ num_imgs);
50638+ }
50639+ } else if ((adapter->generation == BE_GEN2) &&
50640+ (get_ufigen_type(fhdr) == BE_GEN2)) {
50641+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
50642+ } else {
50643+ dev_err(&adapter->pdev->dev,
50644+ "UFI and Interface are not compatible for flashing\n");
50645+ status = -1;
50646 }
50647
50648 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
50649@@ -1879,14 +3243,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50650 goto fw_exit;
50651 }
50652
50653- dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n");
50654+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
50655
50656 fw_exit:
50657 release_firmware(fw);
50658 return status;
50659 }
50660
50661-static struct net_device_ops be_netdev_ops = {
50662+static net_device_ops_no_const be_netdev_ops = {
50663 .ndo_open = be_open,
50664 .ndo_stop = be_close,
50665 .ndo_start_xmit = be_xmit,
50666@@ -1898,15 +3262,32 @@ static struct net_device_ops be_netdev_ops = {
50667 .ndo_vlan_rx_register = be_vlan_register,
50668 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
50669 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
50670+#ifdef HAVE_SRIOV_CONFIG
50671+ .ndo_set_vf_mac = be_set_vf_mac,
50672+ .ndo_set_vf_vlan = be_set_vf_vlan,
50673+ .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
50674+ .ndo_get_vf_config = be_get_vf_config,
50675+#endif
50676+ .ndo_do_ioctl = be_do_ioctl,
50677+#ifdef CONFIG_NET_POLL_CONTROLLER
50678+ .ndo_poll_controller = be_netpoll,
50679+#endif
50680 };
50681
50682-static void be_netdev_init(struct net_device *netdev)
50683+static int be_netdev_init(struct net_device *netdev)
50684 {
50685 struct be_adapter *adapter = netdev_priv(netdev);
50686+ struct be_rx_obj *rxo;
50687+ int i, status = 0;
50688
50689 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
50690- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
50691- NETIF_F_GRO;
50692+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_TSO6;
50693+
50694+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
50695+ NETIF_F_HW_CSUM;
50696+
50697+ netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO |
50698+ NETIF_F_VLAN_CSUM;
50699
50700 netdev->flags |= IFF_MULTICAST;
50701
50702@@ -1918,17 +3299,30 @@ static void be_netdev_init(struct net_device *netdev)
50703
50704 netif_set_gso_max_size(netdev, 65535);
50705
50706+ if (adapter->flags & BE_FLAGS_DCBX)
50707+ be_netdev_ops.ndo_select_queue = be_select_queue;
50708 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
50709-
50710+
50711 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
50712
50713- netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
50714- BE_NAPI_WEIGHT);
50715- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50716+ be_lro_init(adapter, netdev);
50717+
50718+ for_all_rx_queues(adapter, rxo, i) {
50719+ status = be_netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
50720+ BE_NAPI_WEIGHT);
50721+ if (status) {
50722+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50723+ "for rxo:%d\n", i);
50724+ return status;
50725+ }
50726+ }
50727+ status = be_netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50728 BE_NAPI_WEIGHT);
50729+ if (status)
50730+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50731+ "for tx\n");
50732
50733- netif_carrier_off(netdev);
50734- netif_stop_queue(netdev);
50735+ return status;
50736 }
50737
50738 static void be_unmap_pci_bars(struct be_adapter *adapter)
50739@@ -1937,37 +3331,62 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
50740 iounmap(adapter->csr);
50741 if (adapter->db)
50742 iounmap(adapter->db);
50743- if (adapter->pcicfg)
50744+ if (adapter->pcicfg && be_physfn(adapter))
50745 iounmap(adapter->pcicfg);
50746 }
50747
50748 static int be_map_pci_bars(struct be_adapter *adapter)
50749 {
50750+ struct pci_dev *pdev = adapter->pdev;
50751 u8 __iomem *addr;
50752- int pcicfg_reg;
50753+ int pcicfg_reg, db_reg;
50754
50755- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
50756- pci_resource_len(adapter->pdev, 2));
50757- if (addr == NULL)
50758- return -ENOMEM;
50759- adapter->csr = addr;
50760+ if (lancer_chip(adapter)) {
50761+ addr = ioremap_nocache(pci_resource_start(pdev, 0),
50762+ pci_resource_len(adapter->pdev, 0));
50763+ if (addr == NULL)
50764+ return -ENOMEM;
50765+ adapter->db = addr;
50766+ return 0;
50767+ }
50768
50769- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
50770- 128 * 1024);
50771- if (addr == NULL)
50772- goto pci_map_err;
50773- adapter->db = addr;
50774+ if (be_physfn(adapter)) {
50775+ addr = ioremap_nocache(pci_resource_start(pdev, 2),
50776+ pci_resource_len(pdev, 2));
50777+ if (addr == NULL)
50778+ return -ENOMEM;
50779+ adapter->csr = addr;
50780+ adapter->netdev->mem_start = pci_resource_start(pdev, 2);
50781+ adapter->netdev->mem_end = pci_resource_start(pdev, 2) +
50782+ pci_resource_len(pdev, 2);
50783+ }
50784
50785- if (adapter->generation == BE_GEN2)
50786+ if (adapter->generation == BE_GEN2) {
50787 pcicfg_reg = 1;
50788- else
50789+ db_reg = 4;
50790+ } else {
50791 pcicfg_reg = 0;
50792+ if (be_physfn(adapter))
50793+ db_reg = 4;
50794+ else
50795+ db_reg = 0;
50796+ }
50797
50798- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
50799- pci_resource_len(adapter->pdev, pcicfg_reg));
50800+ addr = ioremap_nocache(pci_resource_start(pdev, db_reg),
50801+ pci_resource_len(pdev, db_reg));
50802 if (addr == NULL)
50803 goto pci_map_err;
50804- adapter->pcicfg = addr;
50805+ adapter->db = addr;
50806+
50807+ if (be_physfn(adapter)) {
50808+ addr = ioremap_nocache(
50809+ pci_resource_start(pdev, pcicfg_reg),
50810+ pci_resource_len(pdev, pcicfg_reg));
50811+ if (addr == NULL)
50812+ goto pci_map_err;
50813+ adapter->pcicfg = addr;
50814+ } else
50815+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
50816
50817 return 0;
50818 pci_map_err:
50819@@ -1985,40 +3404,69 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
50820 if (mem->va)
50821 pci_free_consistent(adapter->pdev, mem->size,
50822 mem->va, mem->dma);
50823+
50824+ mem = &adapter->rx_filter;
50825+ if (mem->va)
50826+ pci_free_consistent(adapter->pdev, mem->size,
50827+ mem->va, mem->dma);
50828 }
50829
50830 static int be_ctrl_init(struct be_adapter *adapter)
50831 {
50832 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
50833 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
50834+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
50835 int status;
50836
50837 status = be_map_pci_bars(adapter);
50838 if (status)
50839- return status;
50840+ goto done;
50841
50842 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
50843 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
50844 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
50845 if (!mbox_mem_alloc->va) {
50846- be_unmap_pci_bars(adapter);
50847- return -1;
50848+ status = -ENOMEM;
50849+ goto unmap_pci_bars;
50850 }
50851+
50852 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
50853 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
50854 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
50855 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
50856- spin_lock_init(&adapter->mbox_lock);
50857+
50858+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
50859+ rx_filter->va = pci_alloc_consistent(adapter->pdev, rx_filter->size,
50860+ &rx_filter->dma);
50861+ if (rx_filter->va == NULL) {
50862+ status = -ENOMEM;
50863+ goto free_mbox;
50864+ }
50865+ memset(rx_filter->va, 0, rx_filter->size);
50866+
50867+ mutex_init(&adapter->mbox_lock);
50868 spin_lock_init(&adapter->mcc_lock);
50869 spin_lock_init(&adapter->mcc_cq_lock);
50870
50871+ init_completion(&adapter->flash_compl);
50872+
50873+ PCI_SAVE_STATE(adapter->pdev);
50874 return 0;
50875+
50876+free_mbox:
50877+ pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
50878+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
50879+
50880+unmap_pci_bars:
50881+ be_unmap_pci_bars(adapter);
50882+
50883+done:
50884+ return status;
50885 }
50886
50887 static void be_stats_cleanup(struct be_adapter *adapter)
50888 {
50889- struct be_stats_obj *stats = &adapter->stats;
50890- struct be_dma_mem *cmd = &stats->cmd;
50891+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50892
50893 if (cmd->va)
50894 pci_free_consistent(adapter->pdev, cmd->size,
50895@@ -2027,10 +3475,12 @@ static void be_stats_cleanup(struct be_adapter *adapter)
50896
50897 static int be_stats_init(struct be_adapter *adapter)
50898 {
50899- struct be_stats_obj *stats = &adapter->stats;
50900- struct be_dma_mem *cmd = &stats->cmd;
50901+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50902
50903- cmd->size = sizeof(struct be_cmd_req_get_stats);
50904+ if (adapter->generation == BE_GEN2)
50905+ cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
50906+ else
50907+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
50908 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
50909 if (cmd->va == NULL)
50910 return -1;
50911@@ -2041,9 +3491,17 @@ static int be_stats_init(struct be_adapter *adapter)
50912 static void __devexit be_remove(struct pci_dev *pdev)
50913 {
50914 struct be_adapter *adapter = pci_get_drvdata(pdev);
50915+
50916 if (!adapter)
50917 return;
50918
50919+ cancel_delayed_work_sync(&adapter->work);
50920+
50921+#ifdef CONFIG_PALAU
50922+ be_sysfs_remove_group(adapter);
50923+#endif
50924+
50925+ /* be_close() gets called if the device is open by unregister */
50926 unregister_netdev(adapter->netdev);
50927
50928 be_clear(adapter);
50929@@ -2052,36 +3510,203 @@ static void __devexit be_remove(struct pci_dev *pdev)
50930
50931 be_ctrl_cleanup(adapter);
50932
50933- if (adapter->msix_enabled) {
50934- pci_disable_msix(adapter->pdev);
50935- adapter->msix_enabled = false;
50936- }
50937+ kfree(adapter->vf_cfg);
50938+ be_sriov_disable(adapter);
50939+
50940+ be_msix_disable(adapter);
50941
50942 pci_set_drvdata(pdev, NULL);
50943 pci_release_regions(pdev);
50944 pci_disable_device(pdev);
50945-
50946+ be_netif_napi_del(adapter->netdev);
50947 free_netdev(adapter->netdev);
50948 }
50949
50950-static int be_hw_up(struct be_adapter *adapter)
50951+static void be_pcie_slot_check(struct be_adapter *adapter)
50952+{
50953+ u32 curr, max, width, max_wd, speed, max_sp;
50954+
50955+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_STATUS_OFFSET,
50956+ &curr);
50957+ width = (curr >> PCIE_LINK_STATUS_NEG_WIDTH_SHIFT) &
50958+ PCIE_LINK_STATUS_NEG_WIDTH_MASK;
50959+ speed = (curr >> PCIE_LINK_STATUS_SPEED_SHIFT) &
50960+ PCIE_LINK_STATUS_SPEED_MASK;
50961+
50962+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_CAP_OFFSET,
50963+ &max);
50964+ max_wd = (max >> PCIE_LINK_CAP_MAX_WIDTH_SHIFT) &
50965+ PCIE_LINK_CAP_MAX_WIDTH_MASK;
50966+ max_sp = (max >> PCIE_LINK_CAP_MAX_SPEED_SHIFT) &
50967+ PCIE_LINK_CAP_MAX_SPEED_MASK;
50968+
50969+ if (width < max_wd || speed < max_sp)
50970+ dev_warn(&adapter->pdev->dev,
50971+ "Found network device in a Gen%s x%d PCIe slot. It "
50972+ "should be in a Gen2 x%d slot for best performance\n",
50973+ speed < max_sp ? "1" : "2", width, max_wd);
50974+}
50975+
50976+static int be_get_ioctl_version(char *fw_version) {
50977+ char *str[4];
50978+ int i;
50979+ int val[4];
50980+ char *endptr;
50981+
50982+ if(!fw_version)
50983+ return 0;
50984+ for(i=0; i<3; i++) {
50985+ str[i] = strsep(&fw_version, ".");
50986+ val[i] = simple_strtol(str[i], &endptr, 10);
50987+ }
50988+
50989+ if (val[0]>4 || (val[0]>3 && val[2]>143))
50990+ return 1;
50991+ return 0;
50992+}
50993+
50994+static int be_get_port_names(struct be_adapter *adapter)
50995 {
50996 int status;
50997+ int ver;
50998
50999- status = be_cmd_POST(adapter);
51000+ status = be_cmd_get_fw_ver(adapter,
51001+ adapter->fw_ver, NULL);
51002 if (status)
51003 return status;
51004+ ver = be_get_ioctl_version(adapter->fw_ver);
51005+ if (ver && (adapter->generation == BE_GEN3))
51006+ status = be_cmd_query_port_names_v1(adapter,
51007+ adapter->port_name);
51008+ else
51009+ status = be_cmd_query_port_names_v0(adapter,
51010+ adapter->port_name);
51011+ return status;
51012+}
51013
51014- status = be_cmd_reset_function(adapter);
51015+static int be_get_config(struct be_adapter *adapter)
51016+{
51017+ int status;
51018+ u8 mac[ETH_ALEN];
51019+
51020+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
51021+ &adapter->function_mode,
51022+ &adapter->function_caps);
51023 if (status)
51024 return status;
51025
51026- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
51027+ status = be_cmd_get_cntl_attributes(adapter);
51028 if (status)
51029 return status;
51030
51031- status = be_cmd_query_fw_cfg(adapter,
51032- &adapter->port_num, &adapter->cap);
51033+ memset(mac, 0, ETH_ALEN);
51034+ be_pcie_slot_check(adapter);
51035+
51036+ if (be_physfn(adapter)) {
51037+ status = be_cmd_mac_addr_query(adapter, mac,
51038+ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
51039+
51040+ if (status)
51041+ return status;
51042+
51043+ if (!is_valid_ether_addr(mac))
51044+ return -EADDRNOTAVAIL;
51045+
51046+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
51047+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
51048+ }
51049+
51050+ if (adapter->function_mode & FLEX10_MODE)
51051+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
51052+ else
51053+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
51054+
51055+ return 0;
51056+}
51057+
51058+static int be_dev_family_check(struct be_adapter *adapter)
51059+{
51060+ struct pci_dev *pdev = adapter->pdev;
51061+ u32 sli_intf = 0, if_type;
51062+
51063+ switch (pdev->device) {
51064+ case BE_DEVICE_ID1:
51065+ case OC_DEVICE_ID1:
51066+ adapter->generation = BE_GEN2;
51067+ break;
51068+ case BE_DEVICE_ID2:
51069+ case OC_DEVICE_ID2:
51070+ adapter->generation = BE_GEN3;
51071+ break;
51072+ case OC_DEVICE_ID3:
51073+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
51074+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
51075+ SLI_INTF_IF_TYPE_SHIFT;
51076+
51077+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
51078+ if_type != 0x02) {
51079+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
51080+ return -EINVAL;
51081+ }
51082+ if (num_vfs > 0) {
51083+ dev_err(&pdev->dev, "VFs not supported\n");
51084+ return -EINVAL;
51085+ }
51086+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
51087+ SLI_INTF_FAMILY_SHIFT);
51088+ adapter->generation = BE_GEN3;
51089+ break;
51090+ default:
51091+ adapter->generation = 0;
51092+ }
51093+ return 0;
51094+}
51095+
51096+static int lancer_wait_ready(struct be_adapter *adapter)
51097+{
51098+#define SLIPORT_READY_TIMEOUT 500
51099+ u32 sliport_status;
51100+ int status = 0, i;
51101+
51102+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
51103+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51104+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
51105+ break;
51106+
51107+ msleep(20);
51108+ }
51109+
51110+ if (i == SLIPORT_READY_TIMEOUT)
51111+ status = -1;
51112+
51113+ return status;
51114+}
51115+
51116+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
51117+{
51118+ int status;
51119+ u32 sliport_status, err, reset_needed;
51120+ status = lancer_wait_ready(adapter);
51121+ if (!status) {
51122+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51123+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
51124+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
51125+ if (err && reset_needed) {
51126+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
51127+ adapter->db + SLIPORT_CONTROL_OFFSET);
51128+
51129+ /* check adapter has corrected the error */
51130+ status = lancer_wait_ready(adapter);
51131+ sliport_status = ioread32(adapter->db +
51132+ SLIPORT_STATUS_OFFSET);
51133+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
51134+ SLIPORT_STATUS_RN_MASK);
51135+ if (status || sliport_status)
51136+ status = -1;
51137+ } else if (err || reset_needed) {
51138+ status = -1;
51139+ }
51140+ }
51141 return status;
51142 }
51143
51144@@ -2091,7 +3716,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
51145 int status = 0;
51146 struct be_adapter *adapter;
51147 struct net_device *netdev;
51148- u8 mac[ETH_ALEN];
51149+ u32 en;
51150
51151 status = pci_enable_device(pdev);
51152 if (status)
51153@@ -2102,31 +3727,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
51154 goto disable_dev;
51155 pci_set_master(pdev);
51156
51157- netdev = alloc_etherdev(sizeof(struct be_adapter));
51158+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
51159 if (netdev == NULL) {
51160 status = -ENOMEM;
51161 goto rel_reg;
51162 }
51163 adapter = netdev_priv(netdev);
51164
51165- switch (pdev->device) {
51166- case BE_DEVICE_ID1:
51167- case OC_DEVICE_ID1:
51168- adapter->generation = BE_GEN2;
51169- break;
51170- case BE_DEVICE_ID2:
51171- case OC_DEVICE_ID2:
51172- adapter->generation = BE_GEN3;
51173- break;
51174- default:
51175- adapter->generation = 0;
51176- }
51177-
51178 adapter->pdev = pdev;
51179+
51180+ status = be_dev_family_check(adapter);
51181+ if (status)
51182+ goto free_netdev;
51183+
51184 pci_set_drvdata(pdev, adapter);
51185 adapter->netdev = netdev;
51186-
51187- be_msix_enable(adapter);
51188+ SET_NETDEV_DEV(netdev, &pdev->dev);
51189
51190 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
51191 if (!status) {
51192@@ -2139,46 +3755,150 @@ static int __devinit be_probe(struct pci_dev *pdev,
51193 }
51194 }
51195
51196+ be_sriov_enable(adapter);
51197+ if (adapter->num_vfs > 0) {
51198+ adapter->vf_cfg = kcalloc(adapter->num_vfs,
51199+ sizeof(struct be_vf_cfg), GFP_KERNEL);
51200+
51201+ if (!adapter->vf_cfg)
51202+ goto free_netdev;
51203+ }
51204+
51205 status = be_ctrl_init(adapter);
51206 if (status)
51207- goto free_netdev;
51208+ goto free_vf_cfg;
51209+
51210+ if (lancer_chip(adapter)) {
51211+ status = lancer_test_and_set_rdy_state(adapter);
51212+ if (status) {
51213+ dev_err(&pdev->dev, "Adapter in non recoverable error\n");
51214+ goto ctrl_clean;
51215+ }
51216+ }
51217+
51218+ /* sync up with fw's ready state */
51219+ if (be_physfn(adapter)) {
51220+ status = be_cmd_POST(adapter);
51221+ if (status)
51222+ goto ctrl_clean;
51223+ }
51224+
51225+ /* tell fw we're ready to fire cmds */
51226+ status = be_cmd_fw_init(adapter);
51227+ if (status)
51228+ goto ctrl_clean;
51229+
51230+ status = be_cmd_reset_function(adapter);
51231+ if (status)
51232+ goto ctrl_clean;
51233
51234 status = be_stats_init(adapter);
51235 if (status)
51236 goto ctrl_clean;
51237
51238- status = be_hw_up(adapter);
51239+ status = be_get_config(adapter);
51240 if (status)
51241 goto stats_clean;
51242
51243- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
51244- true /* permanent */, 0);
51245- if (status)
51246- goto stats_clean;
51247- memcpy(netdev->dev_addr, mac, ETH_ALEN);
51248+ /* This bit is zero in normal boot case, but in crash kernel case this
51249+ is not cleared. clear this bit here, until we are ready with the irqs
51250+ i.e in be_open call.*/
51251+ if (!lancer_chip(adapter))
51252+ be_intr_set(adapter, false);
51253+
51254+ if (msix)
51255+ be_msix_enable(adapter);
51256
51257 INIT_DELAYED_WORK(&adapter->work, be_worker);
51258- be_netdev_init(netdev);
51259- SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
51260
51261 status = be_setup(adapter);
51262 if (status)
51263- goto stats_clean;
51264+ goto msix_disable;
51265+
51266+ /* Initilize the link status to -1 */
51267+ adapter->link_status = -1;
51268+
51269+ status = be_netdev_init(netdev);
51270+ if (status)
51271+ goto unsetup;
51272+
51273 status = register_netdev(netdev);
51274 if (status != 0)
51275 goto unsetup;
51276
51277- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
51278+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
51279+
51280+ if (be_physfn(adapter) && adapter->num_vfs) {
51281+ u8 mac_speed;
51282+ int link_status;
51283+ u16 def_vlan, vf, lnk_speed;
51284+
51285+ status = be_vf_eth_addr_config(adapter);
51286+ if (status)
51287+ goto unreg_netdev;
51288+
51289+ for (vf = 0; vf < adapter->num_vfs; vf++) {
51290+ status = be_cmd_get_hsw_config(adapter, &def_vlan,
51291+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
51292+ if (!status)
51293+ adapter->vf_cfg[vf].vf_def_vid = def_vlan;
51294+ else
51295+ goto unreg_netdev;
51296+
51297+ status = be_cmd_link_status_query(adapter, &link_status,
51298+ &mac_speed, &lnk_speed, vf + 1);
51299+ if (!status)
51300+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
51301+ else
51302+ goto unreg_netdev;
51303+ }
51304+ }
51305+ if (be_physfn(adapter)) {
51306+ /* Temp fix ofr bug# 23034. Till ARM
51307+ * f/w fixes privilege lvl */
51308+ be_get_port_names(adapter);
51309+ }
51310+
51311+ /* Enable Vlan capability based on privileges.
51312+ * PF will have Vlan capability anyway. */
51313+ be_cmd_get_fn_privileges(adapter, &en, 0);
51314+
51315+ if ((en & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG)) ||
51316+ be_physfn(adapter))
51317+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
51318+ else
51319+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
51320+
51321+ dev_info(&pdev->dev, "%s: numa node %d\n", netdev->name,
51322+ dev_to_node(&pdev->dev));
51323+ dev_info(&pdev->dev, "%s %s \"%s\" port %d\n", nic_name(pdev),
51324+ (adapter->port_num > 1 ? "1Gbps NIC" : "10Gbps NIC"),
51325+ adapter->model_number, adapter->hba_port_num);
51326+
51327+
51328+#ifdef CONFIG_PALAU
51329+ be_sysfs_create_group(adapter);
51330+#endif
51331+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51332 return 0;
51333
51334+unreg_netdev:
51335+ unregister_netdev(netdev);
51336 unsetup:
51337 be_clear(adapter);
51338+msix_disable:
51339+ be_msix_disable(adapter);
51340 stats_clean:
51341 be_stats_cleanup(adapter);
51342 ctrl_clean:
51343 be_ctrl_cleanup(adapter);
51344+free_vf_cfg:
51345+ kfree(adapter->vf_cfg);
51346 free_netdev:
51347- free_netdev(adapter->netdev);
51348+ be_sriov_disable(adapter);
51349+ be_netif_napi_del(netdev);
51350+ free_netdev(netdev);
51351+ pci_set_drvdata(pdev, NULL);
51352 rel_reg:
51353 pci_release_regions(pdev);
51354 disable_dev:
51355@@ -2193,6 +3913,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51356 struct be_adapter *adapter = pci_get_drvdata(pdev);
51357 struct net_device *netdev = adapter->netdev;
51358
51359+ cancel_delayed_work_sync(&adapter->work);
51360+ if (adapter->wol)
51361+ be_setup_wol(adapter, true);
51362+
51363 netif_device_detach(netdev);
51364 if (netif_running(netdev)) {
51365 rtnl_lock();
51366@@ -2202,6 +3926,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51367 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
51368 be_clear(adapter);
51369
51370+ be_msix_disable(adapter);
51371 pci_save_state(pdev);
51372 pci_disable_device(pdev);
51373 pci_set_power_state(pdev, pci_choose_state(pdev, state));
51374@@ -2223,6 +3948,12 @@ static int be_resume(struct pci_dev *pdev)
51375 pci_set_power_state(pdev, 0);
51376 pci_restore_state(pdev);
51377
51378+ be_msix_enable(adapter);
51379+ /* tell fw we're ready to fire cmds */
51380+ status = be_cmd_fw_init(adapter);
51381+ if (status)
51382+ return status;
51383+
51384 be_setup(adapter);
51385 if (netif_running(netdev)) {
51386 rtnl_lock();
51387@@ -2230,28 +3961,152 @@ static int be_resume(struct pci_dev *pdev)
51388 rtnl_unlock();
51389 }
51390 netif_device_attach(netdev);
51391+
51392+ if (adapter->wol)
51393+ be_setup_wol(adapter, false);
51394+
51395+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51396 return 0;
51397 }
51398
51399+/*
51400+ * An FLR will stop BE from DMAing any data.
51401+ */
51402+static void be_shutdown(struct pci_dev *pdev)
51403+{
51404+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51405+
51406+ if (!adapter)
51407+ return;
51408+
51409+ cancel_delayed_work_sync(&adapter->work);
51410+
51411+ netif_device_detach(adapter->netdev);
51412+
51413+ if (adapter->wol)
51414+ be_setup_wol(adapter, true);
51415+
51416+ be_cmd_reset_function(adapter);
51417+
51418+ pci_disable_device(pdev);
51419+}
51420+
51421+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
51422+ pci_channel_state_t state)
51423+{
51424+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51425+ struct net_device *netdev = adapter->netdev;
51426+
51427+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
51428+
51429+ adapter->eeh_err = true;
51430+
51431+ netif_device_detach(netdev);
51432+
51433+ if (netif_running(netdev)) {
51434+ rtnl_lock();
51435+ be_close(netdev);
51436+ rtnl_unlock();
51437+ }
51438+ be_clear(adapter);
51439+
51440+ if (state == pci_channel_io_perm_failure)
51441+ return PCI_ERS_RESULT_DISCONNECT;
51442+
51443+ pci_disable_device(pdev);
51444+
51445+ return PCI_ERS_RESULT_NEED_RESET;
51446+}
51447+
51448+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
51449+{
51450+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51451+ int status;
51452+
51453+ dev_info(&adapter->pdev->dev, "EEH reset\n");
51454+ adapter->eeh_err = false;
51455+
51456+ status = pci_enable_device(pdev);
51457+ if (status)
51458+ return PCI_ERS_RESULT_DISCONNECT;
51459+
51460+ pci_set_master(pdev);
51461+ pci_set_power_state(pdev, 0);
51462+ pci_restore_state(pdev);
51463+
51464+ /* Check if card is ok and fw is ready */
51465+ status = be_cmd_POST(adapter);
51466+ if (status)
51467+ return PCI_ERS_RESULT_DISCONNECT;
51468+
51469+ return PCI_ERS_RESULT_RECOVERED;
51470+}
51471+
51472+static void be_eeh_resume(struct pci_dev *pdev)
51473+{
51474+ int status = 0;
51475+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51476+ struct net_device *netdev = adapter->netdev;
51477+
51478+ dev_info(&adapter->pdev->dev, "EEH resume\n");
51479+
51480+ pci_save_state(pdev);
51481+
51482+ /* tell fw we're ready to fire cmds */
51483+ status = be_cmd_fw_init(adapter);
51484+ if (status)
51485+ goto err;
51486+
51487+ status = be_setup(adapter);
51488+ if (status)
51489+ goto err;
51490+
51491+ if (netif_running(netdev)) {
51492+ status = be_open(netdev);
51493+ if (status)
51494+ goto err;
51495+ }
51496+ netif_device_attach(netdev);
51497+ return;
51498+err:
51499+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
51500+ return;
51501+}
51502+
51503+static struct pci_error_handlers be_eeh_handlers = {
51504+ .error_detected = be_eeh_err_detected,
51505+ .slot_reset = be_eeh_reset,
51506+ .resume = be_eeh_resume,
51507+};
51508+
51509 static struct pci_driver be_driver = {
51510 .name = DRV_NAME,
51511 .id_table = be_dev_ids,
51512 .probe = be_probe,
51513 .remove = be_remove,
51514 .suspend = be_suspend,
51515- .resume = be_resume
51516+ .resume = be_resume,
51517+ .shutdown = be_shutdown,
51518+ .err_handler = &be_eeh_handlers
51519 };
51520
51521 static int __init be_init_module(void)
51522 {
51523- if (rx_frag_size != 8192 && rx_frag_size != 4096
51524- && rx_frag_size != 2048) {
51525+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
51526+ rx_frag_size != 2048) {
51527 printk(KERN_WARNING DRV_NAME
51528 " : Module param rx_frag_size must be 2048/4096/8192."
51529 " Using 2048\n");
51530 rx_frag_size = 2048;
51531 }
51532
51533+ if (!msix && num_vfs > 0) {
51534+ printk(KERN_WARNING DRV_NAME
51535+ " : MSIx required for num_vfs > 0. Ignoring msix=0\n");
51536+ msix = 1;
51537+ }
51538+
51539+
51540 return pci_register_driver(&be_driver);
51541 }
51542 module_init(be_init_module);
51543diff --git a/drivers/net/benet/be_misc.c b/drivers/net/benet/be_misc.c
51544new file mode 100644
51545index 0000000..4ab499f
51546--- /dev/null
51547+++ b/drivers/net/benet/be_misc.c
51548@@ -0,0 +1,106 @@
51549+/*
51550+ * Copyright (C) 2005 - 2011 Emulex
51551+ * All rights reserved.
51552+ *
51553+ * This program is free software; you can redistribute it and/or
51554+ * modify it under the terms of the GNU General Public License version 2
51555+ * as published by the Free Software Foundation. The full GNU General
51556+ * Public License is included in this distribution in the file called COPYING.
51557+ *
51558+ * Contact Information:
51559+ * linux-drivers@emulex.com
51560+ *
51561+ * Emulex
51562+ * 3333 Susan Street
51563+ * Costa Mesa, CA 92626
51564+ */
51565+#include "be.h"
51566+
51567+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51568+static ssize_t
51569+flash_fw_store(struct class_device *cd, const char *buf, size_t len)
51570+{
51571+ struct be_adapter *adapter =
51572+ netdev_priv(container_of(cd, struct net_device, class_dev));
51573+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51574+ int status;
51575+
51576+ if (!capable(CAP_NET_ADMIN))
51577+ return -EPERM;
51578+
51579+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51580+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51581+
51582+ /* Removing new-line char given by sysfs */
51583+ file_name[strlen(file_name) - 1] = '\0';
51584+
51585+ status = be_load_fw(adapter, file_name);
51586+ if (!status)
51587+ return len;
51588+ else
51589+ return status;
51590+}
51591+
51592+static CLASS_DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51593+
51594+static struct attribute *benet_attrs[] = {
51595+ &class_device_attr_flash_fw.attr,
51596+ NULL,
51597+};
51598+#else
51599+
51600+static ssize_t
51601+flash_fw_store(struct device *dev, struct device_attribute *attr,
51602+ const char *buf, size_t len)
51603+{
51604+ struct be_adapter *adapter =
51605+ netdev_priv(container_of(dev, struct net_device, dev));
51606+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51607+ int status;
51608+
51609+ if (!capable(CAP_NET_ADMIN))
51610+ return -EPERM;
51611+
51612+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51613+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51614+
51615+ /* Removing new-line char given by sysfs */
51616+ file_name[strlen(file_name) - 1] = '\0';
51617+
51618+ status = be_load_fw(adapter, file_name);
51619+ if (!status)
51620+ return len;
51621+ else
51622+ return status;
51623+}
51624+
51625+static DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51626+
51627+static struct attribute *benet_attrs[] = {
51628+ &dev_attr_flash_fw.attr,
51629+ NULL,
51630+};
51631+#endif
51632+
51633+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51634+#define CLASS_DEV class_dev
51635+#else
51636+#define CLASS_DEV dev
51637+#endif
51638+
51639+static struct attribute_group benet_attr_group = {.attrs = benet_attrs };
51640+
51641+void be_sysfs_create_group(struct be_adapter *adapter)
51642+{
51643+ int status;
51644+
51645+ status = sysfs_create_group(&adapter->netdev->CLASS_DEV.kobj,
51646+ &benet_attr_group);
51647+ if (status)
51648+ dev_err(&adapter->pdev->dev, "Could not create sysfs group\n");
51649+}
51650+
51651+void be_sysfs_remove_group(struct be_adapter *adapter)
51652+{
51653+ sysfs_remove_group(&adapter->netdev->CLASS_DEV.kobj, &benet_attr_group);
51654+}
51655diff --git a/drivers/net/benet/be_proc.c b/drivers/net/benet/be_proc.c
51656new file mode 100644
51657index 0000000..0bfdb3b
51658--- /dev/null
51659+++ b/drivers/net/benet/be_proc.c
51660@@ -0,0 +1,513 @@
51661+/*
51662+ * Copyright (C) 2005 - 2011 ServerEngines
51663+ * All rights reserved.
51664+ *
51665+ * This program is free software; you can redistribute it and/or
51666+ * modify it under the terms of the GNU General Public License version 2
51667+ * as published by the Free Software Foundation. The full GNU General
51668+ * Public License is included in this distribution in the file called COPYING.
51669+ *
51670+ * Contact Information:
51671+ * linux-drivers@serverengines.com
51672+ *
51673+ * ServerEngines
51674+ * 209 N. Fair Oaks Ave
51675+ * Sunnyvale, CA 94085
51676+ */
51677+#include <linux/proc_fs.h>
51678+#include "be.h"
51679+
51680+char *be_adpt_name[] = {
51681+ "driver/be2net0",
51682+ "driver/be2net1",
51683+ "driver/be2net2",
51684+ "driver/be2net3",
51685+ "driver/be2net4",
51686+ "driver/be2net5",
51687+ "driver/be2net6",
51688+ "driver/be2net7"
51689+};
51690+
51691+#define MAX_BE_DEVICES 8
51692+struct proc_dir_entry *be_proc_dir[MAX_BE_DEVICES];
51693+
51694+/*File to read Eth Ring Information */
51695+#define BE_ETH_RING_FILE "eth_ring"
51696+#define BE_DRVR_STAT_FILE "drvr_stat"
51697+
51698+/*
51699+ * this file enables user to read a 32 bit CSR register.
51700+ * to read 32 bit value of a register at offset 0x1234,
51701+ * first write the offset 0x1234 (echo "0x1234") in
51702+ * the file and then read the value from this file.
51703+ * the written offset is latched until another value is written
51704+ */
51705+#define BE_CSR_R_FILE "csrr"
51706+/*
51707+ * this file enables user to write to a 32 bit CSR register.
51708+ * to write a value 0xdeadbeef to a register at offset 0x1234,
51709+ * write 0x1234 0xdeadbeef (echo "0x1234 0xdeadbeeb") to
51710+ * the file.
51711+ */
51712+#define BE_CSR_W_FILE "csrw"
51713+
51714+#define BE_PROC_MODE 0600
51715+
51716+static char read_eth_ring_buf[4096];
51717+static int read_eth_ring_count;
51718+
51719+/*
51720+ * Get Various Eth Ring Properties
51721+ */
51722+static int proc_eth_read_ring(char *page, char **start,
51723+ off_t off, int count, int *eof, void *data)
51724+{
51725+ int i, n;
51726+ char *p = read_eth_ring_buf;
51727+ struct be_adapter *adapter = (struct be_adapter *) data;
51728+
51729+ if (off == 0) {
51730+ /* Reset read_eth_ring_count */
51731+ read_eth_ring_count = 0;
51732+
51733+ n = sprintf(p, " PhyAddr VirtAddr Size TotalEntries ProducerIndex ConsumerIndex NumUsed\n");
51734+ p += n;
51735+ read_eth_ring_count += n;
51736+
51737+ n = sprintf(p, " ------- -------- ---- ------------ ------------- ------------- -------\n");
51738+ p += n;
51739+ read_eth_ring_count += n;
51740+
51741+ n = sprintf(p, "%s", "EthSendRing");
51742+ p += n;
51743+ read_eth_ring_count += n;
51744+
51745+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13u %7u \n",
51746+ (long) adapter->tx_obj.q.dma_mem.dma,
51747+ (void *)adapter->tx_obj.q.dma_mem.va,
51748+ (u32) (adapter->tx_obj.q.len *
51749+ sizeof(struct be_eth_wrb)),
51750+ adapter->tx_obj.q.len, adapter->tx_obj.q.head,
51751+ adapter->tx_obj.q.tail,
51752+ atomic_read(&adapter->tx_obj.q.used));
51753+
51754+ p += n;
51755+ read_eth_ring_count += n;
51756+
51757+ /* Get Eth Send Compl Queue Details */
51758+ n = sprintf(p, "%s", "EthSendCmplRing");
51759+ p += n;
51760+ read_eth_ring_count += n;
51761+
51762+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51763+ (long)adapter->tx_obj.cq.dma_mem.dma,
51764+ (void *)adapter->tx_obj.cq.dma_mem.va,
51765+ (u32) (adapter->tx_obj.cq.len *
51766+ sizeof(struct be_eth_tx_compl)),
51767+ adapter->tx_obj.cq.len, "NA",
51768+ adapter->tx_obj.cq.tail, "NA");
51769+
51770+ p += n;
51771+ read_eth_ring_count += n;
51772+ /* Get Eth Rx Queue Details */
51773+ n = sprintf(p, "%s", "EthRxRing");
51774+ p += n;
51775+ read_eth_ring_count += n;
51776+
51777+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13s %7u \n",
51778+ (long)adapter->rx_obj.q.dma_mem.dma,
51779+ (void *)adapter->rx_obj.q.dma_mem.va,
51780+ (u32) (adapter->rx_obj.q.len *
51781+ sizeof(struct be_eth_rx_d)),
51782+ adapter->rx_obj.q.len, adapter->rx_obj.q.head,"NA",
51783+ atomic_read(&adapter->rx_obj.q.used));
51784+ p += n;
51785+ read_eth_ring_count += n;
51786+
51787+ /* Get Eth Unicast Rx Compl Queue Details */
51788+ n = sprintf(p, "%s", "EthRxCmplRing");
51789+ p += n;
51790+ read_eth_ring_count += n;
51791+
51792+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51793+ (long)adapter->rx_obj.cq.dma_mem.dma,
51794+ (void *)adapter->rx_obj.cq.dma_mem.va,
51795+ (u32) (adapter->rx_obj.cq.len *
51796+ sizeof(struct be_eth_rx_compl)),
51797+ adapter->rx_obj.cq.len, "NA",
51798+ adapter->rx_obj.cq.tail, "NA");
51799+ p += n;
51800+ read_eth_ring_count += n;
51801+
51802+ /* Get Eth Event Queue Details */
51803+ n = sprintf(p, "%s", "EthTxEventRing");
51804+ p += n;
51805+ read_eth_ring_count += n;
51806+
51807+ n = sprintf(p,
51808+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51809+ (long) adapter->tx_eq.q.dma_mem.dma,
51810+ (void *)adapter->tx_eq.q.dma_mem.va,
51811+ (u32) (adapter->tx_eq.q.len *
51812+ sizeof(struct be_eq_entry)),
51813+ adapter->tx_eq.q.len, "NA",
51814+ adapter->tx_eq.q.tail, "NA");
51815+
51816+ p += n;
51817+ read_eth_ring_count += n;
51818+
51819+ /* Get Eth Event Queue Details */
51820+ n = sprintf(p, "%s", "EthRxEventRing");
51821+ p += n;
51822+ read_eth_ring_count += n;
51823+
51824+ n = sprintf(p,
51825+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51826+ (long) adapter->rx_eq.q.dma_mem.dma,
51827+ (void *)adapter->rx_eq.q.dma_mem.va,
51828+ (u32) (adapter->rx_eq.q.len *
51829+ sizeof(struct be_eq_entry)),
51830+ adapter->rx_eq.q.len, "NA",
51831+ adapter->rx_eq.q.tail, "NA");
51832+
51833+ p += n;
51834+ read_eth_ring_count += n;
51835+ }
51836+
51837+ *start = page;
51838+ /* copy whatever we can */
51839+ if (count < (read_eth_ring_count - off)) {
51840+ i = count;
51841+ *eof = 0; /* More bytes left */
51842+ } else {
51843+ i = read_eth_ring_count - off;
51844+ *eof = 1; /* Nothing left. indicate EOF */
51845+ }
51846+
51847+ memcpy(page, read_eth_ring_buf + off, i);
51848+ return (i);
51849+}
51850+
51851+static int proc_eth_write_ring(struct file *file,
51852+ const char *buffer, unsigned long count,
51853+ void *data)
51854+{
51855+ return (count); /* we do not support write */
51856+}
51857+
51858+/*
51859+ * read the driver stats.
51860+ */
51861+static int proc_read_drvr_stat(char *page, char **start,
51862+ off_t off, int count, int *eof, void *data)
51863+{
51864+ int n, lro_cp;
51865+ char *p = page;
51866+ struct be_adapter *adapter = (struct be_adapter *) data;
51867+ struct net_device *netdev = adapter->netdev;
51868+
51869+ if (off == 0) {
51870+ n = sprintf(p, "interface = %s\n", netdev->name);
51871+ p += n;
51872+ n = sprintf(p, "tx_reqs = %d\n",
51873+ drvr_stats(adapter)->be_tx_reqs);
51874+ p += n;
51875+ n = sprintf(p, "tx_stops = %d\n",
51876+ drvr_stats(adapter)->be_tx_stops);
51877+ p += n;
51878+ n = sprintf(p, "fwd_reqs = %d\n",
51879+ drvr_stats(adapter)->be_fwd_reqs);
51880+ p += n;
51881+ n = sprintf(p, "tx_wrbs = %d\n",
51882+ drvr_stats(adapter)->be_tx_wrbs);
51883+ p += n;
51884+ n = sprintf(p, "rx_poll = %d\n", drvr_stats(adapter)->be_rx_polls);
51885+ p += n;
51886+ n = sprintf(p, "tx_events = %d\n",
51887+ drvr_stats(adapter)->be_tx_events);
51888+ p += n;
51889+ n = sprintf(p, "rx_events = %d\n",
51890+ drvr_stats(adapter)->be_rx_events);
51891+ p += n;
51892+ n = sprintf(p, "tx_compl = %d\n",
51893+ drvr_stats(adapter)->be_tx_compl);
51894+ p += n;
51895+ n = sprintf(p, "rx_compl = %d\n",
51896+ drvr_stats(adapter)->be_rx_compl);
51897+ p += n;
51898+ n = sprintf(p, "ethrx_post_fail = %d\n",
51899+ drvr_stats(adapter)->be_ethrx_post_fail);
51900+ p += n;
51901+ n = sprintf(p, "802.3_dropped_frames = %d\n",
51902+ drvr_stats(adapter)->be_802_3_dropped_frames);
51903+ p += n;
51904+ n = sprintf(p, "802.3_malformed_frames = %d\n",
51905+ drvr_stats(adapter)->be_802_3_malformed_frames);
51906+ p += n;
51907+ n = sprintf(p, "eth_tx_rate = %d\n",
51908+ drvr_stats(adapter)->be_tx_rate);
51909+ p += n;
51910+ n = sprintf(p, "eth_rx_rate = %d\n",
51911+ drvr_stats(adapter)->be_rx_rate);
51912+ p += n;
51913+
51914+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_data[0] +
51915+ drvr_stats(adapter)->be_lro_hgram_data[1] +
51916+ drvr_stats(adapter)->be_lro_hgram_data[2] +
51917+ drvr_stats(adapter)->be_lro_hgram_data[3] +
51918+ drvr_stats(adapter)->be_lro_hgram_data[4] +
51919+ drvr_stats(adapter)->be_lro_hgram_data[5] +
51920+ drvr_stats(adapter)->be_lro_hgram_data[6] +
51921+ drvr_stats(adapter)->be_lro_hgram_data[7])/100;
51922+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51923+ n = sprintf(p,
51924+ "LRO data count %% histogram (1, 2-3, 4-5,..,>=16) = "
51925+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51926+ drvr_stats(adapter)->be_lro_hgram_data[0]/lro_cp,
51927+ drvr_stats(adapter)->be_lro_hgram_data[1]/lro_cp,
51928+ drvr_stats(adapter)->be_lro_hgram_data[2]/lro_cp,
51929+ drvr_stats(adapter)->be_lro_hgram_data[3]/lro_cp,
51930+ drvr_stats(adapter)->be_lro_hgram_data[4]/lro_cp,
51931+ drvr_stats(adapter)->be_lro_hgram_data[5]/lro_cp,
51932+ drvr_stats(adapter)->be_lro_hgram_data[6]/lro_cp,
51933+ drvr_stats(adapter)->be_lro_hgram_data[7]/lro_cp);
51934+ p += n;
51935+
51936+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_ack[0] +
51937+ drvr_stats(adapter)->be_lro_hgram_ack[1] +
51938+ drvr_stats(adapter)->be_lro_hgram_ack[2] +
51939+ drvr_stats(adapter)->be_lro_hgram_ack[3] +
51940+ drvr_stats(adapter)->be_lro_hgram_ack[4] +
51941+ drvr_stats(adapter)->be_lro_hgram_ack[5] +
51942+ drvr_stats(adapter)->be_lro_hgram_ack[6] +
51943+ drvr_stats(adapter)->be_lro_hgram_ack[7])/100;
51944+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51945+ n = sprintf(p,
51946+ "LRO ack count %% histogram (1, 2-3, 4-5,..,>=16) = "
51947+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51948+ drvr_stats(adapter)->be_lro_hgram_ack[0]/lro_cp,
51949+ drvr_stats(adapter)->be_lro_hgram_ack[1]/lro_cp,
51950+ drvr_stats(adapter)->be_lro_hgram_ack[2]/lro_cp,
51951+ drvr_stats(adapter)->be_lro_hgram_ack[3]/lro_cp,
51952+ drvr_stats(adapter)->be_lro_hgram_ack[4]/lro_cp,
51953+ drvr_stats(adapter)->be_lro_hgram_ack[5]/lro_cp,
51954+ drvr_stats(adapter)->be_lro_hgram_ack[6]/lro_cp,
51955+ drvr_stats(adapter)->be_lro_hgram_ack[7]/lro_cp);
51956+ p += n;
51957+ n = sprintf(p, "rx_eq_delay = %d \n", adapter->rx_eq.cur_eqd);
51958+ p += n;
51959+ n = sprintf(p, "rx frags per sec=%d \n",
51960+ drvr_stats(adapter)->be_rx_fps);
51961+ p += n;
51962+
51963+ }
51964+ *eof = 1;
51965+ return (p - page);
51966+}
51967+
51968+static int proc_write_drvr_stat(struct file *file,
51969+ const char *buffer, unsigned long count,
51970+ void *data)
51971+{
51972+ struct be_adapter *adapter = (struct be_adapter *) data;
51973+
51974+ memset(&(adapter->stats.drvr_stats), 0,
51975+ sizeof(adapter->stats.drvr_stats));
51976+ return (count); /* we do not support write */
51977+}
51978+
51979+#if 0
51980+/* the following are some of the functions that are needed here
51981+ * until all initializations are done by MPU.
51982+ */
51983+
51984+u32
51985+CsrReadDr(void* BaseAddress, u32 Offset)
51986+{
51987+ u32 *rp;
51988+
51989+ rp = (u32 *) (((u8 *) BaseAddress) + Offset);
51990+ return (*rp);
51991+}
51992+
51993+/*!
51994+
51995+@brief
51996+ This routine writes to a register located within the CSR
51997+ space for a given function object.
51998+
51999+@param
52000+ FuncObj - Pointer to the function object to read from.
52001+
52002+@param
52003+ Offset - The Offset (in bytes) to write to within the function's CSR space.
52004+
52005+@param
52006+ Value - The value to write to the register.
52007+
52008+@return
52009+
52010+@note
52011+ IRQL: any
52012+
52013+*/
52014+void
52015+CsrWriteDr(void* BaseAddress, u32 Offset, u32 Value)
52016+{
52017+ u32 *Register;
52018+
52019+ Register = (u32 *) (((u8 *) BaseAddress) + Offset);
52020+
52021+ //TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value);
52022+ *Register = Value;
52023+}
52024+u32 be_proc_csrr_offset = -1; /* to latch the offset of next CSR Read req. */
52025+
52026+/*
52027+ * read the csr_r file. return the 32 bit register value from
52028+ * CSR space at offset latched in the global location
52029+ * be_proc_csrr_offset
52030+ */
52031+static int proc_read_csr_r(char *page, char **start,
52032+ off_t off, int count, int *eof, void *data)
52033+{
52034+ struct be_adapter * adapter = (struct be_adapter *)data;
52035+ u32 val;
52036+ int n = 0;
52037+ if (be_proc_csrr_offset == -1)
52038+ return -EINVAL;
52039+
52040+ if (off == 0) {
52041+ /* read the CSR at offset be_proc_csrr_offset and return */
52042+ val = CsrReadDr(adapter->csr_va, be_proc_csrr_offset);
52043+ n = sprintf(page, "0x%x\n", val);
52044+ }
52045+ *eof = 1;
52046+ return n;
52047+}
52048+
52049+/*
52050+ * save the written value in be_proc_csrr_offset for next
52051+ * read from the file
52052+ */
52053+static int proc_write_csr_r(struct file *file,
52054+ const char *buffer, unsigned long count, void *data)
52055+{
52056+ char buf[64];
52057+ u32 n;
52058+
52059+ if (count > sizeof(buf) + 1)
52060+ return -EINVAL;
52061+ if (copy_from_user(buf, buffer, count))
52062+ return -EFAULT;
52063+ buf[count] = '\0';
52064+
52065+ n = simple_strtoul(buf, NULL, 16);
52066+ if (n < 0x50000)
52067+ be_proc_csrr_offset = n;
52068+ return (count);
52069+}
52070+
52071+/*
52072+ * return the latched offset for reading the csr_r file.
52073+ */
52074+static int proc_read_csr_w(char *page, char **start,
52075+ off_t off, int count, int *eof, void *data)
52076+{
52077+
52078+ *eof = 1;
52079+ return sprintf(page, "0x%x\n", be_proc_csrr_offset);
52080+}
52081+
52082+/*
52083+ * the incoming string is of the form "<offset> <value>"
52084+ * where the offset is the offset of the register to be written
52085+ * and value is the value to be written.
52086+ */
52087+static int proc_write_csr_w(struct file *file,
52088+ const char *buffer, unsigned long count,
52089+ void *data)
52090+{
52091+ char buf[64];
52092+ char *p;
52093+ u32 n, val;
52094+ struct be_adapter * adapter = (struct be_adapter *)data;
52095+
52096+ if (count > sizeof(buf) + 1)
52097+ return -EINVAL;
52098+ if (copy_from_user(buf, buffer, count))
52099+ return -EFAULT;
52100+ buf[count] = '\0';
52101+
52102+ n = simple_strtoul(buf, &p, 16);
52103+ if (n > 0x50000)
52104+ return -EINVAL;
52105+
52106+ /* now get the actual value to be written */
52107+ while (*p == ' ' || *p == '\t')
52108+ p++;
52109+ val = simple_strtoul(p, NULL, 16);
52110+ CsrWriteDr(adapter->csr_va, n, val);
52111+ return (count);
52112+}
52113+#endif
52114+
52115+void be_init_procfs(struct be_adapter *adapter, int adapt_num)
52116+{
52117+ static struct proc_dir_entry *pde;
52118+
52119+ if (adapt_num > MAX_BE_DEVICES - 1)
52120+ return;
52121+
52122+ /* create directory */
52123+ be_proc_dir[adapt_num] =
52124+ proc_mkdir(be_adpt_name[adapt_num], NULL);
52125+ if (be_proc_dir[adapt_num]) {
52126+ (be_proc_dir[adapt_num])->owner = THIS_MODULE;
52127+ }
52128+
52129+ pde = create_proc_entry(BE_ETH_RING_FILE, BE_PROC_MODE,
52130+ be_proc_dir[adapt_num]);
52131+ if (pde) {
52132+ pde->read_proc = proc_eth_read_ring;
52133+ pde->write_proc = proc_eth_write_ring;
52134+ pde->data = adapter;
52135+ pde->owner = THIS_MODULE;
52136+ }
52137+
52138+ pde = create_proc_entry(BE_DRVR_STAT_FILE, BE_PROC_MODE,
52139+ be_proc_dir[adapt_num]);
52140+ if (pde) {
52141+ pde->read_proc = proc_read_drvr_stat;
52142+ pde->write_proc = proc_write_drvr_stat;
52143+ pde->data = adapter;
52144+ pde->owner = THIS_MODULE;
52145+ }
52146+
52147+#if 0
52148+ if ((pde = create_proc_entry(BE_CSR_R_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52149+ pde->read_proc = proc_read_csr_r;
52150+ pde->write_proc = proc_write_csr_r;
52151+ pde->data = adapter;
52152+ pde->owner = THIS_MODULE;
52153+ }
52154+
52155+ if ((pde = create_proc_entry(BE_CSR_W_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52156+ pde->read_proc = proc_read_csr_w;
52157+ pde->write_proc = proc_write_csr_w;
52158+ pde->data = adapter;
52159+ pde->owner = THIS_MODULE;
52160+ }
52161+#endif
52162+}
52163+
52164+void be_cleanup_procfs(struct be_adapter *adapter, int adapt_num)
52165+{
52166+ if (adapt_num > MAX_BE_DEVICES - 1)
52167+ return;
52168+ remove_proc_entry(BE_ETH_RING_FILE, be_proc_dir[adapt_num]);
52169+ remove_proc_entry(BE_DRVR_STAT_FILE, be_proc_dir[adapt_num]);
52170+ remove_proc_entry(BE_CSR_R_FILE, be_proc_dir[adapt_num]);
52171+ remove_proc_entry(BE_CSR_W_FILE, be_proc_dir[adapt_num]);
52172+ remove_proc_entry(be_adpt_name[adapt_num], NULL);
52173+}
52174diff --git a/drivers/net/benet/version.h b/drivers/net/benet/version.h
52175new file mode 100644
52176index 0000000..c7ed692
52177--- /dev/null
52178+++ b/drivers/net/benet/version.h
52179@@ -0,0 +1,51 @@
52180+#define STR_BE_BRANCH "0" \r
52181+#define STR_BE_BUILD "479" \r
52182+#define STR_BE_DOT "0"\r
52183+#define STR_BE_MINOR "0"\r
52184+#define STR_BE_MAJOR "4"\r
52185+\r
52186+#define BE_BRANCH 0 \r
52187+#define BE_BUILD 479 \r
52188+#define BE_DOT 0\r
52189+#define BE_MINOR 0\r
52190+#define BE_MAJOR 4\r
52191+\r
52192+#define MGMT_BRANCH 0\r
52193+#define MGMT_BUILDNUM 479\r
52194+#define MGMT_MINOR 0\r
52195+#define MGMT_MAJOR 4\r
52196+\r
52197+#define BE_REDBOOT_VERSION "2.0.5.0"\r
52198+\r
52199+//start-auto\r
52200+#define BUILD_MONTH "12"\r
52201+#define BUILD_MONTH_NAME "December"\r
52202+#define BUILD_DAY "6"\r
52203+#define BUILD_YEAR "2011"\r
52204+#define BUILD_24HOUR "21"\r
52205+#define BUILD_12HOUR "9"\r
52206+#define BUILD_AM_PM "PM"\r
52207+#define BUILD_MIN "48"\r
52208+#define BUILD_SEC "05"\r
52209+#define BUILD_MONTH_NUMBER 12\r
52210+#define BUILD_DAY_NUMBER 6\r
52211+#define BUILD_YEAR_NUMBER 2011\r
52212+#define BUILD_24HOUR_NUMBER 21\r
52213+#define BUILD_12HOUR_NUMBER 9\r
52214+#define BUILD_MIN_NUMBER 48\r
52215+#define BUILD_SEC_NUMBER 5\r
52216+#undef MAJOR_BUILD\r
52217+#undef MINOR_BUILD\r
52218+#undef DOT_BUILD\r
52219+#define NUMBERED_BUILD\r
52220+#undef BRANCH_BUILD\r
52221+//end-auto\r
52222+\r
52223+#define ELX_FCOE_XROM_BIOS_VER "7.03a1"\r
52224+#define ELX_FCoE_X86_VER "4.02a1"\r
52225+#define ELX_FCoE_EFI_VER "5.01a1"\r
52226+#define ELX_FCoE_FCODE_VER "4.01a0"\r
52227+#define ELX_PXE_BIOS_VER "3.00a5"\r
52228+#define ELX_UEFI_NIC_VER "2.10A10"\r
52229+#define ELX_UEFI_FCODE_VER "1.10A0"\r
52230+#define ELX_ISCSI_BIOS_VER "1.00A8"\r
52231diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
52232index 4874b2b..67f8526 100644
52233--- a/drivers/net/bnx2.c
52234+++ b/drivers/net/bnx2.c
52235@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
52236 int rc = 0;
52237 u32 magic, csum;
52238
52239+ pax_track_stack();
52240+
52241 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
52242 goto test_nvram_done;
52243
52244diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
52245index fd3eb07..8a6978d 100644
52246--- a/drivers/net/cxgb3/l2t.h
52247+++ b/drivers/net/cxgb3/l2t.h
52248@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
52249 */
52250 struct l2t_skb_cb {
52251 arp_failure_handler_func arp_failure_handler;
52252-};
52253+} __no_const;
52254
52255 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
52256
52257diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
52258index 032cfe0..411af379 100644
52259--- a/drivers/net/cxgb3/t3_hw.c
52260+++ b/drivers/net/cxgb3/t3_hw.c
52261@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
52262 int i, addr, ret;
52263 struct t3_vpd vpd;
52264
52265+ pax_track_stack();
52266+
52267 /*
52268 * Card information is normally at VPD_BASE but some early cards had
52269 * it at 0.
52270diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
52271index d1e0563..b9e129c 100644
52272--- a/drivers/net/e1000e/82571.c
52273+++ b/drivers/net/e1000e/82571.c
52274@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
52275 {
52276 struct e1000_hw *hw = &adapter->hw;
52277 struct e1000_mac_info *mac = &hw->mac;
52278- struct e1000_mac_operations *func = &mac->ops;
52279+ e1000_mac_operations_no_const *func = &mac->ops;
52280 u32 swsm = 0;
52281 u32 swsm2 = 0;
52282 bool force_clear_smbi = false;
52283@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
52284 temp = er32(ICRXDMTC);
52285 }
52286
52287-static struct e1000_mac_operations e82571_mac_ops = {
52288+static const struct e1000_mac_operations e82571_mac_ops = {
52289 /* .check_mng_mode: mac type dependent */
52290 /* .check_for_link: media type dependent */
52291 .id_led_init = e1000e_id_led_init,
52292@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
52293 .setup_led = e1000e_setup_led_generic,
52294 };
52295
52296-static struct e1000_phy_operations e82_phy_ops_igp = {
52297+static const struct e1000_phy_operations e82_phy_ops_igp = {
52298 .acquire_phy = e1000_get_hw_semaphore_82571,
52299 .check_reset_block = e1000e_check_reset_block_generic,
52300 .commit_phy = NULL,
52301@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
52302 .cfg_on_link_up = NULL,
52303 };
52304
52305-static struct e1000_phy_operations e82_phy_ops_m88 = {
52306+static const struct e1000_phy_operations e82_phy_ops_m88 = {
52307 .acquire_phy = e1000_get_hw_semaphore_82571,
52308 .check_reset_block = e1000e_check_reset_block_generic,
52309 .commit_phy = e1000e_phy_sw_reset,
52310@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
52311 .cfg_on_link_up = NULL,
52312 };
52313
52314-static struct e1000_phy_operations e82_phy_ops_bm = {
52315+static const struct e1000_phy_operations e82_phy_ops_bm = {
52316 .acquire_phy = e1000_get_hw_semaphore_82571,
52317 .check_reset_block = e1000e_check_reset_block_generic,
52318 .commit_phy = e1000e_phy_sw_reset,
52319@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
52320 .cfg_on_link_up = NULL,
52321 };
52322
52323-static struct e1000_nvm_operations e82571_nvm_ops = {
52324+static const struct e1000_nvm_operations e82571_nvm_ops = {
52325 .acquire_nvm = e1000_acquire_nvm_82571,
52326 .read_nvm = e1000e_read_nvm_eerd,
52327 .release_nvm = e1000_release_nvm_82571,
52328diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
52329index 47db9bd..fa58ccd 100644
52330--- a/drivers/net/e1000e/e1000.h
52331+++ b/drivers/net/e1000e/e1000.h
52332@@ -375,9 +375,9 @@ struct e1000_info {
52333 u32 pba;
52334 u32 max_hw_frame_size;
52335 s32 (*get_variants)(struct e1000_adapter *);
52336- struct e1000_mac_operations *mac_ops;
52337- struct e1000_phy_operations *phy_ops;
52338- struct e1000_nvm_operations *nvm_ops;
52339+ const struct e1000_mac_operations *mac_ops;
52340+ const struct e1000_phy_operations *phy_ops;
52341+ const struct e1000_nvm_operations *nvm_ops;
52342 };
52343
52344 /* hardware capability, feature, and workaround flags */
52345diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
52346index ae5d736..e9a93a1 100644
52347--- a/drivers/net/e1000e/es2lan.c
52348+++ b/drivers/net/e1000e/es2lan.c
52349@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
52350 {
52351 struct e1000_hw *hw = &adapter->hw;
52352 struct e1000_mac_info *mac = &hw->mac;
52353- struct e1000_mac_operations *func = &mac->ops;
52354+ e1000_mac_operations_no_const *func = &mac->ops;
52355
52356 /* Set media type */
52357 switch (adapter->pdev->device) {
52358@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
52359 temp = er32(ICRXDMTC);
52360 }
52361
52362-static struct e1000_mac_operations es2_mac_ops = {
52363+static const struct e1000_mac_operations es2_mac_ops = {
52364 .id_led_init = e1000e_id_led_init,
52365 .check_mng_mode = e1000e_check_mng_mode_generic,
52366 /* check_for_link dependent on media type */
52367@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
52368 .setup_led = e1000e_setup_led_generic,
52369 };
52370
52371-static struct e1000_phy_operations es2_phy_ops = {
52372+static const struct e1000_phy_operations es2_phy_ops = {
52373 .acquire_phy = e1000_acquire_phy_80003es2lan,
52374 .check_reset_block = e1000e_check_reset_block_generic,
52375 .commit_phy = e1000e_phy_sw_reset,
52376@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
52377 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
52378 };
52379
52380-static struct e1000_nvm_operations es2_nvm_ops = {
52381+static const struct e1000_nvm_operations es2_nvm_ops = {
52382 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
52383 .read_nvm = e1000e_read_nvm_eerd,
52384 .release_nvm = e1000_release_nvm_80003es2lan,
52385diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
52386index 11f3b7c..6381887 100644
52387--- a/drivers/net/e1000e/hw.h
52388+++ b/drivers/net/e1000e/hw.h
52389@@ -753,6 +753,7 @@ struct e1000_mac_operations {
52390 s32 (*setup_physical_interface)(struct e1000_hw *);
52391 s32 (*setup_led)(struct e1000_hw *);
52392 };
52393+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52394
52395 /* Function pointers for the PHY. */
52396 struct e1000_phy_operations {
52397@@ -774,6 +775,7 @@ struct e1000_phy_operations {
52398 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
52399 s32 (*cfg_on_link_up)(struct e1000_hw *);
52400 };
52401+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52402
52403 /* Function pointers for the NVM. */
52404 struct e1000_nvm_operations {
52405@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
52406 s32 (*validate_nvm)(struct e1000_hw *);
52407 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
52408 };
52409+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52410
52411 struct e1000_mac_info {
52412- struct e1000_mac_operations ops;
52413+ e1000_mac_operations_no_const ops;
52414
52415 u8 addr[6];
52416 u8 perm_addr[6];
52417@@ -823,7 +826,7 @@ struct e1000_mac_info {
52418 };
52419
52420 struct e1000_phy_info {
52421- struct e1000_phy_operations ops;
52422+ e1000_phy_operations_no_const ops;
52423
52424 enum e1000_phy_type type;
52425
52426@@ -857,7 +860,7 @@ struct e1000_phy_info {
52427 };
52428
52429 struct e1000_nvm_info {
52430- struct e1000_nvm_operations ops;
52431+ e1000_nvm_operations_no_const ops;
52432
52433 enum e1000_nvm_type type;
52434 enum e1000_nvm_override override;
52435diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
52436index de39f9a..e28d3e0 100644
52437--- a/drivers/net/e1000e/ich8lan.c
52438+++ b/drivers/net/e1000e/ich8lan.c
52439@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
52440 }
52441 }
52442
52443-static struct e1000_mac_operations ich8_mac_ops = {
52444+static const struct e1000_mac_operations ich8_mac_ops = {
52445 .id_led_init = e1000e_id_led_init,
52446 .check_mng_mode = e1000_check_mng_mode_ich8lan,
52447 .check_for_link = e1000_check_for_copper_link_ich8lan,
52448@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
52449 /* id_led_init dependent on mac type */
52450 };
52451
52452-static struct e1000_phy_operations ich8_phy_ops = {
52453+static const struct e1000_phy_operations ich8_phy_ops = {
52454 .acquire_phy = e1000_acquire_swflag_ich8lan,
52455 .check_reset_block = e1000_check_reset_block_ich8lan,
52456 .commit_phy = NULL,
52457@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
52458 .write_phy_reg = e1000e_write_phy_reg_igp,
52459 };
52460
52461-static struct e1000_nvm_operations ich8_nvm_ops = {
52462+static const struct e1000_nvm_operations ich8_nvm_ops = {
52463 .acquire_nvm = e1000_acquire_nvm_ich8lan,
52464 .read_nvm = e1000_read_nvm_ich8lan,
52465 .release_nvm = e1000_release_nvm_ich8lan,
52466diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
52467index 18d5fbb..542d96d 100644
52468--- a/drivers/net/fealnx.c
52469+++ b/drivers/net/fealnx.c
52470@@ -151,7 +151,7 @@ struct chip_info {
52471 int flags;
52472 };
52473
52474-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
52475+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
52476 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52477 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
52478 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52479diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
52480index 0e5b54b..b503f82 100644
52481--- a/drivers/net/hamradio/6pack.c
52482+++ b/drivers/net/hamradio/6pack.c
52483@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
52484 unsigned char buf[512];
52485 int count1;
52486
52487+ pax_track_stack();
52488+
52489 if (!count)
52490 return;
52491
52492diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
52493index 5862282..7cce8cb 100644
52494--- a/drivers/net/ibmveth.c
52495+++ b/drivers/net/ibmveth.c
52496@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
52497 NULL,
52498 };
52499
52500-static struct sysfs_ops veth_pool_ops = {
52501+static const struct sysfs_ops veth_pool_ops = {
52502 .show = veth_pool_show,
52503 .store = veth_pool_store,
52504 };
52505diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
52506index d617f2d..57b5309 100644
52507--- a/drivers/net/igb/e1000_82575.c
52508+++ b/drivers/net/igb/e1000_82575.c
52509@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
52510 wr32(E1000_VT_CTL, vt_ctl);
52511 }
52512
52513-static struct e1000_mac_operations e1000_mac_ops_82575 = {
52514+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
52515 .reset_hw = igb_reset_hw_82575,
52516 .init_hw = igb_init_hw_82575,
52517 .check_for_link = igb_check_for_link_82575,
52518@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
52519 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
52520 };
52521
52522-static struct e1000_phy_operations e1000_phy_ops_82575 = {
52523+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
52524 .acquire = igb_acquire_phy_82575,
52525 .get_cfg_done = igb_get_cfg_done_82575,
52526 .release = igb_release_phy_82575,
52527 };
52528
52529-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52530+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52531 .acquire = igb_acquire_nvm_82575,
52532 .read = igb_read_nvm_eerd,
52533 .release = igb_release_nvm_82575,
52534diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
52535index 72081df..d855cf5 100644
52536--- a/drivers/net/igb/e1000_hw.h
52537+++ b/drivers/net/igb/e1000_hw.h
52538@@ -288,6 +288,7 @@ struct e1000_mac_operations {
52539 s32 (*read_mac_addr)(struct e1000_hw *);
52540 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
52541 };
52542+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52543
52544 struct e1000_phy_operations {
52545 s32 (*acquire)(struct e1000_hw *);
52546@@ -303,6 +304,7 @@ struct e1000_phy_operations {
52547 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
52548 s32 (*write_reg)(struct e1000_hw *, u32, u16);
52549 };
52550+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52551
52552 struct e1000_nvm_operations {
52553 s32 (*acquire)(struct e1000_hw *);
52554@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
52555 void (*release)(struct e1000_hw *);
52556 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
52557 };
52558+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52559
52560 struct e1000_info {
52561 s32 (*get_invariants)(struct e1000_hw *);
52562@@ -321,7 +324,7 @@ struct e1000_info {
52563 extern const struct e1000_info e1000_82575_info;
52564
52565 struct e1000_mac_info {
52566- struct e1000_mac_operations ops;
52567+ e1000_mac_operations_no_const ops;
52568
52569 u8 addr[6];
52570 u8 perm_addr[6];
52571@@ -365,7 +368,7 @@ struct e1000_mac_info {
52572 };
52573
52574 struct e1000_phy_info {
52575- struct e1000_phy_operations ops;
52576+ e1000_phy_operations_no_const ops;
52577
52578 enum e1000_phy_type type;
52579
52580@@ -400,7 +403,7 @@ struct e1000_phy_info {
52581 };
52582
52583 struct e1000_nvm_info {
52584- struct e1000_nvm_operations ops;
52585+ e1000_nvm_operations_no_const ops;
52586
52587 enum e1000_nvm_type type;
52588 enum e1000_nvm_override override;
52589@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
52590 s32 (*check_for_ack)(struct e1000_hw *, u16);
52591 s32 (*check_for_rst)(struct e1000_hw *, u16);
52592 };
52593+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52594
52595 struct e1000_mbx_stats {
52596 u32 msgs_tx;
52597@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
52598 };
52599
52600 struct e1000_mbx_info {
52601- struct e1000_mbx_operations ops;
52602+ e1000_mbx_operations_no_const ops;
52603 struct e1000_mbx_stats stats;
52604 u32 timeout;
52605 u32 usec_delay;
52606diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
52607index 1e8ce37..549c453 100644
52608--- a/drivers/net/igbvf/vf.h
52609+++ b/drivers/net/igbvf/vf.h
52610@@ -187,9 +187,10 @@ struct e1000_mac_operations {
52611 s32 (*read_mac_addr)(struct e1000_hw *);
52612 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
52613 };
52614+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52615
52616 struct e1000_mac_info {
52617- struct e1000_mac_operations ops;
52618+ e1000_mac_operations_no_const ops;
52619 u8 addr[6];
52620 u8 perm_addr[6];
52621
52622@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
52623 s32 (*check_for_ack)(struct e1000_hw *);
52624 s32 (*check_for_rst)(struct e1000_hw *);
52625 };
52626+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52627
52628 struct e1000_mbx_stats {
52629 u32 msgs_tx;
52630@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
52631 };
52632
52633 struct e1000_mbx_info {
52634- struct e1000_mbx_operations ops;
52635+ e1000_mbx_operations_no_const ops;
52636 struct e1000_mbx_stats stats;
52637 u32 timeout;
52638 u32 usec_delay;
52639diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
52640index aa7286b..a61394f 100644
52641--- a/drivers/net/iseries_veth.c
52642+++ b/drivers/net/iseries_veth.c
52643@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
52644 NULL
52645 };
52646
52647-static struct sysfs_ops veth_cnx_sysfs_ops = {
52648+static const struct sysfs_ops veth_cnx_sysfs_ops = {
52649 .show = veth_cnx_attribute_show
52650 };
52651
52652@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
52653 NULL
52654 };
52655
52656-static struct sysfs_ops veth_port_sysfs_ops = {
52657+static const struct sysfs_ops veth_port_sysfs_ops = {
52658 .show = veth_port_attribute_show
52659 };
52660
52661diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
52662index 8aa44dc..fa1e797 100644
52663--- a/drivers/net/ixgb/ixgb_main.c
52664+++ b/drivers/net/ixgb/ixgb_main.c
52665@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
52666 u32 rctl;
52667 int i;
52668
52669+ pax_track_stack();
52670+
52671 /* Check for Promiscuous and All Multicast modes */
52672
52673 rctl = IXGB_READ_REG(hw, RCTL);
52674diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
52675index af35e1d..8781785 100644
52676--- a/drivers/net/ixgb/ixgb_param.c
52677+++ b/drivers/net/ixgb/ixgb_param.c
52678@@ -260,6 +260,9 @@ void __devinit
52679 ixgb_check_options(struct ixgb_adapter *adapter)
52680 {
52681 int bd = adapter->bd_number;
52682+
52683+ pax_track_stack();
52684+
52685 if (bd >= IXGB_MAX_NIC) {
52686 printk(KERN_NOTICE
52687 "Warning: no configuration for board #%i\n", bd);
52688diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
52689index b17aa73..ed74540 100644
52690--- a/drivers/net/ixgbe/ixgbe_type.h
52691+++ b/drivers/net/ixgbe/ixgbe_type.h
52692@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
52693 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
52694 s32 (*update_checksum)(struct ixgbe_hw *);
52695 };
52696+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
52697
52698 struct ixgbe_mac_operations {
52699 s32 (*init_hw)(struct ixgbe_hw *);
52700@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
52701 /* Flow Control */
52702 s32 (*fc_enable)(struct ixgbe_hw *, s32);
52703 };
52704+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
52705
52706 struct ixgbe_phy_operations {
52707 s32 (*identify)(struct ixgbe_hw *);
52708@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
52709 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
52710 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
52711 };
52712+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
52713
52714 struct ixgbe_eeprom_info {
52715- struct ixgbe_eeprom_operations ops;
52716+ ixgbe_eeprom_operations_no_const ops;
52717 enum ixgbe_eeprom_type type;
52718 u32 semaphore_delay;
52719 u16 word_size;
52720@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
52721 };
52722
52723 struct ixgbe_mac_info {
52724- struct ixgbe_mac_operations ops;
52725+ ixgbe_mac_operations_no_const ops;
52726 enum ixgbe_mac_type type;
52727 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52728 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52729@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
52730 };
52731
52732 struct ixgbe_phy_info {
52733- struct ixgbe_phy_operations ops;
52734+ ixgbe_phy_operations_no_const ops;
52735 struct mdio_if_info mdio;
52736 enum ixgbe_phy_type type;
52737 u32 id;
52738diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
52739index 291a505..2543756 100644
52740--- a/drivers/net/mlx4/main.c
52741+++ b/drivers/net/mlx4/main.c
52742@@ -38,6 +38,7 @@
52743 #include <linux/errno.h>
52744 #include <linux/pci.h>
52745 #include <linux/dma-mapping.h>
52746+#include <linux/sched.h>
52747
52748 #include <linux/mlx4/device.h>
52749 #include <linux/mlx4/doorbell.h>
52750@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
52751 u64 icm_size;
52752 int err;
52753
52754+ pax_track_stack();
52755+
52756 err = mlx4_QUERY_FW(dev);
52757 if (err) {
52758 if (err == -EACCES)
52759diff --git a/drivers/net/niu.c b/drivers/net/niu.c
52760index 2dce134..fa5ce75 100644
52761--- a/drivers/net/niu.c
52762+++ b/drivers/net/niu.c
52763@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
52764 int i, num_irqs, err;
52765 u8 first_ldg;
52766
52767+ pax_track_stack();
52768+
52769 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
52770 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
52771 ldg_num_map[i] = first_ldg + i;
52772diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
52773index c1b3f09..97cd8c4 100644
52774--- a/drivers/net/pcnet32.c
52775+++ b/drivers/net/pcnet32.c
52776@@ -79,7 +79,7 @@ static int cards_found;
52777 /*
52778 * VLB I/O addresses
52779 */
52780-static unsigned int pcnet32_portlist[] __initdata =
52781+static unsigned int pcnet32_portlist[] __devinitdata =
52782 { 0x300, 0x320, 0x340, 0x360, 0 };
52783
52784 static int pcnet32_debug = 0;
52785@@ -267,7 +267,7 @@ struct pcnet32_private {
52786 struct sk_buff **rx_skbuff;
52787 dma_addr_t *tx_dma_addr;
52788 dma_addr_t *rx_dma_addr;
52789- struct pcnet32_access a;
52790+ struct pcnet32_access *a;
52791 spinlock_t lock; /* Guard lock */
52792 unsigned int cur_rx, cur_tx; /* The next free ring entry */
52793 unsigned int rx_ring_size; /* current rx ring size */
52794@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
52795 u16 val;
52796
52797 netif_wake_queue(dev);
52798- val = lp->a.read_csr(ioaddr, CSR3);
52799+ val = lp->a->read_csr(ioaddr, CSR3);
52800 val &= 0x00ff;
52801- lp->a.write_csr(ioaddr, CSR3, val);
52802+ lp->a->write_csr(ioaddr, CSR3, val);
52803 napi_enable(&lp->napi);
52804 }
52805
52806@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
52807 r = mii_link_ok(&lp->mii_if);
52808 } else if (lp->chip_version >= PCNET32_79C970A) {
52809 ulong ioaddr = dev->base_addr; /* card base I/O address */
52810- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
52811+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
52812 } else { /* can not detect link on really old chips */
52813 r = 1;
52814 }
52815@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
52816 pcnet32_netif_stop(dev);
52817
52818 spin_lock_irqsave(&lp->lock, flags);
52819- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52820+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52821
52822 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
52823
52824@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
52825 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52826 {
52827 struct pcnet32_private *lp = netdev_priv(dev);
52828- struct pcnet32_access *a = &lp->a; /* access to registers */
52829+ struct pcnet32_access *a = lp->a; /* access to registers */
52830 ulong ioaddr = dev->base_addr; /* card base I/O address */
52831 struct sk_buff *skb; /* sk buff */
52832 int x, i; /* counters */
52833@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52834 pcnet32_netif_stop(dev);
52835
52836 spin_lock_irqsave(&lp->lock, flags);
52837- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52838+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52839
52840 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
52841
52842 /* Reset the PCNET32 */
52843- lp->a.reset(ioaddr);
52844- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52845+ lp->a->reset(ioaddr);
52846+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52847
52848 /* switch pcnet32 to 32bit mode */
52849- lp->a.write_bcr(ioaddr, 20, 2);
52850+ lp->a->write_bcr(ioaddr, 20, 2);
52851
52852 /* purge & init rings but don't actually restart */
52853 pcnet32_restart(dev, 0x0000);
52854
52855- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52856+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52857
52858 /* Initialize Transmit buffers. */
52859 size = data_len + 15;
52860@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52861
52862 /* set int loopback in CSR15 */
52863 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
52864- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
52865+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
52866
52867 teststatus = cpu_to_le16(0x8000);
52868- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52869+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52870
52871 /* Check status of descriptors */
52872 for (x = 0; x < numbuffs; x++) {
52873@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52874 }
52875 }
52876
52877- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52878+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52879 wmb();
52880 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
52881 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
52882@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52883 pcnet32_restart(dev, CSR0_NORMAL);
52884 } else {
52885 pcnet32_purge_rx_ring(dev);
52886- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52887+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52888 }
52889 spin_unlock_irqrestore(&lp->lock, flags);
52890
52891@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52892 static void pcnet32_led_blink_callback(struct net_device *dev)
52893 {
52894 struct pcnet32_private *lp = netdev_priv(dev);
52895- struct pcnet32_access *a = &lp->a;
52896+ struct pcnet32_access *a = lp->a;
52897 ulong ioaddr = dev->base_addr;
52898 unsigned long flags;
52899 int i;
52900@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
52901 static int pcnet32_phys_id(struct net_device *dev, u32 data)
52902 {
52903 struct pcnet32_private *lp = netdev_priv(dev);
52904- struct pcnet32_access *a = &lp->a;
52905+ struct pcnet32_access *a = lp->a;
52906 ulong ioaddr = dev->base_addr;
52907 unsigned long flags;
52908 int i, regs[4];
52909@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
52910 {
52911 int csr5;
52912 struct pcnet32_private *lp = netdev_priv(dev);
52913- struct pcnet32_access *a = &lp->a;
52914+ struct pcnet32_access *a = lp->a;
52915 ulong ioaddr = dev->base_addr;
52916 int ticks;
52917
52918@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52919 spin_lock_irqsave(&lp->lock, flags);
52920 if (pcnet32_tx(dev)) {
52921 /* reset the chip to clear the error condition, then restart */
52922- lp->a.reset(ioaddr);
52923- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52924+ lp->a->reset(ioaddr);
52925+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52926 pcnet32_restart(dev, CSR0_START);
52927 netif_wake_queue(dev);
52928 }
52929@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52930 __napi_complete(napi);
52931
52932 /* clear interrupt masks */
52933- val = lp->a.read_csr(ioaddr, CSR3);
52934+ val = lp->a->read_csr(ioaddr, CSR3);
52935 val &= 0x00ff;
52936- lp->a.write_csr(ioaddr, CSR3, val);
52937+ lp->a->write_csr(ioaddr, CSR3, val);
52938
52939 /* Set interrupt enable. */
52940- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
52941+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
52942
52943 spin_unlock_irqrestore(&lp->lock, flags);
52944 }
52945@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52946 int i, csr0;
52947 u16 *buff = ptr;
52948 struct pcnet32_private *lp = netdev_priv(dev);
52949- struct pcnet32_access *a = &lp->a;
52950+ struct pcnet32_access *a = lp->a;
52951 ulong ioaddr = dev->base_addr;
52952 unsigned long flags;
52953
52954@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52955 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
52956 if (lp->phymask & (1 << j)) {
52957 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
52958- lp->a.write_bcr(ioaddr, 33,
52959+ lp->a->write_bcr(ioaddr, 33,
52960 (j << 5) | i);
52961- *buff++ = lp->a.read_bcr(ioaddr, 34);
52962+ *buff++ = lp->a->read_bcr(ioaddr, 34);
52963 }
52964 }
52965 }
52966@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52967 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
52968 lp->options |= PCNET32_PORT_FD;
52969
52970- lp->a = *a;
52971+ lp->a = a;
52972
52973 /* prior to register_netdev, dev->name is not yet correct */
52974 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
52975@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52976 if (lp->mii) {
52977 /* lp->phycount and lp->phymask are set to 0 by memset above */
52978
52979- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52980+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52981 /* scan for PHYs */
52982 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
52983 unsigned short id1, id2;
52984@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52985 "Found PHY %04x:%04x at address %d.\n",
52986 id1, id2, i);
52987 }
52988- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52989+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52990 if (lp->phycount > 1) {
52991 lp->options |= PCNET32_PORT_MII;
52992 }
52993@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
52994 }
52995
52996 /* Reset the PCNET32 */
52997- lp->a.reset(ioaddr);
52998+ lp->a->reset(ioaddr);
52999
53000 /* switch pcnet32 to 32bit mode */
53001- lp->a.write_bcr(ioaddr, 20, 2);
53002+ lp->a->write_bcr(ioaddr, 20, 2);
53003
53004 if (netif_msg_ifup(lp))
53005 printk(KERN_DEBUG
53006@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
53007 (u32) (lp->init_dma_addr));
53008
53009 /* set/reset autoselect bit */
53010- val = lp->a.read_bcr(ioaddr, 2) & ~2;
53011+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
53012 if (lp->options & PCNET32_PORT_ASEL)
53013 val |= 2;
53014- lp->a.write_bcr(ioaddr, 2, val);
53015+ lp->a->write_bcr(ioaddr, 2, val);
53016
53017 /* handle full duplex setting */
53018 if (lp->mii_if.full_duplex) {
53019- val = lp->a.read_bcr(ioaddr, 9) & ~3;
53020+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
53021 if (lp->options & PCNET32_PORT_FD) {
53022 val |= 1;
53023 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
53024@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
53025 if (lp->chip_version == 0x2627)
53026 val |= 3;
53027 }
53028- lp->a.write_bcr(ioaddr, 9, val);
53029+ lp->a->write_bcr(ioaddr, 9, val);
53030 }
53031
53032 /* set/reset GPSI bit in test register */
53033- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
53034+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
53035 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
53036 val |= 0x10;
53037- lp->a.write_csr(ioaddr, 124, val);
53038+ lp->a->write_csr(ioaddr, 124, val);
53039
53040 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
53041 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
53042@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
53043 * duplex, and/or enable auto negotiation, and clear DANAS
53044 */
53045 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
53046- lp->a.write_bcr(ioaddr, 32,
53047- lp->a.read_bcr(ioaddr, 32) | 0x0080);
53048+ lp->a->write_bcr(ioaddr, 32,
53049+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
53050 /* disable Auto Negotiation, set 10Mpbs, HD */
53051- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
53052+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
53053 if (lp->options & PCNET32_PORT_FD)
53054 val |= 0x10;
53055 if (lp->options & PCNET32_PORT_100)
53056 val |= 0x08;
53057- lp->a.write_bcr(ioaddr, 32, val);
53058+ lp->a->write_bcr(ioaddr, 32, val);
53059 } else {
53060 if (lp->options & PCNET32_PORT_ASEL) {
53061- lp->a.write_bcr(ioaddr, 32,
53062- lp->a.read_bcr(ioaddr,
53063+ lp->a->write_bcr(ioaddr, 32,
53064+ lp->a->read_bcr(ioaddr,
53065 32) | 0x0080);
53066 /* enable auto negotiate, setup, disable fd */
53067- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
53068+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
53069 val |= 0x20;
53070- lp->a.write_bcr(ioaddr, 32, val);
53071+ lp->a->write_bcr(ioaddr, 32, val);
53072 }
53073 }
53074 } else {
53075@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
53076 * There is really no good other way to handle multiple PHYs
53077 * other than turning off all automatics
53078 */
53079- val = lp->a.read_bcr(ioaddr, 2);
53080- lp->a.write_bcr(ioaddr, 2, val & ~2);
53081- val = lp->a.read_bcr(ioaddr, 32);
53082- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53083+ val = lp->a->read_bcr(ioaddr, 2);
53084+ lp->a->write_bcr(ioaddr, 2, val & ~2);
53085+ val = lp->a->read_bcr(ioaddr, 32);
53086+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53087
53088 if (!(lp->options & PCNET32_PORT_ASEL)) {
53089 /* setup ecmd */
53090@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
53091 ecmd.speed =
53092 lp->
53093 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
53094- bcr9 = lp->a.read_bcr(ioaddr, 9);
53095+ bcr9 = lp->a->read_bcr(ioaddr, 9);
53096
53097 if (lp->options & PCNET32_PORT_FD) {
53098 ecmd.duplex = DUPLEX_FULL;
53099@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
53100 ecmd.duplex = DUPLEX_HALF;
53101 bcr9 |= ~(1 << 0);
53102 }
53103- lp->a.write_bcr(ioaddr, 9, bcr9);
53104+ lp->a->write_bcr(ioaddr, 9, bcr9);
53105 }
53106
53107 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
53108@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
53109
53110 #ifdef DO_DXSUFLO
53111 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
53112- val = lp->a.read_csr(ioaddr, CSR3);
53113+ val = lp->a->read_csr(ioaddr, CSR3);
53114 val |= 0x40;
53115- lp->a.write_csr(ioaddr, CSR3, val);
53116+ lp->a->write_csr(ioaddr, CSR3, val);
53117 }
53118 #endif
53119
53120@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
53121 napi_enable(&lp->napi);
53122
53123 /* Re-initialize the PCNET32, and start it when done. */
53124- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53125- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53126+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53127+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53128
53129- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53130- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53131+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53132+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53133
53134 netif_start_queue(dev);
53135
53136@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
53137
53138 i = 0;
53139 while (i++ < 100)
53140- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53141+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53142 break;
53143 /*
53144 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
53145 * reports that doing so triggers a bug in the '974.
53146 */
53147- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
53148+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
53149
53150 if (netif_msg_ifup(lp))
53151 printk(KERN_DEBUG
53152 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
53153 dev->name, i,
53154 (u32) (lp->init_dma_addr),
53155- lp->a.read_csr(ioaddr, CSR0));
53156+ lp->a->read_csr(ioaddr, CSR0));
53157
53158 spin_unlock_irqrestore(&lp->lock, flags);
53159
53160@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
53161 * Switch back to 16bit mode to avoid problems with dumb
53162 * DOS packet driver after a warm reboot
53163 */
53164- lp->a.write_bcr(ioaddr, 20, 4);
53165+ lp->a->write_bcr(ioaddr, 20, 4);
53166
53167 err_free_irq:
53168 spin_unlock_irqrestore(&lp->lock, flags);
53169@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53170
53171 /* wait for stop */
53172 for (i = 0; i < 100; i++)
53173- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
53174+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
53175 break;
53176
53177 if (i >= 100 && netif_msg_drv(lp))
53178@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53179 return;
53180
53181 /* ReInit Ring */
53182- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53183+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53184 i = 0;
53185 while (i++ < 1000)
53186- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53187+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53188 break;
53189
53190- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
53191+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
53192 }
53193
53194 static void pcnet32_tx_timeout(struct net_device *dev)
53195@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
53196 if (pcnet32_debug & NETIF_MSG_DRV)
53197 printk(KERN_ERR
53198 "%s: transmit timed out, status %4.4x, resetting.\n",
53199- dev->name, lp->a.read_csr(ioaddr, CSR0));
53200- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53201+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53202+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53203 dev->stats.tx_errors++;
53204 if (netif_msg_tx_err(lp)) {
53205 int i;
53206@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53207 if (netif_msg_tx_queued(lp)) {
53208 printk(KERN_DEBUG
53209 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
53210- dev->name, lp->a.read_csr(ioaddr, CSR0));
53211+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53212 }
53213
53214 /* Default status -- will not enable Successful-TxDone
53215@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53216 dev->stats.tx_bytes += skb->len;
53217
53218 /* Trigger an immediate send poll. */
53219- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53220+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53221
53222 dev->trans_start = jiffies;
53223
53224@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
53225
53226 spin_lock(&lp->lock);
53227
53228- csr0 = lp->a.read_csr(ioaddr, CSR0);
53229+ csr0 = lp->a->read_csr(ioaddr, CSR0);
53230 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
53231 if (csr0 == 0xffff) {
53232 break; /* PCMCIA remove happened */
53233 }
53234 /* Acknowledge all of the current interrupt sources ASAP. */
53235- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53236+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53237
53238 if (netif_msg_intr(lp))
53239 printk(KERN_DEBUG
53240 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
53241- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
53242+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
53243
53244 /* Log misc errors. */
53245 if (csr0 & 0x4000)
53246@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
53247 if (napi_schedule_prep(&lp->napi)) {
53248 u16 val;
53249 /* set interrupt masks */
53250- val = lp->a.read_csr(ioaddr, CSR3);
53251+ val = lp->a->read_csr(ioaddr, CSR3);
53252 val |= 0x5f00;
53253- lp->a.write_csr(ioaddr, CSR3, val);
53254+ lp->a->write_csr(ioaddr, CSR3, val);
53255
53256 __napi_schedule(&lp->napi);
53257 break;
53258 }
53259- csr0 = lp->a.read_csr(ioaddr, CSR0);
53260+ csr0 = lp->a->read_csr(ioaddr, CSR0);
53261 }
53262
53263 if (netif_msg_intr(lp))
53264 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
53265- dev->name, lp->a.read_csr(ioaddr, CSR0));
53266+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53267
53268 spin_unlock(&lp->lock);
53269
53270@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
53271
53272 spin_lock_irqsave(&lp->lock, flags);
53273
53274- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53275+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53276
53277 if (netif_msg_ifdown(lp))
53278 printk(KERN_DEBUG
53279 "%s: Shutting down ethercard, status was %2.2x.\n",
53280- dev->name, lp->a.read_csr(ioaddr, CSR0));
53281+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53282
53283 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
53284- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53285+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53286
53287 /*
53288 * Switch back to 16bit mode to avoid problems with dumb
53289 * DOS packet driver after a warm reboot
53290 */
53291- lp->a.write_bcr(ioaddr, 20, 4);
53292+ lp->a->write_bcr(ioaddr, 20, 4);
53293
53294 spin_unlock_irqrestore(&lp->lock, flags);
53295
53296@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
53297 unsigned long flags;
53298
53299 spin_lock_irqsave(&lp->lock, flags);
53300- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53301+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53302 spin_unlock_irqrestore(&lp->lock, flags);
53303
53304 return &dev->stats;
53305@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
53306 if (dev->flags & IFF_ALLMULTI) {
53307 ib->filter[0] = cpu_to_le32(~0U);
53308 ib->filter[1] = cpu_to_le32(~0U);
53309- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53310- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53311- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53312- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53313+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53314+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53315+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53316+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53317 return;
53318 }
53319 /* clear the multicast filter */
53320@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
53321 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
53322 }
53323 for (i = 0; i < 4; i++)
53324- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
53325+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
53326 le16_to_cpu(mcast_table[i]));
53327 return;
53328 }
53329@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53330
53331 spin_lock_irqsave(&lp->lock, flags);
53332 suspended = pcnet32_suspend(dev, &flags, 0);
53333- csr15 = lp->a.read_csr(ioaddr, CSR15);
53334+ csr15 = lp->a->read_csr(ioaddr, CSR15);
53335 if (dev->flags & IFF_PROMISC) {
53336 /* Log any net taps. */
53337 if (netif_msg_hw(lp))
53338@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53339 lp->init_block->mode =
53340 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
53341 7);
53342- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
53343+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
53344 } else {
53345 lp->init_block->mode =
53346 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
53347- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53348+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53349 pcnet32_load_multicast(dev);
53350 }
53351
53352 if (suspended) {
53353 int csr5;
53354 /* clear SUSPEND (SPND) - CSR5 bit 0 */
53355- csr5 = lp->a.read_csr(ioaddr, CSR5);
53356- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53357+ csr5 = lp->a->read_csr(ioaddr, CSR5);
53358+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53359 } else {
53360- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53361+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53362 pcnet32_restart(dev, CSR0_NORMAL);
53363 netif_wake_queue(dev);
53364 }
53365@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
53366 if (!lp->mii)
53367 return 0;
53368
53369- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53370- val_out = lp->a.read_bcr(ioaddr, 34);
53371+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53372+ val_out = lp->a->read_bcr(ioaddr, 34);
53373
53374 return val_out;
53375 }
53376@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
53377 if (!lp->mii)
53378 return;
53379
53380- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53381- lp->a.write_bcr(ioaddr, 34, val);
53382+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53383+ lp->a->write_bcr(ioaddr, 34, val);
53384 }
53385
53386 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53387@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53388 curr_link = mii_link_ok(&lp->mii_if);
53389 } else {
53390 ulong ioaddr = dev->base_addr; /* card base I/O address */
53391- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
53392+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
53393 }
53394 if (!curr_link) {
53395 if (prev_link || verbose) {
53396@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53397 (ecmd.duplex ==
53398 DUPLEX_FULL) ? "full" : "half");
53399 }
53400- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
53401+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
53402 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
53403 if (lp->mii_if.full_duplex)
53404 bcr9 |= (1 << 0);
53405 else
53406 bcr9 &= ~(1 << 0);
53407- lp->a.write_bcr(dev->base_addr, 9, bcr9);
53408+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
53409 }
53410 } else {
53411 if (netif_msg_link(lp))
53412diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
53413index 7cc9898..6eb50d3 100644
53414--- a/drivers/net/sis190.c
53415+++ b/drivers/net/sis190.c
53416@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
53417 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
53418 struct net_device *dev)
53419 {
53420- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
53421+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
53422 struct sis190_private *tp = netdev_priv(dev);
53423 struct pci_dev *isa_bridge;
53424 u8 reg, tmp8;
53425diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
53426index e13685a..60c948c 100644
53427--- a/drivers/net/sundance.c
53428+++ b/drivers/net/sundance.c
53429@@ -225,7 +225,7 @@ enum {
53430 struct pci_id_info {
53431 const char *name;
53432 };
53433-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53434+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53435 {"D-Link DFE-550TX FAST Ethernet Adapter"},
53436 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
53437 {"D-Link DFE-580TX 4 port Server Adapter"},
53438diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
53439index 529f55a..cccaa18 100644
53440--- a/drivers/net/tg3.h
53441+++ b/drivers/net/tg3.h
53442@@ -95,6 +95,7 @@
53443 #define CHIPREV_ID_5750_A0 0x4000
53444 #define CHIPREV_ID_5750_A1 0x4001
53445 #define CHIPREV_ID_5750_A3 0x4003
53446+#define CHIPREV_ID_5750_C1 0x4201
53447 #define CHIPREV_ID_5750_C2 0x4202
53448 #define CHIPREV_ID_5752_A0_HW 0x5000
53449 #define CHIPREV_ID_5752_A0 0x6000
53450diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
53451index b9db1b5..720f9ce 100644
53452--- a/drivers/net/tokenring/abyss.c
53453+++ b/drivers/net/tokenring/abyss.c
53454@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
53455
53456 static int __init abyss_init (void)
53457 {
53458- abyss_netdev_ops = tms380tr_netdev_ops;
53459+ pax_open_kernel();
53460+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53461
53462- abyss_netdev_ops.ndo_open = abyss_open;
53463- abyss_netdev_ops.ndo_stop = abyss_close;
53464+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
53465+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
53466+ pax_close_kernel();
53467
53468 return pci_register_driver(&abyss_driver);
53469 }
53470diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
53471index 456f8bf..373e56d 100644
53472--- a/drivers/net/tokenring/madgemc.c
53473+++ b/drivers/net/tokenring/madgemc.c
53474@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
53475
53476 static int __init madgemc_init (void)
53477 {
53478- madgemc_netdev_ops = tms380tr_netdev_ops;
53479- madgemc_netdev_ops.ndo_open = madgemc_open;
53480- madgemc_netdev_ops.ndo_stop = madgemc_close;
53481+ pax_open_kernel();
53482+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53483+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
53484+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
53485+ pax_close_kernel();
53486
53487 return mca_register_driver (&madgemc_driver);
53488 }
53489diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
53490index 16e8783..925bd49 100644
53491--- a/drivers/net/tokenring/proteon.c
53492+++ b/drivers/net/tokenring/proteon.c
53493@@ -353,9 +353,11 @@ static int __init proteon_init(void)
53494 struct platform_device *pdev;
53495 int i, num = 0, err = 0;
53496
53497- proteon_netdev_ops = tms380tr_netdev_ops;
53498- proteon_netdev_ops.ndo_open = proteon_open;
53499- proteon_netdev_ops.ndo_stop = tms380tr_close;
53500+ pax_open_kernel();
53501+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53502+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
53503+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
53504+ pax_close_kernel();
53505
53506 err = platform_driver_register(&proteon_driver);
53507 if (err)
53508diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
53509index 46db5c5..37c1536 100644
53510--- a/drivers/net/tokenring/skisa.c
53511+++ b/drivers/net/tokenring/skisa.c
53512@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
53513 struct platform_device *pdev;
53514 int i, num = 0, err = 0;
53515
53516- sk_isa_netdev_ops = tms380tr_netdev_ops;
53517- sk_isa_netdev_ops.ndo_open = sk_isa_open;
53518- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53519+ pax_open_kernel();
53520+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53521+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
53522+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53523+ pax_close_kernel();
53524
53525 err = platform_driver_register(&sk_isa_driver);
53526 if (err)
53527diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
53528index 74e5ba4..5cf6bc9 100644
53529--- a/drivers/net/tulip/de2104x.c
53530+++ b/drivers/net/tulip/de2104x.c
53531@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
53532 struct de_srom_info_leaf *il;
53533 void *bufp;
53534
53535+ pax_track_stack();
53536+
53537 /* download entire eeprom */
53538 for (i = 0; i < DE_EEPROM_WORDS; i++)
53539 ((__le16 *)ee_data)[i] =
53540diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
53541index a8349b7..90f9dfe 100644
53542--- a/drivers/net/tulip/de4x5.c
53543+++ b/drivers/net/tulip/de4x5.c
53544@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53545 for (i=0; i<ETH_ALEN; i++) {
53546 tmp.addr[i] = dev->dev_addr[i];
53547 }
53548- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53549+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53550 break;
53551
53552 case DE4X5_SET_HWADDR: /* Set the hardware address */
53553@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53554 spin_lock_irqsave(&lp->lock, flags);
53555 memcpy(&statbuf, &lp->pktStats, ioc->len);
53556 spin_unlock_irqrestore(&lp->lock, flags);
53557- if (copy_to_user(ioc->data, &statbuf, ioc->len))
53558+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
53559 return -EFAULT;
53560 break;
53561 }
53562diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
53563index 391acd3..56d11cd 100644
53564--- a/drivers/net/tulip/eeprom.c
53565+++ b/drivers/net/tulip/eeprom.c
53566@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
53567 {NULL}};
53568
53569
53570-static const char *block_name[] __devinitdata = {
53571+static const char *block_name[] __devinitconst = {
53572 "21140 non-MII",
53573 "21140 MII PHY",
53574 "21142 Serial PHY",
53575diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
53576index b38d3b7..b1cff23 100644
53577--- a/drivers/net/tulip/winbond-840.c
53578+++ b/drivers/net/tulip/winbond-840.c
53579@@ -235,7 +235,7 @@ struct pci_id_info {
53580 int drv_flags; /* Driver use, intended as capability flags. */
53581 };
53582
53583-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53584+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53585 { /* Sometime a Level-One switch card. */
53586 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
53587 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
53588diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
53589index f450bc9..2b747c8 100644
53590--- a/drivers/net/usb/hso.c
53591+++ b/drivers/net/usb/hso.c
53592@@ -71,7 +71,7 @@
53593 #include <asm/byteorder.h>
53594 #include <linux/serial_core.h>
53595 #include <linux/serial.h>
53596-
53597+#include <asm/local.h>
53598
53599 #define DRIVER_VERSION "1.2"
53600 #define MOD_AUTHOR "Option Wireless"
53601@@ -258,7 +258,7 @@ struct hso_serial {
53602
53603 /* from usb_serial_port */
53604 struct tty_struct *tty;
53605- int open_count;
53606+ local_t open_count;
53607 spinlock_t serial_lock;
53608
53609 int (*write_data) (struct hso_serial *serial);
53610@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
53611 struct urb *urb;
53612
53613 urb = serial->rx_urb[0];
53614- if (serial->open_count > 0) {
53615+ if (local_read(&serial->open_count) > 0) {
53616 count = put_rxbuf_data(urb, serial);
53617 if (count == -1)
53618 return;
53619@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
53620 DUMP1(urb->transfer_buffer, urb->actual_length);
53621
53622 /* Anyone listening? */
53623- if (serial->open_count == 0)
53624+ if (local_read(&serial->open_count) == 0)
53625 return;
53626
53627 if (status == 0) {
53628@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53629 spin_unlock_irq(&serial->serial_lock);
53630
53631 /* check for port already opened, if not set the termios */
53632- serial->open_count++;
53633- if (serial->open_count == 1) {
53634+ if (local_inc_return(&serial->open_count) == 1) {
53635 tty->low_latency = 1;
53636 serial->rx_state = RX_IDLE;
53637 /* Force default termio settings */
53638@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53639 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
53640 if (result) {
53641 hso_stop_serial_device(serial->parent);
53642- serial->open_count--;
53643+ local_dec(&serial->open_count);
53644 kref_put(&serial->parent->ref, hso_serial_ref_free);
53645 }
53646 } else {
53647@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
53648
53649 /* reset the rts and dtr */
53650 /* do the actual close */
53651- serial->open_count--;
53652+ local_dec(&serial->open_count);
53653
53654- if (serial->open_count <= 0) {
53655- serial->open_count = 0;
53656+ if (local_read(&serial->open_count) <= 0) {
53657+ local_set(&serial->open_count, 0);
53658 spin_lock_irq(&serial->serial_lock);
53659 if (serial->tty == tty) {
53660 serial->tty->driver_data = NULL;
53661@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
53662
53663 /* the actual setup */
53664 spin_lock_irqsave(&serial->serial_lock, flags);
53665- if (serial->open_count)
53666+ if (local_read(&serial->open_count))
53667 _hso_serial_set_termios(tty, old);
53668 else
53669 tty->termios = old;
53670@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
53671 /* Start all serial ports */
53672 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
53673 if (serial_table[i] && (serial_table[i]->interface == iface)) {
53674- if (dev2ser(serial_table[i])->open_count) {
53675+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
53676 result =
53677 hso_start_serial_device(serial_table[i], GFP_NOIO);
53678 hso_kick_transmit(dev2ser(serial_table[i]));
53679diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
53680index 3e94f0c..ffdd926 100644
53681--- a/drivers/net/vxge/vxge-config.h
53682+++ b/drivers/net/vxge/vxge-config.h
53683@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
53684 void (*link_down)(struct __vxge_hw_device *devh);
53685 void (*crit_err)(struct __vxge_hw_device *devh,
53686 enum vxge_hw_event type, u64 ext_data);
53687-};
53688+} __no_const;
53689
53690 /*
53691 * struct __vxge_hw_blockpool_entry - Block private data structure
53692diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
53693index 068d7a9..35293de 100644
53694--- a/drivers/net/vxge/vxge-main.c
53695+++ b/drivers/net/vxge/vxge-main.c
53696@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
53697 struct sk_buff *completed[NR_SKB_COMPLETED];
53698 int more;
53699
53700+ pax_track_stack();
53701+
53702 do {
53703 more = 0;
53704 skb_ptr = completed;
53705@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
53706 u8 mtable[256] = {0}; /* CPU to vpath mapping */
53707 int index;
53708
53709+ pax_track_stack();
53710+
53711 /*
53712 * Filling
53713 * - itable with bucket numbers
53714diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
53715index 461742b..81be42e 100644
53716--- a/drivers/net/vxge/vxge-traffic.h
53717+++ b/drivers/net/vxge/vxge-traffic.h
53718@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
53719 struct vxge_hw_mempool_dma *dma_object,
53720 u32 index,
53721 u32 is_last);
53722-};
53723+} __no_const;
53724
53725 void
53726 __vxge_hw_mempool_destroy(
53727diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
53728index cd8cb95..4153b79 100644
53729--- a/drivers/net/wan/cycx_x25.c
53730+++ b/drivers/net/wan/cycx_x25.c
53731@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
53732 unsigned char hex[1024],
53733 * phex = hex;
53734
53735+ pax_track_stack();
53736+
53737 if (len >= (sizeof(hex) / 2))
53738 len = (sizeof(hex) / 2) - 1;
53739
53740diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
53741index aa9248f..a4e3c3b 100644
53742--- a/drivers/net/wan/hdlc_x25.c
53743+++ b/drivers/net/wan/hdlc_x25.c
53744@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
53745
53746 static int x25_open(struct net_device *dev)
53747 {
53748- struct lapb_register_struct cb;
53749+ static struct lapb_register_struct cb = {
53750+ .connect_confirmation = x25_connected,
53751+ .connect_indication = x25_connected,
53752+ .disconnect_confirmation = x25_disconnected,
53753+ .disconnect_indication = x25_disconnected,
53754+ .data_indication = x25_data_indication,
53755+ .data_transmit = x25_data_transmit
53756+ };
53757 int result;
53758
53759- cb.connect_confirmation = x25_connected;
53760- cb.connect_indication = x25_connected;
53761- cb.disconnect_confirmation = x25_disconnected;
53762- cb.disconnect_indication = x25_disconnected;
53763- cb.data_indication = x25_data_indication;
53764- cb.data_transmit = x25_data_transmit;
53765-
53766 result = lapb_register(dev, &cb);
53767 if (result != LAPB_OK)
53768 return result;
53769diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
53770index 5ad287c..783b020 100644
53771--- a/drivers/net/wimax/i2400m/usb-fw.c
53772+++ b/drivers/net/wimax/i2400m/usb-fw.c
53773@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
53774 int do_autopm = 1;
53775 DECLARE_COMPLETION_ONSTACK(notif_completion);
53776
53777+ pax_track_stack();
53778+
53779 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
53780 i2400m, ack, ack_size);
53781 BUG_ON(_ack == i2400m->bm_ack_buf);
53782diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
53783index 6c26840..62c97c3 100644
53784--- a/drivers/net/wireless/airo.c
53785+++ b/drivers/net/wireless/airo.c
53786@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
53787 BSSListElement * loop_net;
53788 BSSListElement * tmp_net;
53789
53790+ pax_track_stack();
53791+
53792 /* Blow away current list of scan results */
53793 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
53794 list_move_tail (&loop_net->list, &ai->network_free_list);
53795@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
53796 WepKeyRid wkr;
53797 int rc;
53798
53799+ pax_track_stack();
53800+
53801 memset( &mySsid, 0, sizeof( mySsid ) );
53802 kfree (ai->flash);
53803 ai->flash = NULL;
53804@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
53805 __le32 *vals = stats.vals;
53806 int len;
53807
53808+ pax_track_stack();
53809+
53810 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53811 return -ENOMEM;
53812 data = (struct proc_data *)file->private_data;
53813@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
53814 /* If doLoseSync is not 1, we won't do a Lose Sync */
53815 int doLoseSync = -1;
53816
53817+ pax_track_stack();
53818+
53819 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53820 return -ENOMEM;
53821 data = (struct proc_data *)file->private_data;
53822@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
53823 int i;
53824 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
53825
53826+ pax_track_stack();
53827+
53828 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
53829 if (!qual)
53830 return -ENOMEM;
53831@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
53832 CapabilityRid cap_rid;
53833 __le32 *vals = stats_rid.vals;
53834
53835+ pax_track_stack();
53836+
53837 /* Get stats out of the card */
53838 clear_bit(JOB_WSTATS, &local->jobs);
53839 if (local->power.event) {
53840diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
53841index 747508c..c36cb08 100644
53842--- a/drivers/net/wireless/ath/ath5k/debug.c
53843+++ b/drivers/net/wireless/ath/ath5k/debug.c
53844@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
53845 unsigned int v;
53846 u64 tsf;
53847
53848+ pax_track_stack();
53849+
53850 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
53851 len += snprintf(buf+len, sizeof(buf)-len,
53852 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
53853@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53854 unsigned int len = 0;
53855 unsigned int i;
53856
53857+ pax_track_stack();
53858+
53859 len += snprintf(buf+len, sizeof(buf)-len,
53860 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
53861
53862@@ -337,6 +341,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53863
53864 static ssize_t write_file_debug(struct file *file,
53865 const char __user *userbuf,
53866+ size_t count, loff_t *ppos) __size_overflow(3);
53867+static ssize_t write_file_debug(struct file *file,
53868+ const char __user *userbuf,
53869 size_t count, loff_t *ppos)
53870 {
53871 struct ath5k_softc *sc = file->private_data;
53872diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
53873index 2be4c22..a8ad784 100644
53874--- a/drivers/net/wireless/ath/ath9k/debug.c
53875+++ b/drivers/net/wireless/ath/ath9k/debug.c
53876@@ -56,6 +56,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53877 }
53878
53879 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53880+ size_t count, loff_t *ppos) __size_overflow(3);
53881+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53882 size_t count, loff_t *ppos)
53883 {
53884 struct ath_softc *sc = file->private_data;
53885@@ -220,6 +222,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
53886 char buf[512];
53887 unsigned int len = 0;
53888
53889+ pax_track_stack();
53890+
53891 len += snprintf(buf + len, sizeof(buf) - len,
53892 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
53893 len += snprintf(buf + len, sizeof(buf) - len,
53894@@ -360,6 +364,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
53895 int i;
53896 u8 addr[ETH_ALEN];
53897
53898+ pax_track_stack();
53899+
53900 len += snprintf(buf + len, sizeof(buf) - len,
53901 "primary: %s (%s chan=%d ht=%d)\n",
53902 wiphy_name(sc->pri_wiphy->hw->wiphy),
53903diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
53904index 80b19a4..dab3a45 100644
53905--- a/drivers/net/wireless/b43/debugfs.c
53906+++ b/drivers/net/wireless/b43/debugfs.c
53907@@ -43,7 +43,7 @@ static struct dentry *rootdir;
53908 struct b43_debugfs_fops {
53909 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
53910 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
53911- struct file_operations fops;
53912+ const struct file_operations fops;
53913 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
53914 size_t file_struct_offset;
53915 };
53916diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
53917index 1f85ac5..c99b4b4 100644
53918--- a/drivers/net/wireless/b43legacy/debugfs.c
53919+++ b/drivers/net/wireless/b43legacy/debugfs.c
53920@@ -44,7 +44,7 @@ static struct dentry *rootdir;
53921 struct b43legacy_debugfs_fops {
53922 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
53923 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
53924- struct file_operations fops;
53925+ const struct file_operations fops;
53926 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
53927 size_t file_struct_offset;
53928 /* Take wl->irq_lock before calling read/write? */
53929diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
53930index 43102bf..3b569c3 100644
53931--- a/drivers/net/wireless/ipw2x00/ipw2100.c
53932+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
53933@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
53934 int err;
53935 DECLARE_SSID_BUF(ssid);
53936
53937+ pax_track_stack();
53938+
53939 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
53940
53941 if (ssid_len)
53942@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
53943 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
53944 int err;
53945
53946+ pax_track_stack();
53947+
53948 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
53949 idx, keylen, len);
53950
53951diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
53952index 282b1f7..169f0cf 100644
53953--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
53954+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
53955@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
53956 unsigned long flags;
53957 DECLARE_SSID_BUF(ssid);
53958
53959+ pax_track_stack();
53960+
53961 LIBIPW_DEBUG_SCAN("'%s' (%pM"
53962 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
53963 print_ssid(ssid, info_element->data, info_element->len),
53964diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
53965index 950267a..80d5fd2 100644
53966--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
53967+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
53968@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
53969 },
53970 };
53971
53972-static struct iwl_ops iwl1000_ops = {
53973+static const struct iwl_ops iwl1000_ops = {
53974 .ucode = &iwl5000_ucode,
53975 .lib = &iwl1000_lib,
53976 .hcmd = &iwl5000_hcmd,
53977diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
53978index 56bfcc3..b348020 100644
53979--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
53980+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
53981@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
53982 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
53983 };
53984
53985-static struct iwl_ops iwl3945_ops = {
53986+static const struct iwl_ops iwl3945_ops = {
53987 .ucode = &iwl3945_ucode,
53988 .lib = &iwl3945_lib,
53989 .hcmd = &iwl3945_hcmd,
53990diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
53991index 585b8d4..e142963 100644
53992--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
53993+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
53994@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
53995 },
53996 };
53997
53998-static struct iwl_ops iwl4965_ops = {
53999+static const struct iwl_ops iwl4965_ops = {
54000 .ucode = &iwl4965_ucode,
54001 .lib = &iwl4965_lib,
54002 .hcmd = &iwl4965_hcmd,
54003diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
54004index 1f423f2..e37c192 100644
54005--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
54006+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
54007@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
54008 },
54009 };
54010
54011-struct iwl_ops iwl5000_ops = {
54012+const struct iwl_ops iwl5000_ops = {
54013 .ucode = &iwl5000_ucode,
54014 .lib = &iwl5000_lib,
54015 .hcmd = &iwl5000_hcmd,
54016 .utils = &iwl5000_hcmd_utils,
54017 };
54018
54019-static struct iwl_ops iwl5150_ops = {
54020+static const struct iwl_ops iwl5150_ops = {
54021 .ucode = &iwl5000_ucode,
54022 .lib = &iwl5150_lib,
54023 .hcmd = &iwl5000_hcmd,
54024diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
54025index 1473452..f07d5e1 100644
54026--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
54027+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
54028@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
54029 .calc_rssi = iwl5000_calc_rssi,
54030 };
54031
54032-static struct iwl_ops iwl6000_ops = {
54033+static const struct iwl_ops iwl6000_ops = {
54034 .ucode = &iwl5000_ucode,
54035 .lib = &iwl6000_lib,
54036 .hcmd = &iwl5000_hcmd,
54037diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54038index 1a3dfa2..b3e0a61 100644
54039--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54040+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
54041@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
54042 u8 active_index = 0;
54043 s32 tpt = 0;
54044
54045+ pax_track_stack();
54046+
54047 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
54048
54049 if (!ieee80211_is_data(hdr->frame_control) ||
54050@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
54051 u8 valid_tx_ant = 0;
54052 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
54053
54054+ pax_track_stack();
54055+
54056 /* Override starting rate (index 0) if needed for debug purposes */
54057 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
54058
54059diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
54060index 0e56d78..6a3c107 100644
54061--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
54062+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
54063@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
54064 if (iwl_debug_level & IWL_DL_INFO)
54065 dev_printk(KERN_DEBUG, &(pdev->dev),
54066 "Disabling hw_scan\n");
54067- iwl_hw_ops.hw_scan = NULL;
54068+ pax_open_kernel();
54069+ *(void **)&iwl_hw_ops.hw_scan = NULL;
54070+ pax_close_kernel();
54071 }
54072
54073 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
54074diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
54075index cbc6290..eb323d7 100644
54076--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
54077+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
54078@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
54079 #endif
54080
54081 #else
54082-#define IWL_DEBUG(__priv, level, fmt, args...)
54083-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
54084+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
54085+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
54086 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
54087 void *p, u32 len)
54088 {}
54089diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54090index a198bcf..8e68233 100644
54091--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54092+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54093@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
54094 int pos = 0;
54095 const size_t bufsz = sizeof(buf);
54096
54097+ pax_track_stack();
54098+
54099 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
54100 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
54101 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
54102@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
54103 const size_t bufsz = sizeof(buf);
54104 ssize_t ret;
54105
54106+ pax_track_stack();
54107+
54108 for (i = 0; i < AC_NUM; i++) {
54109 pos += scnprintf(buf + pos, bufsz - pos,
54110 "\tcw_min\tcw_max\taifsn\ttxop\n");
54111diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
54112index 3539ea4..b174bfa 100644
54113--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
54114+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
54115@@ -68,7 +68,7 @@ struct iwl_tx_queue;
54116
54117 /* shared structures from iwl-5000.c */
54118 extern struct iwl_mod_params iwl50_mod_params;
54119-extern struct iwl_ops iwl5000_ops;
54120+extern const struct iwl_ops iwl5000_ops;
54121 extern struct iwl_ucode_ops iwl5000_ucode;
54122 extern struct iwl_lib_ops iwl5000_lib;
54123 extern struct iwl_hcmd_ops iwl5000_hcmd;
54124diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54125index 619590d..69235ee 100644
54126--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
54127+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54128@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
54129 */
54130 if (iwl3945_mod_params.disable_hw_scan) {
54131 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
54132- iwl3945_hw_ops.hw_scan = NULL;
54133+ pax_open_kernel();
54134+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
54135+ pax_close_kernel();
54136 }
54137
54138
54139diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54140index 1465379..fe4d78b 100644
54141--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
54142+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54143@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
54144 int buf_len = 512;
54145 size_t len = 0;
54146
54147+ pax_track_stack();
54148+
54149 if (*ppos != 0)
54150 return 0;
54151 if (count < sizeof(buf))
54152diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
54153index 893a55c..7f66a50 100644
54154--- a/drivers/net/wireless/libertas/debugfs.c
54155+++ b/drivers/net/wireless/libertas/debugfs.c
54156@@ -708,7 +708,7 @@ out_unlock:
54157 struct lbs_debugfs_files {
54158 const char *name;
54159 int perm;
54160- struct file_operations fops;
54161+ const struct file_operations fops;
54162 };
54163
54164 static const struct lbs_debugfs_files debugfs_files[] = {
54165diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
54166index 2ecbedb..42704f0 100644
54167--- a/drivers/net/wireless/rndis_wlan.c
54168+++ b/drivers/net/wireless/rndis_wlan.c
54169@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
54170
54171 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
54172
54173- if (rts_threshold < 0 || rts_threshold > 2347)
54174+ if (rts_threshold > 2347)
54175 rts_threshold = 2347;
54176
54177 tmp = cpu_to_le32(rts_threshold);
54178diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
54179index 334ccd6..47f8944 100644
54180--- a/drivers/oprofile/buffer_sync.c
54181+++ b/drivers/oprofile/buffer_sync.c
54182@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
54183 if (cookie == NO_COOKIE)
54184 offset = pc;
54185 if (cookie == INVALID_COOKIE) {
54186- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54187+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54188 offset = pc;
54189 }
54190 if (cookie != last_cookie) {
54191@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
54192 /* add userspace sample */
54193
54194 if (!mm) {
54195- atomic_inc(&oprofile_stats.sample_lost_no_mm);
54196+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
54197 return 0;
54198 }
54199
54200 cookie = lookup_dcookie(mm, s->eip, &offset);
54201
54202 if (cookie == INVALID_COOKIE) {
54203- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54204+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54205 return 0;
54206 }
54207
54208@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
54209 /* ignore backtraces if failed to add a sample */
54210 if (state == sb_bt_start) {
54211 state = sb_bt_ignore;
54212- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
54213+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
54214 }
54215 }
54216 release_mm(mm);
54217diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
54218index 5df60a6..72f5c1c 100644
54219--- a/drivers/oprofile/event_buffer.c
54220+++ b/drivers/oprofile/event_buffer.c
54221@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
54222 }
54223
54224 if (buffer_pos == buffer_size) {
54225- atomic_inc(&oprofile_stats.event_lost_overflow);
54226+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
54227 return;
54228 }
54229
54230diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
54231index dc8a042..fe5f315 100644
54232--- a/drivers/oprofile/oprof.c
54233+++ b/drivers/oprofile/oprof.c
54234@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
54235 if (oprofile_ops.switch_events())
54236 return;
54237
54238- atomic_inc(&oprofile_stats.multiplex_counter);
54239+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
54240 start_switch_worker();
54241 }
54242
54243diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
54244index bbd7516..1f97f55 100644
54245--- a/drivers/oprofile/oprofile_files.c
54246+++ b/drivers/oprofile/oprofile_files.c
54247@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
54248
54249
54250 static ssize_t timeout_write(struct file *file, char const __user *buf,
54251+ size_t count, loff_t *offset) __size_overflow(3);
54252+static ssize_t timeout_write(struct file *file, char const __user *buf,
54253 size_t count, loff_t *offset)
54254 {
54255 unsigned long val;
54256@@ -71,6 +73,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
54257 }
54258
54259
54260+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54261 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54262 {
54263 unsigned long val;
54264@@ -119,12 +122,14 @@ static const struct file_operations cpu_type_fops = {
54265 };
54266
54267
54268+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54269 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
54270 {
54271 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
54272 }
54273
54274
54275+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54276 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54277 {
54278 unsigned long val;
54279diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
54280index 61689e8..387f7f8 100644
54281--- a/drivers/oprofile/oprofile_stats.c
54282+++ b/drivers/oprofile/oprofile_stats.c
54283@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
54284 cpu_buf->sample_invalid_eip = 0;
54285 }
54286
54287- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
54288- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
54289- atomic_set(&oprofile_stats.event_lost_overflow, 0);
54290- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
54291- atomic_set(&oprofile_stats.multiplex_counter, 0);
54292+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
54293+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
54294+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
54295+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
54296+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
54297 }
54298
54299
54300diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
54301index 0b54e46..a37c527 100644
54302--- a/drivers/oprofile/oprofile_stats.h
54303+++ b/drivers/oprofile/oprofile_stats.h
54304@@ -13,11 +13,11 @@
54305 #include <asm/atomic.h>
54306
54307 struct oprofile_stat_struct {
54308- atomic_t sample_lost_no_mm;
54309- atomic_t sample_lost_no_mapping;
54310- atomic_t bt_lost_no_mapping;
54311- atomic_t event_lost_overflow;
54312- atomic_t multiplex_counter;
54313+ atomic_unchecked_t sample_lost_no_mm;
54314+ atomic_unchecked_t sample_lost_no_mapping;
54315+ atomic_unchecked_t bt_lost_no_mapping;
54316+ atomic_unchecked_t event_lost_overflow;
54317+ atomic_unchecked_t multiplex_counter;
54318 };
54319
54320 extern struct oprofile_stat_struct oprofile_stats;
54321diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
54322index 2766a6d..4d533c7 100644
54323--- a/drivers/oprofile/oprofilefs.c
54324+++ b/drivers/oprofile/oprofilefs.c
54325@@ -89,6 +89,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
54326 }
54327
54328
54329+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54330 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54331 {
54332 unsigned long *value = file->private_data;
54333@@ -187,7 +188,7 @@ static const struct file_operations atomic_ro_fops = {
54334
54335
54336 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
54337- char const *name, atomic_t *val)
54338+ char const *name, atomic_unchecked_t *val)
54339 {
54340 struct dentry *d = __oprofilefs_create_file(sb, root, name,
54341 &atomic_ro_fops, 0444);
54342diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
54343index 13a64bc..ad62835 100644
54344--- a/drivers/parisc/pdc_stable.c
54345+++ b/drivers/parisc/pdc_stable.c
54346@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
54347 return ret;
54348 }
54349
54350-static struct sysfs_ops pdcspath_attr_ops = {
54351+static const struct sysfs_ops pdcspath_attr_ops = {
54352 .show = pdcspath_attr_show,
54353 .store = pdcspath_attr_store,
54354 };
54355diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
54356index 8eefe56..40751a7 100644
54357--- a/drivers/parport/procfs.c
54358+++ b/drivers/parport/procfs.c
54359@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
54360
54361 *ppos += len;
54362
54363- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
54364+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
54365 }
54366
54367 #ifdef CONFIG_PARPORT_1284
54368@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
54369
54370 *ppos += len;
54371
54372- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
54373+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
54374 }
54375 #endif /* IEEE1284.3 support. */
54376
54377diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
54378index 73e7d8e..c80f3d2 100644
54379--- a/drivers/pci/hotplug/acpiphp_glue.c
54380+++ b/drivers/pci/hotplug/acpiphp_glue.c
54381@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
54382 }
54383
54384
54385-static struct acpi_dock_ops acpiphp_dock_ops = {
54386+static const struct acpi_dock_ops acpiphp_dock_ops = {
54387 .handler = handle_hotplug_event_func,
54388 };
54389
54390diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
54391index 9fff878..ad0ad53 100644
54392--- a/drivers/pci/hotplug/cpci_hotplug.h
54393+++ b/drivers/pci/hotplug/cpci_hotplug.h
54394@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
54395 int (*hardware_test) (struct slot* slot, u32 value);
54396 u8 (*get_power) (struct slot* slot);
54397 int (*set_power) (struct slot* slot, int value);
54398-};
54399+} __no_const;
54400
54401 struct cpci_hp_controller {
54402 unsigned int irq;
54403diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
54404index 76ba8a1..20ca857 100644
54405--- a/drivers/pci/hotplug/cpqphp_nvram.c
54406+++ b/drivers/pci/hotplug/cpqphp_nvram.c
54407@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
54408
54409 void compaq_nvram_init (void __iomem *rom_start)
54410 {
54411+
54412+#ifndef CONFIG_PAX_KERNEXEC
54413 if (rom_start) {
54414 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
54415 }
54416+#endif
54417+
54418 dbg("int15 entry = %p\n", compaq_int15_entry_point);
54419
54420 /* initialize our int15 lock */
54421diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
54422index 6151389..0a894ef 100644
54423--- a/drivers/pci/hotplug/fakephp.c
54424+++ b/drivers/pci/hotplug/fakephp.c
54425@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
54426 }
54427
54428 static struct kobj_type legacy_ktype = {
54429- .sysfs_ops = &(struct sysfs_ops){
54430+ .sysfs_ops = &(const struct sysfs_ops){
54431 .store = legacy_store, .show = legacy_show
54432 },
54433 .release = &legacy_release,
54434diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
54435index 5b680df..fe05b7e 100644
54436--- a/drivers/pci/intel-iommu.c
54437+++ b/drivers/pci/intel-iommu.c
54438@@ -2643,7 +2643,7 @@ error:
54439 return 0;
54440 }
54441
54442-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
54443+dma_addr_t intel_map_page(struct device *dev, struct page *page,
54444 unsigned long offset, size_t size,
54445 enum dma_data_direction dir,
54446 struct dma_attrs *attrs)
54447@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
54448 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
54449 }
54450
54451-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54452+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54453 size_t size, enum dma_data_direction dir,
54454 struct dma_attrs *attrs)
54455 {
54456@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54457 }
54458 }
54459
54460-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54461+void *intel_alloc_coherent(struct device *hwdev, size_t size,
54462 dma_addr_t *dma_handle, gfp_t flags)
54463 {
54464 void *vaddr;
54465@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54466 return NULL;
54467 }
54468
54469-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54470+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54471 dma_addr_t dma_handle)
54472 {
54473 int order;
54474@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54475 free_pages((unsigned long)vaddr, order);
54476 }
54477
54478-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54479+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54480 int nelems, enum dma_data_direction dir,
54481 struct dma_attrs *attrs)
54482 {
54483@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
54484 return nelems;
54485 }
54486
54487-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54488+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54489 enum dma_data_direction dir, struct dma_attrs *attrs)
54490 {
54491 int i;
54492@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
54493 return nelems;
54494 }
54495
54496-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54497+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54498 {
54499 return !dma_addr;
54500 }
54501
54502-struct dma_map_ops intel_dma_ops = {
54503+const struct dma_map_ops intel_dma_ops = {
54504 .alloc_coherent = intel_alloc_coherent,
54505 .free_coherent = intel_free_coherent,
54506 .map_sg = intel_map_sg,
54507diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
54508index 5b7056c..607bc94 100644
54509--- a/drivers/pci/pcie/aspm.c
54510+++ b/drivers/pci/pcie/aspm.c
54511@@ -27,9 +27,9 @@
54512 #define MODULE_PARAM_PREFIX "pcie_aspm."
54513
54514 /* Note: those are not register definitions */
54515-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
54516-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
54517-#define ASPM_STATE_L1 (4) /* L1 state */
54518+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
54519+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
54520+#define ASPM_STATE_L1 (4U) /* L1 state */
54521 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
54522 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
54523
54524diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
54525index 8105e32..ca10419 100644
54526--- a/drivers/pci/probe.c
54527+++ b/drivers/pci/probe.c
54528@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
54529 return ret;
54530 }
54531
54532-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
54533+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
54534 struct device_attribute *attr,
54535 char *buf)
54536 {
54537 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
54538 }
54539
54540-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
54541+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
54542 struct device_attribute *attr,
54543 char *buf)
54544 {
54545diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
54546index a03ad8c..024b0da 100644
54547--- a/drivers/pci/proc.c
54548+++ b/drivers/pci/proc.c
54549@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
54550 static int __init pci_proc_init(void)
54551 {
54552 struct pci_dev *dev = NULL;
54553+
54554+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54555+#ifdef CONFIG_GRKERNSEC_PROC_USER
54556+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
54557+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54558+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54559+#endif
54560+#else
54561 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
54562+#endif
54563 proc_create("devices", 0, proc_bus_pci_dir,
54564 &proc_bus_pci_dev_operations);
54565 proc_initialized = 1;
54566diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
54567index 8c02b6c..5584d8e 100644
54568--- a/drivers/pci/slot.c
54569+++ b/drivers/pci/slot.c
54570@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
54571 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
54572 }
54573
54574-static struct sysfs_ops pci_slot_sysfs_ops = {
54575+static const struct sysfs_ops pci_slot_sysfs_ops = {
54576 .show = pci_slot_attr_show,
54577 .store = pci_slot_attr_store,
54578 };
54579diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
54580index 30cf71d2..50938f1 100644
54581--- a/drivers/pcmcia/pcmcia_ioctl.c
54582+++ b/drivers/pcmcia/pcmcia_ioctl.c
54583@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
54584 return -EFAULT;
54585 }
54586 }
54587- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54588+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54589 if (!buf)
54590 return -ENOMEM;
54591
54592diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
54593index 52183c4..b224c69 100644
54594--- a/drivers/platform/x86/acer-wmi.c
54595+++ b/drivers/platform/x86/acer-wmi.c
54596@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
54597 return 0;
54598 }
54599
54600-static struct backlight_ops acer_bl_ops = {
54601+static const struct backlight_ops acer_bl_ops = {
54602 .get_brightness = read_brightness,
54603 .update_status = update_bl_status,
54604 };
54605diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
54606index 767cb61..a87380b 100644
54607--- a/drivers/platform/x86/asus-laptop.c
54608+++ b/drivers/platform/x86/asus-laptop.c
54609@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
54610 */
54611 static int read_brightness(struct backlight_device *bd);
54612 static int update_bl_status(struct backlight_device *bd);
54613-static struct backlight_ops asusbl_ops = {
54614+static const struct backlight_ops asusbl_ops = {
54615 .get_brightness = read_brightness,
54616 .update_status = update_bl_status,
54617 };
54618diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
54619index d66c07a..a4abaac 100644
54620--- a/drivers/platform/x86/asus_acpi.c
54621+++ b/drivers/platform/x86/asus_acpi.c
54622@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
54623 return 0;
54624 }
54625
54626-static struct backlight_ops asus_backlight_data = {
54627+static const struct backlight_ops asus_backlight_data = {
54628 .get_brightness = read_brightness,
54629 .update_status = set_brightness_status,
54630 };
54631diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
54632index 11003bb..550ff1b 100644
54633--- a/drivers/platform/x86/compal-laptop.c
54634+++ b/drivers/platform/x86/compal-laptop.c
54635@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
54636 return set_lcd_level(b->props.brightness);
54637 }
54638
54639-static struct backlight_ops compalbl_ops = {
54640+static const struct backlight_ops compalbl_ops = {
54641 .get_brightness = bl_get_brightness,
54642 .update_status = bl_update_status,
54643 };
54644diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
54645index 07a74da..9dc99fa 100644
54646--- a/drivers/platform/x86/dell-laptop.c
54647+++ b/drivers/platform/x86/dell-laptop.c
54648@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
54649 return buffer.output[1];
54650 }
54651
54652-static struct backlight_ops dell_ops = {
54653+static const struct backlight_ops dell_ops = {
54654 .get_brightness = dell_get_intensity,
54655 .update_status = dell_send_intensity,
54656 };
54657diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
54658index c533b1c..5c81f22 100644
54659--- a/drivers/platform/x86/eeepc-laptop.c
54660+++ b/drivers/platform/x86/eeepc-laptop.c
54661@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
54662 */
54663 static int read_brightness(struct backlight_device *bd);
54664 static int update_bl_status(struct backlight_device *bd);
54665-static struct backlight_ops eeepcbl_ops = {
54666+static const struct backlight_ops eeepcbl_ops = {
54667 .get_brightness = read_brightness,
54668 .update_status = update_bl_status,
54669 };
54670diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
54671index bcd4ba8..a249b35 100644
54672--- a/drivers/platform/x86/fujitsu-laptop.c
54673+++ b/drivers/platform/x86/fujitsu-laptop.c
54674@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
54675 return ret;
54676 }
54677
54678-static struct backlight_ops fujitsubl_ops = {
54679+static const struct backlight_ops fujitsubl_ops = {
54680 .get_brightness = bl_get_brightness,
54681 .update_status = bl_update_status,
54682 };
54683diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
54684index 759763d..1093ba2 100644
54685--- a/drivers/platform/x86/msi-laptop.c
54686+++ b/drivers/platform/x86/msi-laptop.c
54687@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
54688 return set_lcd_level(b->props.brightness);
54689 }
54690
54691-static struct backlight_ops msibl_ops = {
54692+static const struct backlight_ops msibl_ops = {
54693 .get_brightness = bl_get_brightness,
54694 .update_status = bl_update_status,
54695 };
54696diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
54697index fe7cf01..9012d8d 100644
54698--- a/drivers/platform/x86/panasonic-laptop.c
54699+++ b/drivers/platform/x86/panasonic-laptop.c
54700@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
54701 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
54702 }
54703
54704-static struct backlight_ops pcc_backlight_ops = {
54705+static const struct backlight_ops pcc_backlight_ops = {
54706 .get_brightness = bl_get,
54707 .update_status = bl_set_status,
54708 };
54709diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
54710index a2a742c..b37e25e 100644
54711--- a/drivers/platform/x86/sony-laptop.c
54712+++ b/drivers/platform/x86/sony-laptop.c
54713@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
54714 }
54715
54716 static struct backlight_device *sony_backlight_device;
54717-static struct backlight_ops sony_backlight_ops = {
54718+static const struct backlight_ops sony_backlight_ops = {
54719 .update_status = sony_backlight_update_status,
54720 .get_brightness = sony_backlight_get_brightness,
54721 };
54722diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
54723index 68271ae..5e8fb10 100644
54724--- a/drivers/platform/x86/thinkpad_acpi.c
54725+++ b/drivers/platform/x86/thinkpad_acpi.c
54726@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
54727 return 0;
54728 }
54729
54730-void static hotkey_mask_warn_incomplete_mask(void)
54731+static void hotkey_mask_warn_incomplete_mask(void)
54732 {
54733 /* log only what the user can fix... */
54734 const u32 wantedmask = hotkey_driver_mask &
54735@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
54736 BACKLIGHT_UPDATE_HOTKEY);
54737 }
54738
54739-static struct backlight_ops ibm_backlight_data = {
54740+static const struct backlight_ops ibm_backlight_data = {
54741 .get_brightness = brightness_get,
54742 .update_status = brightness_update_status,
54743 };
54744diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
54745index 51c0a8b..0786629 100644
54746--- a/drivers/platform/x86/toshiba_acpi.c
54747+++ b/drivers/platform/x86/toshiba_acpi.c
54748@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
54749 return AE_OK;
54750 }
54751
54752-static struct backlight_ops toshiba_backlight_data = {
54753+static const struct backlight_ops toshiba_backlight_data = {
54754 .get_brightness = get_lcd,
54755 .update_status = set_lcd_status,
54756 };
54757diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
54758index fc83783c..cf370d7 100644
54759--- a/drivers/pnp/pnpbios/bioscalls.c
54760+++ b/drivers/pnp/pnpbios/bioscalls.c
54761@@ -60,7 +60,7 @@ do { \
54762 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
54763 } while(0)
54764
54765-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
54766+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
54767 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
54768
54769 /*
54770@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54771
54772 cpu = get_cpu();
54773 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
54774+
54775+ pax_open_kernel();
54776 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
54777+ pax_close_kernel();
54778
54779 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
54780 spin_lock_irqsave(&pnp_bios_lock, flags);
54781@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54782 :"memory");
54783 spin_unlock_irqrestore(&pnp_bios_lock, flags);
54784
54785+ pax_open_kernel();
54786 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
54787+ pax_close_kernel();
54788+
54789 put_cpu();
54790
54791 /* If we get here and this is set then the PnP BIOS faulted on us. */
54792@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
54793 return status;
54794 }
54795
54796-void pnpbios_calls_init(union pnp_bios_install_struct *header)
54797+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
54798 {
54799 int i;
54800
54801@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54802 pnp_bios_callpoint.offset = header->fields.pm16offset;
54803 pnp_bios_callpoint.segment = PNP_CS16;
54804
54805+ pax_open_kernel();
54806+
54807 for_each_possible_cpu(i) {
54808 struct desc_struct *gdt = get_cpu_gdt_table(i);
54809 if (!gdt)
54810@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54811 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
54812 (unsigned long)__va(header->fields.pm16dseg));
54813 }
54814+
54815+ pax_close_kernel();
54816 }
54817diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
54818index ba97654..66b99d4 100644
54819--- a/drivers/pnp/resource.c
54820+++ b/drivers/pnp/resource.c
54821@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
54822 return 1;
54823
54824 /* check if the resource is valid */
54825- if (*irq < 0 || *irq > 15)
54826+ if (*irq > 15)
54827 return 0;
54828
54829 /* check if the resource is reserved */
54830@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
54831 return 1;
54832
54833 /* check if the resource is valid */
54834- if (*dma < 0 || *dma == 4 || *dma > 7)
54835+ if (*dma == 4 || *dma > 7)
54836 return 0;
54837
54838 /* check if the resource is reserved */
54839diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
54840index 62bb981..24a2dc9 100644
54841--- a/drivers/power/bq27x00_battery.c
54842+++ b/drivers/power/bq27x00_battery.c
54843@@ -44,7 +44,7 @@ struct bq27x00_device_info;
54844 struct bq27x00_access_methods {
54845 int (*read)(u8 reg, int *rt_value, int b_single,
54846 struct bq27x00_device_info *di);
54847-};
54848+} __no_const;
54849
54850 struct bq27x00_device_info {
54851 struct device *dev;
54852diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
54853index 62227cd..b5b538b 100644
54854--- a/drivers/rtc/rtc-dev.c
54855+++ b/drivers/rtc/rtc-dev.c
54856@@ -14,6 +14,7 @@
54857 #include <linux/module.h>
54858 #include <linux/rtc.h>
54859 #include <linux/sched.h>
54860+#include <linux/grsecurity.h>
54861 #include "rtc-core.h"
54862
54863 static dev_t rtc_devt;
54864@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
54865 if (copy_from_user(&tm, uarg, sizeof(tm)))
54866 return -EFAULT;
54867
54868+ gr_log_timechange();
54869+
54870 return rtc_set_time(rtc, &tm);
54871
54872 case RTC_PIE_ON:
54873diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
54874index 968e3c7..fbc637a 100644
54875--- a/drivers/s390/cio/qdio_perf.c
54876+++ b/drivers/s390/cio/qdio_perf.c
54877@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
54878 static int qdio_perf_proc_show(struct seq_file *m, void *v)
54879 {
54880 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
54881- (long)atomic_long_read(&perf_stats.qdio_int));
54882+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
54883 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
54884- (long)atomic_long_read(&perf_stats.pci_int));
54885+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
54886 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
54887- (long)atomic_long_read(&perf_stats.thin_int));
54888+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
54889 seq_printf(m, "\n");
54890 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
54891- (long)atomic_long_read(&perf_stats.tasklet_inbound));
54892+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
54893 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
54894- (long)atomic_long_read(&perf_stats.tasklet_outbound));
54895+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
54896 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
54897- (long)atomic_long_read(&perf_stats.tasklet_thinint),
54898- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
54899+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
54900+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
54901 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
54902- (long)atomic_long_read(&perf_stats.thinint_inbound),
54903- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
54904+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
54905+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
54906 seq_printf(m, "\n");
54907 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
54908- (long)atomic_long_read(&perf_stats.siga_in));
54909+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
54910 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54911- (long)atomic_long_read(&perf_stats.siga_out));
54912+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
54913 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
54914- (long)atomic_long_read(&perf_stats.siga_sync));
54915+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
54916 seq_printf(m, "\n");
54917 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
54918- (long)atomic_long_read(&perf_stats.inbound_handler));
54919+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
54920 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
54921- (long)atomic_long_read(&perf_stats.outbound_handler));
54922+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
54923 seq_printf(m, "\n");
54924 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
54925- (long)atomic_long_read(&perf_stats.fast_requeue));
54926+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
54927 seq_printf(m, "Number of outbound target full condition\t: %li\n",
54928- (long)atomic_long_read(&perf_stats.outbound_target_full));
54929+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
54930 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
54931- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
54932+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
54933 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
54934- (long)atomic_long_read(&perf_stats.debug_stop_polling));
54935+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
54936 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
54937- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
54938+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
54939 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
54940- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
54941- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
54942+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
54943+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
54944 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
54945- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
54946- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
54947+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
54948+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
54949 seq_printf(m, "\n");
54950 return 0;
54951 }
54952diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
54953index ff4504c..b3604c3 100644
54954--- a/drivers/s390/cio/qdio_perf.h
54955+++ b/drivers/s390/cio/qdio_perf.h
54956@@ -13,46 +13,46 @@
54957
54958 struct qdio_perf_stats {
54959 /* interrupt handler calls */
54960- atomic_long_t qdio_int;
54961- atomic_long_t pci_int;
54962- atomic_long_t thin_int;
54963+ atomic_long_unchecked_t qdio_int;
54964+ atomic_long_unchecked_t pci_int;
54965+ atomic_long_unchecked_t thin_int;
54966
54967 /* tasklet runs */
54968- atomic_long_t tasklet_inbound;
54969- atomic_long_t tasklet_outbound;
54970- atomic_long_t tasklet_thinint;
54971- atomic_long_t tasklet_thinint_loop;
54972- atomic_long_t thinint_inbound;
54973- atomic_long_t thinint_inbound_loop;
54974- atomic_long_t thinint_inbound_loop2;
54975+ atomic_long_unchecked_t tasklet_inbound;
54976+ atomic_long_unchecked_t tasklet_outbound;
54977+ atomic_long_unchecked_t tasklet_thinint;
54978+ atomic_long_unchecked_t tasklet_thinint_loop;
54979+ atomic_long_unchecked_t thinint_inbound;
54980+ atomic_long_unchecked_t thinint_inbound_loop;
54981+ atomic_long_unchecked_t thinint_inbound_loop2;
54982
54983 /* signal adapter calls */
54984- atomic_long_t siga_out;
54985- atomic_long_t siga_in;
54986- atomic_long_t siga_sync;
54987+ atomic_long_unchecked_t siga_out;
54988+ atomic_long_unchecked_t siga_in;
54989+ atomic_long_unchecked_t siga_sync;
54990
54991 /* misc */
54992- atomic_long_t inbound_handler;
54993- atomic_long_t outbound_handler;
54994- atomic_long_t fast_requeue;
54995- atomic_long_t outbound_target_full;
54996+ atomic_long_unchecked_t inbound_handler;
54997+ atomic_long_unchecked_t outbound_handler;
54998+ atomic_long_unchecked_t fast_requeue;
54999+ atomic_long_unchecked_t outbound_target_full;
55000
55001 /* for debugging */
55002- atomic_long_t debug_tl_out_timer;
55003- atomic_long_t debug_stop_polling;
55004- atomic_long_t debug_eqbs_all;
55005- atomic_long_t debug_eqbs_incomplete;
55006- atomic_long_t debug_sqbs_all;
55007- atomic_long_t debug_sqbs_incomplete;
55008+ atomic_long_unchecked_t debug_tl_out_timer;
55009+ atomic_long_unchecked_t debug_stop_polling;
55010+ atomic_long_unchecked_t debug_eqbs_all;
55011+ atomic_long_unchecked_t debug_eqbs_incomplete;
55012+ atomic_long_unchecked_t debug_sqbs_all;
55013+ atomic_long_unchecked_t debug_sqbs_incomplete;
55014 };
55015
55016 extern struct qdio_perf_stats perf_stats;
55017 extern int qdio_performance_stats;
55018
55019-static inline void qdio_perf_stat_inc(atomic_long_t *count)
55020+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
55021 {
55022 if (qdio_performance_stats)
55023- atomic_long_inc(count);
55024+ atomic_long_inc_unchecked(count);
55025 }
55026
55027 int qdio_setup_perf_stats(void);
55028diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
55029new file mode 100644
55030index 0000000..7d18a18
55031--- /dev/null
55032+++ b/drivers/scsi/3w-sas.c
55033@@ -0,0 +1,1933 @@
55034+/*
55035+ 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
55036+
55037+ Written By: Adam Radford <linuxraid@lsi.com>
55038+
55039+ Copyright (C) 2009 LSI Corporation.
55040+
55041+ This program is free software; you can redistribute it and/or modify
55042+ it under the terms of the GNU General Public License as published by
55043+ the Free Software Foundation; version 2 of the License.
55044+
55045+ This program is distributed in the hope that it will be useful,
55046+ but WITHOUT ANY WARRANTY; without even the implied warranty of
55047+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55048+ GNU General Public License for more details.
55049+
55050+ NO WARRANTY
55051+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
55052+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
55053+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
55054+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
55055+ solely responsible for determining the appropriateness of using and
55056+ distributing the Program and assumes all risks associated with its
55057+ exercise of rights under this Agreement, including but not limited to
55058+ the risks and costs of program errors, damage to or loss of data,
55059+ programs or equipment, and unavailability or interruption of operations.
55060+
55061+ DISCLAIMER OF LIABILITY
55062+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
55063+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55064+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
55065+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
55066+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
55067+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
55068+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
55069+
55070+ You should have received a copy of the GNU General Public License
55071+ along with this program; if not, write to the Free Software
55072+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55073+
55074+ Controllers supported by this driver:
55075+
55076+ LSI 3ware 9750 6Gb/s SAS/SATA-RAID
55077+
55078+ Bugs/Comments/Suggestions should be mailed to:
55079+ linuxraid@lsi.com
55080+
55081+ For more information, goto:
55082+ http://www.lsi.com
55083+
55084+ History
55085+ -------
55086+ 3.26.00.000 - Initial driver release.
55087+*/
55088+
55089+#include <linux/module.h>
55090+#include <linux/reboot.h>
55091+#include <linux/spinlock.h>
55092+#include <linux/interrupt.h>
55093+#include <linux/moduleparam.h>
55094+#include <linux/errno.h>
55095+#include <linux/types.h>
55096+#include <linux/delay.h>
55097+#include <linux/pci.h>
55098+#include <linux/time.h>
55099+#include <linux/mutex.h>
55100+#include <linux/smp_lock.h>
55101+#include <asm/io.h>
55102+#include <asm/irq.h>
55103+#include <asm/uaccess.h>
55104+#include <scsi/scsi.h>
55105+#include <scsi/scsi_host.h>
55106+#include <scsi/scsi_tcq.h>
55107+#include <scsi/scsi_cmnd.h>
55108+#include "3w-sas.h"
55109+
55110+/* Globals */
55111+#define TW_DRIVER_VERSION "3.26.00.028-2.6.32RH"
55112+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
55113+static unsigned int twl_device_extension_count;
55114+static int twl_major = -1;
55115+extern struct timezone sys_tz;
55116+
55117+/* Module parameters */
55118+MODULE_AUTHOR ("LSI");
55119+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
55120+MODULE_LICENSE("GPL");
55121+MODULE_VERSION(TW_DRIVER_VERSION);
55122+
55123+static int use_msi = 0;
55124+module_param(use_msi, int, S_IRUGO);
55125+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
55126+
55127+/* Function prototypes */
55128+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
55129+
55130+/* Functions */
55131+
55132+/* This function returns AENs through sysfs */
55133+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
55134+ struct bin_attribute *bin_attr,
55135+ char *outbuf, loff_t offset, size_t count)
55136+{
55137+ struct device *dev = container_of(kobj, struct device, kobj);
55138+ struct Scsi_Host *shost = class_to_shost(dev);
55139+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55140+ unsigned long flags = 0;
55141+ ssize_t ret;
55142+
55143+ if (!capable(CAP_SYS_ADMIN))
55144+ return -EACCES;
55145+
55146+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55147+ ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
55148+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55149+
55150+ return ret;
55151+} /* End twl_sysfs_aen_read() */
55152+
55153+/* aen_read sysfs attribute initializer */
55154+static struct bin_attribute twl_sysfs_aen_read_attr = {
55155+ .attr = {
55156+ .name = "3ware_aen_read",
55157+ .mode = S_IRUSR,
55158+ },
55159+ .size = 0,
55160+ .read = twl_sysfs_aen_read
55161+};
55162+
55163+/* This function returns driver compatibility info through sysfs */
55164+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
55165+ struct bin_attribute *bin_attr,
55166+ char *outbuf, loff_t offset, size_t count)
55167+{
55168+ struct device *dev = container_of(kobj, struct device, kobj);
55169+ struct Scsi_Host *shost = class_to_shost(dev);
55170+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55171+ unsigned long flags = 0;
55172+ ssize_t ret;
55173+
55174+ if (!capable(CAP_SYS_ADMIN))
55175+ return -EACCES;
55176+
55177+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55178+ ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
55179+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55180+
55181+ return ret;
55182+} /* End twl_sysfs_compat_info() */
55183+
55184+/* compat_info sysfs attribute initializer */
55185+static struct bin_attribute twl_sysfs_compat_info_attr = {
55186+ .attr = {
55187+ .name = "3ware_compat_info",
55188+ .mode = S_IRUSR,
55189+ },
55190+ .size = 0,
55191+ .read = twl_sysfs_compat_info
55192+};
55193+
55194+/* Show some statistics about the card */
55195+static ssize_t twl_show_stats(struct device *dev,
55196+ struct device_attribute *attr, char *buf)
55197+{
55198+ struct Scsi_Host *host = class_to_shost(dev);
55199+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
55200+ unsigned long flags = 0;
55201+ ssize_t len;
55202+
55203+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55204+ len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
55205+ "Current commands posted: %4d\n"
55206+ "Max commands posted: %4d\n"
55207+ "Last sgl length: %4d\n"
55208+ "Max sgl length: %4d\n"
55209+ "Last sector count: %4d\n"
55210+ "Max sector count: %4d\n"
55211+ "SCSI Host Resets: %4d\n"
55212+ "AEN's: %4d\n",
55213+ TW_DRIVER_VERSION,
55214+ tw_dev->posted_request_count,
55215+ tw_dev->max_posted_request_count,
55216+ tw_dev->sgl_entries,
55217+ tw_dev->max_sgl_entries,
55218+ tw_dev->sector_count,
55219+ tw_dev->max_sector_count,
55220+ tw_dev->num_resets,
55221+ tw_dev->aen_count);
55222+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55223+ return len;
55224+} /* End twl_show_stats() */
55225+
55226+/* This function will set a devices queue depth */
55227+static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
55228+ int reason)
55229+{
55230+ if (reason != SCSI_QDEPTH_DEFAULT)
55231+ return -EOPNOTSUPP;
55232+
55233+ if (queue_depth > TW_Q_LENGTH-2)
55234+ queue_depth = TW_Q_LENGTH-2;
55235+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
55236+ return queue_depth;
55237+} /* End twl_change_queue_depth() */
55238+
55239+/* stats sysfs attribute initializer */
55240+static struct device_attribute twl_host_stats_attr = {
55241+ .attr = {
55242+ .name = "3ware_stats",
55243+ .mode = S_IRUGO,
55244+ },
55245+ .show = twl_show_stats
55246+};
55247+
55248+/* Host attributes initializer */
55249+static struct device_attribute *twl_host_attrs[] = {
55250+ &twl_host_stats_attr,
55251+ NULL,
55252+};
55253+
55254+/* This function will look up an AEN severity string */
55255+static char *twl_aen_severity_lookup(unsigned char severity_code)
55256+{
55257+ char *retval = NULL;
55258+
55259+ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
55260+ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
55261+ goto out;
55262+
55263+ retval = twl_aen_severity_table[severity_code];
55264+out:
55265+ return retval;
55266+} /* End twl_aen_severity_lookup() */
55267+
55268+/* This function will queue an event */
55269+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
55270+{
55271+ u32 local_time;
55272+ struct timeval time;
55273+ TW_Event *event;
55274+ unsigned short aen;
55275+ char host[16];
55276+ char *error_str;
55277+
55278+ tw_dev->aen_count++;
55279+
55280+ /* Fill out event info */
55281+ event = tw_dev->event_queue[tw_dev->error_index];
55282+
55283+ host[0] = '\0';
55284+ if (tw_dev->host)
55285+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
55286+
55287+ aen = le16_to_cpu(header->status_block.error);
55288+ memset(event, 0, sizeof(TW_Event));
55289+
55290+ event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
55291+ do_gettimeofday(&time);
55292+ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
55293+ event->time_stamp_sec = local_time;
55294+ event->aen_code = aen;
55295+ event->retrieved = TW_AEN_NOT_RETRIEVED;
55296+ event->sequence_id = tw_dev->error_sequence_id;
55297+ tw_dev->error_sequence_id++;
55298+
55299+ /* Check for embedded error string */
55300+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
55301+
55302+ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
55303+ event->parameter_len = strlen(header->err_specific_desc);
55304+ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
55305+ if (event->severity != TW_AEN_SEVERITY_DEBUG)
55306+ printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
55307+ host,
55308+ twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
55309+ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
55310+ header->err_specific_desc);
55311+ else
55312+ tw_dev->aen_count--;
55313+
55314+ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
55315+} /* End twl_aen_queue_event() */
55316+
55317+/* This function will attempt to post a command packet to the board */
55318+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
55319+{
55320+ dma_addr_t command_que_value;
55321+
55322+ command_que_value = tw_dev->command_packet_phys[request_id];
55323+ command_que_value += TW_COMMAND_OFFSET;
55324+
55325+ /* First write upper 4 bytes */
55326+ writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
55327+ /* Then the lower 4 bytes */
55328+ writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
55329+
55330+ tw_dev->state[request_id] = TW_S_POSTED;
55331+ tw_dev->posted_request_count++;
55332+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
55333+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
55334+
55335+ return 0;
55336+} /* End twl_post_command_packet() */
55337+
55338+/* This function will perform a pci-dma mapping for a scatter gather list */
55339+static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
55340+{
55341+ int use_sg;
55342+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
55343+
55344+ use_sg = scsi_dma_map(cmd);
55345+ if (!use_sg)
55346+ return 0;
55347+ else if (use_sg < 0) {
55348+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
55349+ return 0;
55350+ }
55351+
55352+ cmd->SCp.phase = TW_PHASE_SGLIST;
55353+ cmd->SCp.have_data_in = use_sg;
55354+
55355+ return use_sg;
55356+} /* End twl_map_scsi_sg_data() */
55357+
55358+/* This function hands scsi cdb's to the firmware */
55359+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
55360+{
55361+ TW_Command_Full *full_command_packet;
55362+ TW_Command_Apache *command_packet;
55363+ int i, sg_count;
55364+ struct scsi_cmnd *srb = NULL;
55365+ struct scatterlist *sglist = NULL, *sg;
55366+ int retval = 1;
55367+
55368+ if (tw_dev->srb[request_id]) {
55369+ srb = tw_dev->srb[request_id];
55370+ if (scsi_sglist(srb))
55371+ sglist = scsi_sglist(srb);
55372+ }
55373+
55374+ /* Initialize command packet */
55375+ full_command_packet = tw_dev->command_packet_virt[request_id];
55376+ full_command_packet->header.header_desc.size_header = 128;
55377+ full_command_packet->header.status_block.error = 0;
55378+ full_command_packet->header.status_block.severity__reserved = 0;
55379+
55380+ command_packet = &full_command_packet->command.newcommand;
55381+ command_packet->status = 0;
55382+ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
55383+
55384+ /* We forced 16 byte cdb use earlier */
55385+ if (!cdb)
55386+ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
55387+ else
55388+ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
55389+
55390+ if (srb) {
55391+ command_packet->unit = srb->device->id;
55392+ command_packet->request_id__lunl =
55393+ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
55394+ } else {
55395+ command_packet->request_id__lunl =
55396+ cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
55397+ command_packet->unit = 0;
55398+ }
55399+
55400+ command_packet->sgl_offset = 16;
55401+
55402+ if (!sglistarg) {
55403+ /* Map sglist from scsi layer to cmd packet */
55404+ if (scsi_sg_count(srb)) {
55405+ sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
55406+ if (sg_count == 0)
55407+ goto out;
55408+
55409+ scsi_for_each_sg(srb, sg, sg_count, i) {
55410+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
55411+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
55412+ }
55413+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
55414+ }
55415+ } else {
55416+ /* Internal cdb post */
55417+ for (i = 0; i < use_sg; i++) {
55418+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
55419+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
55420+ }
55421+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
55422+ }
55423+
55424+ /* Update some stats */
55425+ if (srb) {
55426+ tw_dev->sector_count = scsi_bufflen(srb) / 512;
55427+ if (tw_dev->sector_count > tw_dev->max_sector_count)
55428+ tw_dev->max_sector_count = tw_dev->sector_count;
55429+ tw_dev->sgl_entries = scsi_sg_count(srb);
55430+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
55431+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
55432+ }
55433+
55434+ /* Now post the command to the board */
55435+ retval = twl_post_command_packet(tw_dev, request_id);
55436+
55437+out:
55438+ return retval;
55439+} /* End twl_scsiop_execute_scsi() */
55440+
55441+/* This function will read the aen queue from the isr */
55442+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
55443+{
55444+ char cdb[TW_MAX_CDB_LEN];
55445+ TW_SG_Entry_ISO sglist[1];
55446+ TW_Command_Full *full_command_packet;
55447+ int retval = 1;
55448+
55449+ full_command_packet = tw_dev->command_packet_virt[request_id];
55450+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55451+
55452+ /* Initialize cdb */
55453+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55454+ cdb[0] = REQUEST_SENSE; /* opcode */
55455+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55456+
55457+ /* Initialize sglist */
55458+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55459+ sglist[0].length = TW_SECTOR_SIZE;
55460+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55461+
55462+ /* Mark internal command */
55463+ tw_dev->srb[request_id] = NULL;
55464+
55465+ /* Now post the command packet */
55466+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55467+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
55468+ goto out;
55469+ }
55470+ retval = 0;
55471+out:
55472+ return retval;
55473+} /* End twl_aen_read_queue() */
55474+
55475+/* This function will sync firmware time with the host time */
55476+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
55477+{
55478+ u32 schedulertime;
55479+ struct timeval utc;
55480+ TW_Command_Full *full_command_packet;
55481+ TW_Command *command_packet;
55482+ TW_Param_Apache *param;
55483+ u32 local_time;
55484+
55485+ /* Fill out the command packet */
55486+ full_command_packet = tw_dev->command_packet_virt[request_id];
55487+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55488+ command_packet = &full_command_packet->command.oldcommand;
55489+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
55490+ command_packet->request_id = request_id;
55491+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
55492+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
55493+ command_packet->size = TW_COMMAND_SIZE;
55494+ command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
55495+
55496+ /* Setup the param */
55497+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55498+ memset(param, 0, TW_SECTOR_SIZE);
55499+ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
55500+ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
55501+ param->parameter_size_bytes = cpu_to_le16(4);
55502+
55503+ /* Convert system time in UTC to local time seconds since last
55504+ Sunday 12:00AM */
55505+ do_gettimeofday(&utc);
55506+ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
55507+ schedulertime = local_time - (3 * 86400);
55508+ schedulertime = cpu_to_le32(schedulertime % 604800);
55509+
55510+ memcpy(param->data, &schedulertime, sizeof(u32));
55511+
55512+ /* Mark internal command */
55513+ tw_dev->srb[request_id] = NULL;
55514+
55515+ /* Now post the command */
55516+ twl_post_command_packet(tw_dev, request_id);
55517+} /* End twl_aen_sync_time() */
55518+
55519+/* This function will assign an available request id */
55520+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
55521+{
55522+ *request_id = tw_dev->free_queue[tw_dev->free_head];
55523+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
55524+ tw_dev->state[*request_id] = TW_S_STARTED;
55525+} /* End twl_get_request_id() */
55526+
55527+/* This function will free a request id */
55528+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
55529+{
55530+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
55531+ tw_dev->state[request_id] = TW_S_FINISHED;
55532+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
55533+} /* End twl_free_request_id() */
55534+
55535+/* This function will complete an aen request from the isr */
55536+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
55537+{
55538+ TW_Command_Full *full_command_packet;
55539+ TW_Command *command_packet;
55540+ TW_Command_Apache_Header *header;
55541+ unsigned short aen;
55542+ int retval = 1;
55543+
55544+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55545+ tw_dev->posted_request_count--;
55546+ aen = le16_to_cpu(header->status_block.error);
55547+ full_command_packet = tw_dev->command_packet_virt[request_id];
55548+ command_packet = &full_command_packet->command.oldcommand;
55549+
55550+ /* First check for internal completion of set param for time sync */
55551+ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
55552+ /* Keep reading the queue in case there are more aen's */
55553+ if (twl_aen_read_queue(tw_dev, request_id))
55554+ goto out2;
55555+ else {
55556+ retval = 0;
55557+ goto out;
55558+ }
55559+ }
55560+
55561+ switch (aen) {
55562+ case TW_AEN_QUEUE_EMPTY:
55563+ /* Quit reading the queue if this is the last one */
55564+ break;
55565+ case TW_AEN_SYNC_TIME_WITH_HOST:
55566+ twl_aen_sync_time(tw_dev, request_id);
55567+ retval = 0;
55568+ goto out;
55569+ default:
55570+ twl_aen_queue_event(tw_dev, header);
55571+
55572+ /* If there are more aen's, keep reading the queue */
55573+ if (twl_aen_read_queue(tw_dev, request_id))
55574+ goto out2;
55575+ else {
55576+ retval = 0;
55577+ goto out;
55578+ }
55579+ }
55580+ retval = 0;
55581+out2:
55582+ tw_dev->state[request_id] = TW_S_COMPLETED;
55583+ twl_free_request_id(tw_dev, request_id);
55584+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
55585+out:
55586+ return retval;
55587+} /* End twl_aen_complete() */
55588+
55589+/* This function will poll for a response */
55590+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
55591+{
55592+ unsigned long before;
55593+ dma_addr_t mfa;
55594+ u32 regh, regl;
55595+ u32 response;
55596+ int retval = 1;
55597+ int found = 0;
55598+
55599+ before = jiffies;
55600+
55601+ while (!found) {
55602+ if (sizeof(dma_addr_t) > 4) {
55603+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
55604+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55605+ mfa = ((u64)regh << 32) | regl;
55606+ } else
55607+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55608+
55609+ response = (u32)mfa;
55610+
55611+ if (TW_RESID_OUT(response) == request_id)
55612+ found = 1;
55613+
55614+ if (time_after(jiffies, before + HZ * seconds))
55615+ goto out;
55616+
55617+ msleep(50);
55618+ }
55619+ retval = 0;
55620+out:
55621+ return retval;
55622+} /* End twl_poll_response() */
55623+
55624+/* This function will drain the aen queue */
55625+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
55626+{
55627+ int request_id = 0;
55628+ char cdb[TW_MAX_CDB_LEN];
55629+ TW_SG_Entry_ISO sglist[1];
55630+ int finished = 0, count = 0;
55631+ TW_Command_Full *full_command_packet;
55632+ TW_Command_Apache_Header *header;
55633+ unsigned short aen;
55634+ int first_reset = 0, queue = 0, retval = 1;
55635+
55636+ if (no_check_reset)
55637+ first_reset = 0;
55638+ else
55639+ first_reset = 1;
55640+
55641+ full_command_packet = tw_dev->command_packet_virt[request_id];
55642+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55643+
55644+ /* Initialize cdb */
55645+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55646+ cdb[0] = REQUEST_SENSE; /* opcode */
55647+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55648+
55649+ /* Initialize sglist */
55650+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55651+ sglist[0].length = TW_SECTOR_SIZE;
55652+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55653+
55654+ /* Mark internal command */
55655+ tw_dev->srb[request_id] = NULL;
55656+
55657+ do {
55658+ /* Send command to the board */
55659+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55660+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
55661+ goto out;
55662+ }
55663+
55664+ /* Now poll for completion */
55665+ if (twl_poll_response(tw_dev, request_id, 30)) {
55666+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
55667+ tw_dev->posted_request_count--;
55668+ goto out;
55669+ }
55670+
55671+ tw_dev->posted_request_count--;
55672+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55673+ aen = le16_to_cpu(header->status_block.error);
55674+ queue = 0;
55675+ count++;
55676+
55677+ switch (aen) {
55678+ case TW_AEN_QUEUE_EMPTY:
55679+ if (first_reset != 1)
55680+ goto out;
55681+ else
55682+ finished = 1;
55683+ break;
55684+ case TW_AEN_SOFT_RESET:
55685+ if (first_reset == 0)
55686+ first_reset = 1;
55687+ else
55688+ queue = 1;
55689+ break;
55690+ case TW_AEN_SYNC_TIME_WITH_HOST:
55691+ break;
55692+ default:
55693+ queue = 1;
55694+ }
55695+
55696+ /* Now queue an event info */
55697+ if (queue)
55698+ twl_aen_queue_event(tw_dev, header);
55699+ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
55700+
55701+ if (count == TW_MAX_AEN_DRAIN)
55702+ goto out;
55703+
55704+ retval = 0;
55705+out:
55706+ tw_dev->state[request_id] = TW_S_INITIAL;
55707+ return retval;
55708+} /* End twl_aen_drain_queue() */
55709+
55710+/* This function will allocate memory and check if it is correctly aligned */
55711+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
55712+{
55713+ int i;
55714+ dma_addr_t dma_handle;
55715+ unsigned long *cpu_addr;
55716+ int retval = 1;
55717+
55718+ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
55719+ if (!cpu_addr) {
55720+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
55721+ goto out;
55722+ }
55723+
55724+ memset(cpu_addr, 0, size*TW_Q_LENGTH);
55725+
55726+ for (i = 0; i < TW_Q_LENGTH; i++) {
55727+ switch(which) {
55728+ case 0:
55729+ tw_dev->command_packet_phys[i] = dma_handle+(i*size);
55730+ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
55731+ break;
55732+ case 1:
55733+ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
55734+ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
55735+ break;
55736+ case 2:
55737+ tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
55738+ tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
55739+ break;
55740+ }
55741+ }
55742+ retval = 0;
55743+out:
55744+ return retval;
55745+} /* End twl_allocate_memory() */
55746+
55747+/* This function will load the request id and various sgls for ioctls */
55748+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
55749+{
55750+ TW_Command *oldcommand;
55751+ TW_Command_Apache *newcommand;
55752+ TW_SG_Entry_ISO *sgl;
55753+ unsigned int pae = 0;
55754+
55755+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
55756+ pae = 1;
55757+
55758+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
55759+ newcommand = &full_command_packet->command.newcommand;
55760+ newcommand->request_id__lunl =
55761+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
55762+ if (length) {
55763+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55764+ newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
55765+ }
55766+ newcommand->sgl_entries__lunh =
55767+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
55768+ } else {
55769+ oldcommand = &full_command_packet->command.oldcommand;
55770+ oldcommand->request_id = request_id;
55771+
55772+ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
55773+ /* Load the sg list */
55774+ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
55775+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55776+ sgl->length = TW_CPU_TO_SGL(length);
55777+ oldcommand->size += pae;
55778+ oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
55779+ }
55780+ }
55781+} /* End twl_load_sgl() */
55782+
55783+/* This function handles ioctl for the character device
55784+ This interface is used by smartmontools open source software */
55785+static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
55786+{
55787+ long timeout;
55788+ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
55789+ dma_addr_t dma_handle;
55790+ int request_id = 0;
55791+ TW_Ioctl_Driver_Command driver_command;
55792+ TW_Ioctl_Buf_Apache *tw_ioctl;
55793+ TW_Command_Full *full_command_packet;
55794+ TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
55795+ int retval = -EFAULT;
55796+ void __user *argp = (void __user *)arg;
55797+
55798+ /* Only let one of these through at a time */
55799+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
55800+ retval = -EINTR;
55801+ goto out;
55802+ }
55803+
55804+ /* First copy down the driver command */
55805+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
55806+ goto out2;
55807+
55808+ /* Check data buffer size */
55809+ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
55810+ retval = -EINVAL;
55811+ goto out2;
55812+ }
55813+
55814+ /* Hardware can only do multiple of 512 byte transfers */
55815+ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
55816+
55817+ /* Now allocate ioctl buf memory */
55818+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
55819+ if (!cpu_addr) {
55820+ retval = -ENOMEM;
55821+ goto out2;
55822+ }
55823+
55824+ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
55825+
55826+ /* Now copy down the entire ioctl */
55827+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
55828+ goto out3;
55829+
55830+ /* See which ioctl we are doing */
55831+ switch (cmd) {
55832+ case TW_IOCTL_FIRMWARE_PASS_THROUGH:
55833+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55834+ twl_get_request_id(tw_dev, &request_id);
55835+
55836+ /* Flag internal command */
55837+ tw_dev->srb[request_id] = NULL;
55838+
55839+ /* Flag chrdev ioctl */
55840+ tw_dev->chrdev_request_id = request_id;
55841+
55842+ full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
55843+
55844+ /* Load request id and sglist for both command types */
55845+ twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
55846+
55847+ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
55848+
55849+ /* Now post the command packet to the controller */
55850+ twl_post_command_packet(tw_dev, request_id);
55851+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55852+
55853+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
55854+
55855+ /* Now wait for command to complete */
55856+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
55857+
55858+ /* We timed out, and didn't get an interrupt */
55859+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
55860+ /* Now we need to reset the board */
55861+ printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
55862+ tw_dev->host->host_no, TW_DRIVER, 0x6,
55863+ cmd);
55864+ retval = -EIO;
55865+ twl_reset_device_extension(tw_dev, 1);
55866+ goto out3;
55867+ }
55868+
55869+ /* Now copy in the command packet response */
55870+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
55871+
55872+ /* Now complete the io */
55873+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55874+ tw_dev->posted_request_count--;
55875+ tw_dev->state[request_id] = TW_S_COMPLETED;
55876+ twl_free_request_id(tw_dev, request_id);
55877+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55878+ break;
55879+ default:
55880+ retval = -ENOTTY;
55881+ goto out3;
55882+ }
55883+
55884+ /* Now copy the entire response to userspace */
55885+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
55886+ retval = 0;
55887+out3:
55888+ /* Now free ioctl buf memory */
55889+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
55890+out2:
55891+ mutex_unlock(&tw_dev->ioctl_lock);
55892+out:
55893+ return retval;
55894+} /* End twl_chrdev_ioctl() */
55895+
55896+/* This function handles open for the character device */
55897+static int twl_chrdev_open(struct inode *inode, struct file *file)
55898+{
55899+ unsigned int minor_number;
55900+ int retval = -ENODEV;
55901+
55902+ if (!capable(CAP_SYS_ADMIN)) {
55903+ retval = -EACCES;
55904+ goto out;
55905+ }
55906+
55907+ cycle_kernel_lock();
55908+ minor_number = iminor(inode);
55909+ if (minor_number >= twl_device_extension_count)
55910+ goto out;
55911+ retval = 0;
55912+out:
55913+ return retval;
55914+} /* End twl_chrdev_open() */
55915+
55916+/* File operations struct for character device */
55917+static const struct file_operations twl_fops = {
55918+ .owner = THIS_MODULE,
55919+ .ioctl = twl_chrdev_ioctl,
55920+ .open = twl_chrdev_open,
55921+ .release = NULL
55922+};
55923+
55924+/* This function passes sense data from firmware to scsi layer */
55925+static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
55926+{
55927+ TW_Command_Apache_Header *header;
55928+ TW_Command_Full *full_command_packet;
55929+ unsigned short error;
55930+ char *error_str;
55931+ int retval = 1;
55932+
55933+ header = tw_dev->sense_buffer_virt[i];
55934+ full_command_packet = tw_dev->command_packet_virt[request_id];
55935+
55936+ /* Get embedded firmware error string */
55937+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
55938+
55939+ /* Don't print error for Logical unit not supported during rollcall */
55940+ error = le16_to_cpu(header->status_block.error);
55941+ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
55942+ if (print_host)
55943+ printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55944+ tw_dev->host->host_no,
55945+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55946+ header->status_block.error,
55947+ error_str,
55948+ header->err_specific_desc);
55949+ else
55950+ printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55951+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55952+ header->status_block.error,
55953+ error_str,
55954+ header->err_specific_desc);
55955+ }
55956+
55957+ if (copy_sense) {
55958+ memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
55959+ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
55960+ goto out;
55961+ }
55962+out:
55963+ return retval;
55964+} /* End twl_fill_sense() */
55965+
55966+/* This function will free up device extension resources */
55967+static void twl_free_device_extension(TW_Device_Extension *tw_dev)
55968+{
55969+ if (tw_dev->command_packet_virt[0])
55970+ pci_free_consistent(tw_dev->tw_pci_dev,
55971+ sizeof(TW_Command_Full)*TW_Q_LENGTH,
55972+ tw_dev->command_packet_virt[0],
55973+ tw_dev->command_packet_phys[0]);
55974+
55975+ if (tw_dev->generic_buffer_virt[0])
55976+ pci_free_consistent(tw_dev->tw_pci_dev,
55977+ TW_SECTOR_SIZE*TW_Q_LENGTH,
55978+ tw_dev->generic_buffer_virt[0],
55979+ tw_dev->generic_buffer_phys[0]);
55980+
55981+ if (tw_dev->sense_buffer_virt[0])
55982+ pci_free_consistent(tw_dev->tw_pci_dev,
55983+ sizeof(TW_Command_Apache_Header)*
55984+ TW_Q_LENGTH,
55985+ tw_dev->sense_buffer_virt[0],
55986+ tw_dev->sense_buffer_phys[0]);
55987+
55988+ kfree(tw_dev->event_queue[0]);
55989+} /* End twl_free_device_extension() */
55990+
55991+/* This function will get parameter table entries from the firmware */
55992+static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
55993+{
55994+ TW_Command_Full *full_command_packet;
55995+ TW_Command *command_packet;
55996+ TW_Param_Apache *param;
55997+ void *retval = NULL;
55998+
55999+ /* Setup the command packet */
56000+ full_command_packet = tw_dev->command_packet_virt[request_id];
56001+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
56002+ command_packet = &full_command_packet->command.oldcommand;
56003+
56004+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
56005+ command_packet->size = TW_COMMAND_SIZE;
56006+ command_packet->request_id = request_id;
56007+ command_packet->byte6_offset.block_count = cpu_to_le16(1);
56008+
56009+ /* Now setup the param */
56010+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
56011+ memset(param, 0, TW_SECTOR_SIZE);
56012+ param->table_id = cpu_to_le16(table_id | 0x8000);
56013+ param->parameter_id = cpu_to_le16(parameter_id);
56014+ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
56015+
56016+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
56017+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
56018+
56019+ /* Post the command packet to the board */
56020+ twl_post_command_packet(tw_dev, request_id);
56021+
56022+ /* Poll for completion */
56023+ if (twl_poll_response(tw_dev, request_id, 30))
56024+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
56025+ else
56026+ retval = (void *)&(param->data[0]);
56027+
56028+ tw_dev->posted_request_count--;
56029+ tw_dev->state[request_id] = TW_S_INITIAL;
56030+
56031+ return retval;
56032+} /* End twl_get_param() */
56033+
56034+/* This function will send an initconnection command to controller */
56035+static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
56036+ u32 set_features, unsigned short current_fw_srl,
56037+ unsigned short current_fw_arch_id,
56038+ unsigned short current_fw_branch,
56039+ unsigned short current_fw_build,
56040+ unsigned short *fw_on_ctlr_srl,
56041+ unsigned short *fw_on_ctlr_arch_id,
56042+ unsigned short *fw_on_ctlr_branch,
56043+ unsigned short *fw_on_ctlr_build,
56044+ u32 *init_connect_result)
56045+{
56046+ TW_Command_Full *full_command_packet;
56047+ TW_Initconnect *tw_initconnect;
56048+ int request_id = 0, retval = 1;
56049+
56050+ /* Initialize InitConnection command packet */
56051+ full_command_packet = tw_dev->command_packet_virt[request_id];
56052+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
56053+ full_command_packet->header.header_desc.size_header = 128;
56054+
56055+ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
56056+ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
56057+ tw_initconnect->request_id = request_id;
56058+ tw_initconnect->message_credits = cpu_to_le16(message_credits);
56059+ tw_initconnect->features = set_features;
56060+
56061+ /* Turn on 64-bit sgl support if we need to */
56062+ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
56063+
56064+ tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
56065+
56066+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
56067+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
56068+ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
56069+ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
56070+ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
56071+ tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
56072+ } else
56073+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
56074+
56075+ /* Send command packet to the board */
56076+ twl_post_command_packet(tw_dev, request_id);
56077+
56078+ /* Poll for completion */
56079+ if (twl_poll_response(tw_dev, request_id, 30)) {
56080+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
56081+ } else {
56082+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
56083+ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
56084+ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
56085+ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
56086+ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
56087+ *init_connect_result = le32_to_cpu(tw_initconnect->result);
56088+ }
56089+ retval = 0;
56090+ }
56091+
56092+ tw_dev->posted_request_count--;
56093+ tw_dev->state[request_id] = TW_S_INITIAL;
56094+
56095+ return retval;
56096+} /* End twl_initconnection() */
56097+
56098+/* This function will initialize the fields of a device extension */
56099+static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
56100+{
56101+ int i, retval = 1;
56102+
56103+ /* Initialize command packet buffers */
56104+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
56105+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
56106+ goto out;
56107+ }
56108+
56109+ /* Initialize generic buffer */
56110+ if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
56111+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
56112+ goto out;
56113+ }
56114+
56115+ /* Allocate sense buffers */
56116+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
56117+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
56118+ goto out;
56119+ }
56120+
56121+ /* Allocate event info space */
56122+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
56123+ if (!tw_dev->event_queue[0]) {
56124+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
56125+ goto out;
56126+ }
56127+
56128+ for (i = 0; i < TW_Q_LENGTH; i++) {
56129+ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
56130+ tw_dev->free_queue[i] = i;
56131+ tw_dev->state[i] = TW_S_INITIAL;
56132+ }
56133+
56134+ tw_dev->free_head = TW_Q_START;
56135+ tw_dev->free_tail = TW_Q_START;
56136+ tw_dev->error_sequence_id = 1;
56137+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56138+
56139+ mutex_init(&tw_dev->ioctl_lock);
56140+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
56141+
56142+ retval = 0;
56143+out:
56144+ return retval;
56145+} /* End twl_initialize_device_extension() */
56146+
56147+/* This function will perform a pci-dma unmap */
56148+static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
56149+{
56150+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
56151+
56152+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
56153+ scsi_dma_unmap(cmd);
56154+} /* End twl_unmap_scsi_data() */
56155+
56156+/* This function will handle attention interrupts */
56157+static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
56158+{
56159+ int retval = 1;
56160+ u32 request_id, doorbell;
56161+
56162+ /* Read doorbell status */
56163+ doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
56164+
56165+ /* Check for controller errors */
56166+ if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
56167+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
56168+ goto out;
56169+ }
56170+
56171+ /* Check if we need to perform an AEN drain */
56172+ if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
56173+ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
56174+ twl_get_request_id(tw_dev, &request_id);
56175+ if (twl_aen_read_queue(tw_dev, request_id)) {
56176+ tw_dev->state[request_id] = TW_S_COMPLETED;
56177+ twl_free_request_id(tw_dev, request_id);
56178+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
56179+ }
56180+ }
56181+ }
56182+
56183+ retval = 0;
56184+out:
56185+ /* Clear doorbell interrupt */
56186+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56187+
56188+ /* Make sure the clear was flushed by reading it back */
56189+ readl(TWL_HOBDBC_REG_ADDR(tw_dev));
56190+
56191+ return retval;
56192+} /* End twl_handle_attention_interrupt() */
56193+
56194+/* Interrupt service routine */
56195+static irqreturn_t twl_interrupt(int irq, void *dev_instance)
56196+{
56197+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
56198+ int i, handled = 0, error = 0;
56199+ dma_addr_t mfa = 0;
56200+ u32 reg, regl, regh, response, request_id = 0;
56201+ struct scsi_cmnd *cmd;
56202+ TW_Command_Full *full_command_packet;
56203+
56204+ spin_lock(tw_dev->host->host_lock);
56205+
56206+ /* Read host interrupt status */
56207+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56208+
56209+ /* Check if this is our interrupt, otherwise bail */
56210+ if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
56211+ goto twl_interrupt_bail;
56212+
56213+ handled = 1;
56214+
56215+ /* If we are resetting, bail */
56216+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
56217+ goto twl_interrupt_bail;
56218+
56219+ /* Attention interrupt */
56220+ if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
56221+ if (twl_handle_attention_interrupt(tw_dev)) {
56222+ TWL_MASK_INTERRUPTS(tw_dev);
56223+ goto twl_interrupt_bail;
56224+ }
56225+ }
56226+
56227+ /* Response interrupt */
56228+ while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
56229+ if (sizeof(dma_addr_t) > 4) {
56230+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
56231+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56232+ mfa = ((u64)regh << 32) | regl;
56233+ } else
56234+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56235+
56236+ error = 0;
56237+ response = (u32)mfa;
56238+
56239+ /* Check for command packet error */
56240+ if (!TW_NOTMFA_OUT(response)) {
56241+ for (i=0;i<TW_Q_LENGTH;i++) {
56242+ if (tw_dev->sense_buffer_phys[i] == mfa) {
56243+ request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
56244+ if (tw_dev->srb[request_id] != NULL)
56245+ error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
56246+ else {
56247+ /* Skip ioctl error prints */
56248+ if (request_id != tw_dev->chrdev_request_id)
56249+ error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
56250+ else
56251+ memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
56252+ }
56253+
56254+ /* Now re-post the sense buffer */
56255+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56256+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56257+ break;
56258+ }
56259+ }
56260+ } else
56261+ request_id = TW_RESID_OUT(response);
56262+
56263+ full_command_packet = tw_dev->command_packet_virt[request_id];
56264+
56265+ /* Check for correct state */
56266+ if (tw_dev->state[request_id] != TW_S_POSTED) {
56267+ if (tw_dev->srb[request_id] != NULL) {
56268+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
56269+ TWL_MASK_INTERRUPTS(tw_dev);
56270+ goto twl_interrupt_bail;
56271+ }
56272+ }
56273+
56274+ /* Check for internal command completion */
56275+ if (tw_dev->srb[request_id] == NULL) {
56276+ if (request_id != tw_dev->chrdev_request_id) {
56277+ if (twl_aen_complete(tw_dev, request_id))
56278+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
56279+ } else {
56280+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56281+ wake_up(&tw_dev->ioctl_wqueue);
56282+ }
56283+ } else {
56284+ cmd = tw_dev->srb[request_id];
56285+
56286+ if (!error)
56287+ cmd->result = (DID_OK << 16);
56288+
56289+ /* Report residual bytes for single sgl */
56290+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
56291+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
56292+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
56293+ }
56294+
56295+ /* Now complete the io */
56296+ tw_dev->state[request_id] = TW_S_COMPLETED;
56297+ twl_free_request_id(tw_dev, request_id);
56298+ tw_dev->posted_request_count--;
56299+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
56300+ twl_unmap_scsi_data(tw_dev, request_id);
56301+ }
56302+
56303+ /* Check for another response interrupt */
56304+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56305+ }
56306+
56307+twl_interrupt_bail:
56308+ spin_unlock(tw_dev->host->host_lock);
56309+ return IRQ_RETVAL(handled);
56310+} /* End twl_interrupt() */
56311+
56312+/* This function will poll for a register change */
56313+static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
56314+{
56315+ unsigned long before;
56316+ int retval = 1;
56317+ u32 reg_value;
56318+
56319+ reg_value = readl(reg);
56320+ before = jiffies;
56321+
56322+ while ((reg_value & value) != result) {
56323+ reg_value = readl(reg);
56324+ if (time_after(jiffies, before + HZ * seconds))
56325+ goto out;
56326+ msleep(50);
56327+ }
56328+ retval = 0;
56329+out:
56330+ return retval;
56331+} /* End twl_poll_register() */
56332+
56333+/* This function will reset a controller */
56334+static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
56335+{
56336+ int retval = 1;
56337+ int i = 0;
56338+ u32 status = 0;
56339+ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
56340+ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
56341+ u32 init_connect_result = 0;
56342+ int tries = 0;
56343+ int do_soft_reset = soft_reset;
56344+
56345+ while (tries < TW_MAX_RESET_TRIES) {
56346+ /* Do a soft reset if one is needed */
56347+ if (do_soft_reset) {
56348+ TWL_SOFT_RESET(tw_dev);
56349+
56350+ /* Make sure controller is in a good state */
56351+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
56352+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
56353+ tries++;
56354+ continue;
56355+ }
56356+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
56357+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
56358+ tries++;
56359+ continue;
56360+ }
56361+ }
56362+
56363+ /* Initconnect */
56364+ if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
56365+ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
56366+ TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
56367+ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
56368+ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
56369+ &fw_on_ctlr_build, &init_connect_result)) {
56370+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
56371+ do_soft_reset = 1;
56372+ tries++;
56373+ continue;
56374+ }
56375+
56376+ /* Load sense buffers */
56377+ while (i < TW_Q_LENGTH) {
56378+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56379+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56380+
56381+ /* Check status for over-run after each write */
56382+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56383+ if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
56384+ i++;
56385+ }
56386+
56387+ /* Now check status */
56388+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56389+ if (status) {
56390+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
56391+ do_soft_reset = 1;
56392+ tries++;
56393+ continue;
56394+ }
56395+
56396+ /* Drain the AEN queue */
56397+ if (twl_aen_drain_queue(tw_dev, soft_reset)) {
56398+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
56399+ do_soft_reset = 1;
56400+ tries++;
56401+ continue;
56402+ }
56403+
56404+ /* Load rest of compatibility struct */
56405+ strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
56406+ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
56407+ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
56408+ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
56409+ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
56410+ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
56411+ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
56412+ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
56413+ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
56414+ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
56415+
56416+ /* If we got here, controller is in a good state */
56417+ retval = 0;
56418+ goto out;
56419+ }
56420+out:
56421+ return retval;
56422+} /* End twl_reset_sequence() */
56423+
56424+/* This function will reset a device extension */
56425+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
56426+{
56427+ int i = 0, retval = 1;
56428+ unsigned long flags = 0;
56429+
56430+ /* Block SCSI requests while we are resetting */
56431+ if (ioctl_reset)
56432+ scsi_block_requests(tw_dev->host);
56433+
56434+ set_bit(TW_IN_RESET, &tw_dev->flags);
56435+ TWL_MASK_INTERRUPTS(tw_dev);
56436+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56437+
56438+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
56439+
56440+ /* Abort all requests that are in progress */
56441+ for (i = 0; i < TW_Q_LENGTH; i++) {
56442+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
56443+ (tw_dev->state[i] != TW_S_INITIAL) &&
56444+ (tw_dev->state[i] != TW_S_COMPLETED)) {
56445+ if (tw_dev->srb[i]) {
56446+ tw_dev->srb[i]->result = (DID_RESET << 16);
56447+ tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
56448+ twl_unmap_scsi_data(tw_dev, i);
56449+ }
56450+ }
56451+ }
56452+
56453+ /* Reset queues and counts */
56454+ for (i = 0; i < TW_Q_LENGTH; i++) {
56455+ tw_dev->free_queue[i] = i;
56456+ tw_dev->state[i] = TW_S_INITIAL;
56457+ }
56458+ tw_dev->free_head = TW_Q_START;
56459+ tw_dev->free_tail = TW_Q_START;
56460+ tw_dev->posted_request_count = 0;
56461+
56462+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
56463+
56464+ if (twl_reset_sequence(tw_dev, 1))
56465+ goto out;
56466+
56467+ TWL_UNMASK_INTERRUPTS(tw_dev);
56468+
56469+ clear_bit(TW_IN_RESET, &tw_dev->flags);
56470+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56471+
56472+ retval = 0;
56473+out:
56474+ if (ioctl_reset)
56475+ scsi_unblock_requests(tw_dev->host);
56476+ return retval;
56477+} /* End twl_reset_device_extension() */
56478+
56479+/* This funciton returns unit geometry in cylinders/heads/sectors */
56480+static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
56481+{
56482+ int heads, sectors, cylinders;
56483+ TW_Device_Extension *tw_dev;
56484+
56485+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
56486+
56487+ if (capacity >= 0x200000) {
56488+ heads = 255;
56489+ sectors = 63;
56490+ cylinders = sector_div(capacity, heads * sectors);
56491+ } else {
56492+ heads = 64;
56493+ sectors = 32;
56494+ cylinders = sector_div(capacity, heads * sectors);
56495+ }
56496+
56497+ geom[0] = heads;
56498+ geom[1] = sectors;
56499+ geom[2] = cylinders;
56500+
56501+ return 0;
56502+} /* End twl_scsi_biosparam() */
56503+
56504+/* This is the new scsi eh reset function */
56505+static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
56506+{
56507+ TW_Device_Extension *tw_dev = NULL;
56508+ int retval = FAILED;
56509+
56510+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56511+
56512+ tw_dev->num_resets++;
56513+
56514+ sdev_printk(KERN_WARNING, SCpnt->device,
56515+ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
56516+ TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
56517+
56518+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
56519+ mutex_lock(&tw_dev->ioctl_lock);
56520+
56521+ /* Now reset the card and some of the device extension data */
56522+ if (twl_reset_device_extension(tw_dev, 0)) {
56523+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
56524+ goto out;
56525+ }
56526+
56527+ retval = SUCCESS;
56528+out:
56529+ mutex_unlock(&tw_dev->ioctl_lock);
56530+ return retval;
56531+} /* End twl_scsi_eh_reset() */
56532+
56533+/* This is the main scsi queue function to handle scsi opcodes */
56534+static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
56535+{
56536+ int request_id, retval;
56537+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56538+
56539+ /* If we are resetting due to timed out ioctl, report as busy */
56540+ if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
56541+ retval = SCSI_MLQUEUE_HOST_BUSY;
56542+ goto out;
56543+ }
56544+
56545+ /* Save done function into scsi_cmnd struct */
56546+ SCpnt->scsi_done = done;
56547+
56548+ /* Get a free request id */
56549+ twl_get_request_id(tw_dev, &request_id);
56550+
56551+ /* Save the scsi command for use by the ISR */
56552+ tw_dev->srb[request_id] = SCpnt;
56553+
56554+ /* Initialize phase to zero */
56555+ SCpnt->SCp.phase = TW_PHASE_INITIAL;
56556+
56557+ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
56558+ if (retval) {
56559+ tw_dev->state[request_id] = TW_S_COMPLETED;
56560+ twl_free_request_id(tw_dev, request_id);
56561+ SCpnt->result = (DID_ERROR << 16);
56562+ done(SCpnt);
56563+ retval = 0;
56564+ }
56565+out:
56566+ return retval;
56567+} /* End twl_scsi_queue() */
56568+
56569+/* This function tells the controller to shut down */
56570+static void __twl_shutdown(TW_Device_Extension *tw_dev)
56571+{
56572+ /* Disable interrupts */
56573+ TWL_MASK_INTERRUPTS(tw_dev);
56574+
56575+ /* Free up the IRQ */
56576+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56577+
56578+ printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
56579+
56580+ /* Tell the card we are shutting down */
56581+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56582+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
56583+ } else {
56584+ printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
56585+ }
56586+
56587+ /* Clear doorbell interrupt just before exit */
56588+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56589+} /* End __twl_shutdown() */
56590+
56591+/* Wrapper for __twl_shutdown */
56592+static void twl_shutdown(struct pci_dev *pdev)
56593+{
56594+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56595+ TW_Device_Extension *tw_dev;
56596+
56597+ if (!host)
56598+ return;
56599+
56600+ tw_dev = (TW_Device_Extension *)host->hostdata;
56601+
56602+ if (tw_dev->online)
56603+ __twl_shutdown(tw_dev);
56604+} /* End twl_shutdown() */
56605+
56606+/* This function configures unit settings when a unit is coming on-line */
56607+static int twl_slave_configure(struct scsi_device *sdev)
56608+{
56609+ /* Force 60 second timeout */
56610+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
56611+
56612+ return 0;
56613+} /* End twl_slave_configure() */
56614+
56615+/* scsi_host_template initializer */
56616+static struct scsi_host_template driver_template = {
56617+ .module = THIS_MODULE,
56618+ .name = "3w-sas",
56619+ .queuecommand = twl_scsi_queue,
56620+ .eh_host_reset_handler = twl_scsi_eh_reset,
56621+ .bios_param = twl_scsi_biosparam,
56622+ .change_queue_depth = twl_change_queue_depth,
56623+ .can_queue = TW_Q_LENGTH-2,
56624+ .slave_configure = twl_slave_configure,
56625+ .this_id = -1,
56626+ .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
56627+ .max_sectors = TW_MAX_SECTORS,
56628+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
56629+ .use_clustering = ENABLE_CLUSTERING,
56630+ .shost_attrs = twl_host_attrs,
56631+ .emulated = 1
56632+};
56633+
56634+/* This function will probe and initialize a card */
56635+static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
56636+{
56637+ struct Scsi_Host *host = NULL;
56638+ TW_Device_Extension *tw_dev;
56639+ resource_size_t mem_addr, mem_len;
56640+ int retval = -ENODEV;
56641+ int *ptr_phycount, phycount=0;
56642+
56643+ retval = pci_enable_device(pdev);
56644+ if (retval) {
56645+ TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
56646+ goto out_disable_device;
56647+ }
56648+
56649+ pci_set_master(pdev);
56650+ pci_try_set_mwi(pdev);
56651+
56652+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56653+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56654+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56655+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56656+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
56657+ retval = -ENODEV;
56658+ goto out_disable_device;
56659+ }
56660+
56661+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
56662+ if (!host) {
56663+ TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
56664+ retval = -ENOMEM;
56665+ goto out_disable_device;
56666+ }
56667+ tw_dev = (TW_Device_Extension *)host->hostdata;
56668+
56669+ /* Save values to device extension */
56670+ tw_dev->host = host;
56671+ tw_dev->tw_pci_dev = pdev;
56672+
56673+ if (twl_initialize_device_extension(tw_dev)) {
56674+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
56675+ goto out_free_device_extension;
56676+ }
56677+
56678+ /* Request IO regions */
56679+ retval = pci_request_regions(pdev, "3w-sas");
56680+ if (retval) {
56681+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
56682+ goto out_free_device_extension;
56683+ }
56684+
56685+ /* Use region 1 */
56686+ mem_addr = pci_resource_start(pdev, 1);
56687+ mem_len = pci_resource_len(pdev, 1);
56688+
56689+ /* Save base address */
56690+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
56691+
56692+ if (!tw_dev->base_addr) {
56693+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
56694+ goto out_release_mem_region;
56695+ }
56696+
56697+ /* Disable interrupts on the card */
56698+ TWL_MASK_INTERRUPTS(tw_dev);
56699+
56700+ /* Initialize the card */
56701+ if (twl_reset_sequence(tw_dev, 0)) {
56702+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
56703+ goto out_iounmap;
56704+ }
56705+
56706+ /* Set host specific parameters */
56707+ host->max_id = TW_MAX_UNITS;
56708+ host->max_cmd_len = TW_MAX_CDB_LEN;
56709+ host->max_lun = TW_MAX_LUNS;
56710+ host->max_channel = 0;
56711+
56712+ /* Register the card with the kernel SCSI layer */
56713+ retval = scsi_add_host(host, &pdev->dev);
56714+ if (retval) {
56715+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
56716+ goto out_iounmap;
56717+ }
56718+
56719+ pci_set_drvdata(pdev, host);
56720+
56721+ printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
56722+ host->host_no,
56723+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56724+ TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
56725+ (u64)mem_addr, pdev->irq);
56726+
56727+ ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
56728+ TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
56729+ if (ptr_phycount)
56730+ phycount = le32_to_cpu(*(int *)ptr_phycount);
56731+
56732+ printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
56733+ host->host_no,
56734+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56735+ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
56736+ (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
56737+ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
56738+ phycount);
56739+
56740+ /* Try to enable MSI */
56741+ if (use_msi && !pci_enable_msi(pdev))
56742+ set_bit(TW_USING_MSI, &tw_dev->flags);
56743+
56744+ /* Now setup the interrupt handler */
56745+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56746+ if (retval) {
56747+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
56748+ goto out_remove_host;
56749+ }
56750+
56751+ twl_device_extension_list[twl_device_extension_count] = tw_dev;
56752+ twl_device_extension_count++;
56753+
56754+ /* Re-enable interrupts on the card */
56755+ TWL_UNMASK_INTERRUPTS(tw_dev);
56756+
56757+ /* Finally, scan the host */
56758+ scsi_scan_host(host);
56759+
56760+ /* Add sysfs binary files */
56761+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
56762+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
56763+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
56764+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
56765+
56766+ if (twl_major == -1) {
56767+ if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
56768+ TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
56769+ }
56770+ tw_dev->online = 1;
56771+ return 0;
56772+
56773+out_remove_host:
56774+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56775+ pci_disable_msi(pdev);
56776+ scsi_remove_host(host);
56777+out_iounmap:
56778+ iounmap(tw_dev->base_addr);
56779+out_release_mem_region:
56780+ pci_release_regions(pdev);
56781+out_free_device_extension:
56782+ twl_free_device_extension(tw_dev);
56783+ scsi_host_put(host);
56784+out_disable_device:
56785+ pci_disable_device(pdev);
56786+
56787+ return retval;
56788+} /* End twl_probe() */
56789+
56790+/* This function is called to remove a device */
56791+static void twl_remove(struct pci_dev *pdev)
56792+{
56793+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56794+ TW_Device_Extension *tw_dev;
56795+
56796+ if (!host)
56797+ return;
56798+
56799+ tw_dev = (TW_Device_Extension *)host->hostdata;
56800+
56801+ if (!tw_dev->online)
56802+ return;
56803+
56804+ /* Remove sysfs binary files */
56805+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
56806+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
56807+
56808+ scsi_remove_host(tw_dev->host);
56809+
56810+ /* Unregister character device */
56811+ if (twl_major >= 0) {
56812+ unregister_chrdev(twl_major, "twl");
56813+ twl_major = -1;
56814+ }
56815+
56816+ /* Shutdown the card */
56817+ __twl_shutdown(tw_dev);
56818+
56819+ /* Disable MSI if enabled */
56820+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56821+ pci_disable_msi(pdev);
56822+
56823+ /* Free IO remapping */
56824+ iounmap(tw_dev->base_addr);
56825+
56826+ /* Free up the mem region */
56827+ pci_release_regions(pdev);
56828+
56829+ /* Free up device extension resources */
56830+ twl_free_device_extension(tw_dev);
56831+
56832+ scsi_host_put(tw_dev->host);
56833+ pci_disable_device(pdev);
56834+ twl_device_extension_count--;
56835+} /* End twl_remove() */
56836+
56837+#ifdef CONFIG_PM
56838+/* This function is called on PCI suspend */
56839+static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
56840+{
56841+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56842+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56843+
56844+ printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
56845+ /* Disable interrupts */
56846+ TWL_MASK_INTERRUPTS(tw_dev);
56847+
56848+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56849+
56850+ /* Tell the card we are shutting down */
56851+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56852+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
56853+ } else {
56854+ printk(KERN_WARNING "3w-sas: Suspend complete.\n");
56855+ }
56856+
56857+ /* Clear doorbell interrupt */
56858+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56859+
56860+ pci_save_state(pdev);
56861+ pci_disable_device(pdev);
56862+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
56863+
56864+ return 0;
56865+} /* End twl_suspend() */
56866+
56867+/* This function is called on PCI resume */
56868+static int twl_resume(struct pci_dev *pdev)
56869+{
56870+ int retval = 0;
56871+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56872+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56873+
56874+ printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
56875+ pci_set_power_state(pdev, PCI_D0);
56876+ pci_enable_wake(pdev, PCI_D0, 0);
56877+ pci_restore_state(pdev);
56878+
56879+ retval = pci_enable_device(pdev);
56880+ if (retval) {
56881+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
56882+ return retval;
56883+ }
56884+
56885+ pci_set_master(pdev);
56886+ pci_try_set_mwi(pdev);
56887+
56888+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56889+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56890+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56891+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56892+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
56893+ retval = -ENODEV;
56894+ goto out_disable_device;
56895+ }
56896+
56897+ /* Initialize the card */
56898+ if (twl_reset_sequence(tw_dev, 0)) {
56899+ retval = -ENODEV;
56900+ goto out_disable_device;
56901+ }
56902+
56903+ /* Now setup the interrupt handler */
56904+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56905+ if (retval) {
56906+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
56907+ retval = -ENODEV;
56908+ goto out_disable_device;
56909+ }
56910+
56911+ /* Now enable MSI if enabled */
56912+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56913+ pci_enable_msi(pdev);
56914+
56915+ /* Re-enable interrupts on the card */
56916+ TWL_UNMASK_INTERRUPTS(tw_dev);
56917+
56918+ printk(KERN_WARNING "3w-sas: Resume complete.\n");
56919+ return 0;
56920+
56921+out_disable_device:
56922+ scsi_remove_host(host);
56923+ pci_disable_device(pdev);
56924+
56925+ return retval;
56926+} /* End twl_resume() */
56927+#endif
56928+
56929+/* PCI Devices supported by this driver */
56930+static struct pci_device_id twl_pci_tbl[] __devinitdata = {
56931+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9750,
56932+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56933+ { }
56934+};
56935+MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
56936+
56937+/* pci_driver initializer */
56938+static struct pci_driver twl_driver = {
56939+ .name = "3w-sas",
56940+ .id_table = twl_pci_tbl,
56941+ .probe = twl_probe,
56942+ .remove = twl_remove,
56943+#ifdef CONFIG_PM
56944+ .suspend = twl_suspend,
56945+ .resume = twl_resume,
56946+#endif
56947+ .shutdown = twl_shutdown
56948+};
56949+
56950+/* This function is called on driver initialization */
56951+static int __init twl_init(void)
56952+{
56953+ printk(KERN_WARNING "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
56954+
56955+ return pci_register_driver(&twl_driver);
56956+} /* End twl_init() */
56957+
56958+/* This function is called on driver exit */
56959+static void __exit twl_exit(void)
56960+{
56961+ pci_unregister_driver(&twl_driver);
56962+} /* End twl_exit() */
56963+
56964+module_init(twl_init);
56965+module_exit(twl_exit);
56966+
56967diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
56968new file mode 100644
56969index 0000000..e620505
56970--- /dev/null
56971+++ b/drivers/scsi/3w-sas.h
56972@@ -0,0 +1,396 @@
56973+/*
56974+ 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
56975+
56976+ Written By: Adam Radford <linuxraid@lsi.com>
56977+
56978+ Copyright (C) 2009 LSI Corporation.
56979+
56980+ This program is free software; you can redistribute it and/or modify
56981+ it under the terms of the GNU General Public License as published by
56982+ the Free Software Foundation; version 2 of the License.
56983+
56984+ This program is distributed in the hope that it will be useful,
56985+ but WITHOUT ANY WARRANTY; without even the implied warranty of
56986+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
56987+ GNU General Public License for more details.
56988+
56989+ NO WARRANTY
56990+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
56991+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
56992+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
56993+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
56994+ solely responsible for determining the appropriateness of using and
56995+ distributing the Program and assumes all risks associated with its
56996+ exercise of rights under this Agreement, including but not limited to
56997+ the risks and costs of program errors, damage to or loss of data,
56998+ programs or equipment, and unavailability or interruption of operations.
56999+
57000+ DISCLAIMER OF LIABILITY
57001+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
57002+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57003+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
57004+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
57005+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
57006+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
57007+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
57008+
57009+ You should have received a copy of the GNU General Public License
57010+ along with this program; if not, write to the Free Software
57011+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
57012+
57013+ Bugs/Comments/Suggestions should be mailed to:
57014+ linuxraid@lsi.com
57015+
57016+ For more information, goto:
57017+ http://www.lsi.com
57018+*/
57019+
57020+#ifndef _3W_SAS_H
57021+#define _3W_SAS_H
57022+
57023+/* AEN severity table */
57024+static char *twl_aen_severity_table[] =
57025+{
57026+ "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0
57027+};
57028+
57029+/* Liberator register offsets */
57030+#define TWL_STATUS 0x0 /* Status */
57031+#define TWL_HIBDB 0x20 /* Inbound doorbell */
57032+#define TWL_HISTAT 0x30 /* Host interrupt status */
57033+#define TWL_HIMASK 0x34 /* Host interrupt mask */
57034+#define TWL_HOBDB 0x9C /* Outbound doorbell */
57035+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
57036+#define TWL_SCRPD3 0xBC /* Scratchpad */
57037+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
57038+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
57039+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
57040+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
57041+#define TWL_HISTATUS_VALID_INTERRUPT 0xC
57042+#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
57043+#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
57044+#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
57045+#define TWL_ISSUE_SOFT_RESET 0x100
57046+#define TWL_CONTROLLER_READY 0x2000
57047+#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
57048+#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
57049+#define TWL_PULL_MODE 0x1
57050+
57051+/* Command packet opcodes used by the driver */
57052+#define TW_OP_INIT_CONNECTION 0x1
57053+#define TW_OP_GET_PARAM 0x12
57054+#define TW_OP_SET_PARAM 0x13
57055+#define TW_OP_EXECUTE_SCSI 0x10
57056+
57057+/* Asynchronous Event Notification (AEN) codes used by the driver */
57058+#define TW_AEN_QUEUE_EMPTY 0x0000
57059+#define TW_AEN_SOFT_RESET 0x0001
57060+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
57061+#define TW_AEN_SEVERITY_ERROR 0x1
57062+#define TW_AEN_SEVERITY_DEBUG 0x4
57063+#define TW_AEN_NOT_RETRIEVED 0x1
57064+
57065+/* Command state defines */
57066+#define TW_S_INITIAL 0x1 /* Initial state */
57067+#define TW_S_STARTED 0x2 /* Id in use */
57068+#define TW_S_POSTED 0x4 /* Posted to the controller */
57069+#define TW_S_COMPLETED 0x8 /* Completed by isr */
57070+#define TW_S_FINISHED 0x10 /* I/O completely done */
57071+
57072+/* Compatibility defines */
57073+#define TW_9750_ARCH_ID 10
57074+#define TW_CURRENT_DRIVER_SRL 40
57075+#define TW_CURRENT_DRIVER_BUILD 0
57076+#define TW_CURRENT_DRIVER_BRANCH 0
57077+
57078+/* Phase defines */
57079+#define TW_PHASE_INITIAL 0
57080+#define TW_PHASE_SGLIST 2
57081+
57082+/* Misc defines */
57083+#define TW_SECTOR_SIZE 512
57084+#define TW_MAX_UNITS 32
57085+#define TW_INIT_MESSAGE_CREDITS 0x100
57086+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
57087+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
57088+#define TW_EXTENDED_INIT_CONNECT 0x2
57089+#define TW_BASE_FW_SRL 24
57090+#define TW_BASE_FW_BRANCH 0
57091+#define TW_BASE_FW_BUILD 1
57092+#define TW_Q_LENGTH 256
57093+#define TW_Q_START 0
57094+#define TW_MAX_SLOT 32
57095+#define TW_MAX_RESET_TRIES 2
57096+#define TW_MAX_CMDS_PER_LUN 254
57097+#define TW_MAX_AEN_DRAIN 255
57098+#define TW_IN_RESET 2
57099+#define TW_USING_MSI 3
57100+#define TW_IN_ATTENTION_LOOP 4
57101+#define TW_MAX_SECTORS 256
57102+#define TW_MAX_CDB_LEN 16
57103+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
57104+#define TW_IOCTL_CHRDEV_FREE -1
57105+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
57106+#define TW_VERSION_TABLE 0x0402
57107+#define TW_TIMEKEEP_TABLE 0x040A
57108+#define TW_INFORMATION_TABLE 0x0403
57109+#define TW_PARAM_FWVER 3
57110+#define TW_PARAM_FWVER_LENGTH 16
57111+#define TW_PARAM_BIOSVER 4
57112+#define TW_PARAM_BIOSVER_LENGTH 16
57113+#define TW_PARAM_MODEL 8
57114+#define TW_PARAM_MODEL_LENGTH 16
57115+#define TW_PARAM_PHY_SUMMARY_TABLE 1
57116+#define TW_PARAM_PHYCOUNT 2
57117+#define TW_PARAM_PHYCOUNT_LENGTH 1
57118+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
57119+#define TW_ALLOCATION_LENGTH 128
57120+#define TW_SENSE_DATA_LENGTH 18
57121+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
57122+#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
57123+#define TW_ERROR_UNIT_OFFLINE 0x128
57124+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
57125+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
57126+#define TW_DRIVER 6
57127+#ifndef PCI_DEVICE_ID_3WARE_9750
57128+#define PCI_DEVICE_ID_3WARE_9750 0x1010
57129+#endif
57130+
57131+/* Bitmask macros to eliminate bitfields */
57132+
57133+/* opcode: 5, reserved: 3 */
57134+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
57135+#define TW_OP_OUT(x) (x & 0x1f)
57136+
57137+/* opcode: 5, sgloffset: 3 */
57138+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
57139+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
57140+
57141+/* severity: 3, reserved: 5 */
57142+#define TW_SEV_OUT(x) (x & 0x7)
57143+
57144+/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
57145+#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
57146+#define TW_NOTMFA_OUT(x) (x & 0x1)
57147+
57148+/* request_id: 12, lun: 4 */
57149+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
57150+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
57151+
57152+/* Register access macros */
57153+#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
57154+#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
57155+#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
57156+#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
57157+#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
57158+#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
57159+#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
57160+#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
57161+#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
57162+#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
57163+#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
57164+#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
57165+#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
57166+#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
57167+#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
57168+
57169+/* Macros */
57170+#define TW_PRINTK(h,a,b,c) { \
57171+if (h) \
57172+printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
57173+else \
57174+printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
57175+}
57176+#define TW_MAX_LUNS 16
57177+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
57178+#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
57179+#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
57180+#define TW_PADDING_LENGTH_LIBERATOR 136
57181+#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
57182+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
57183+
57184+#pragma pack(1)
57185+
57186+/* SGL entry */
57187+typedef struct TAG_TW_SG_Entry_ISO {
57188+ dma_addr_t address;
57189+ dma_addr_t length;
57190+} TW_SG_Entry_ISO;
57191+
57192+/* Old Command Packet with ISO SGL */
57193+typedef struct TW_Command {
57194+ unsigned char opcode__sgloffset;
57195+ unsigned char size;
57196+ unsigned char request_id;
57197+ unsigned char unit__hostid;
57198+ /* Second DWORD */
57199+ unsigned char status;
57200+ unsigned char flags;
57201+ union {
57202+ unsigned short block_count;
57203+ unsigned short parameter_count;
57204+ } byte6_offset;
57205+ union {
57206+ struct {
57207+ u32 lba;
57208+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57209+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
57210+ } io;
57211+ struct {
57212+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57213+ u32 padding;
57214+ unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
57215+ } param;
57216+ } byte8_offset;
57217+} TW_Command;
57218+
57219+/* New Command Packet with ISO SGL */
57220+typedef struct TAG_TW_Command_Apache {
57221+ unsigned char opcode__reserved;
57222+ unsigned char unit;
57223+ unsigned short request_id__lunl;
57224+ unsigned char status;
57225+ unsigned char sgl_offset;
57226+ unsigned short sgl_entries__lunh;
57227+ unsigned char cdb[16];
57228+ TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
57229+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
57230+} TW_Command_Apache;
57231+
57232+/* New command packet header */
57233+typedef struct TAG_TW_Command_Apache_Header {
57234+ unsigned char sense_data[TW_SENSE_DATA_LENGTH];
57235+ struct {
57236+ char reserved[4];
57237+ unsigned short error;
57238+ unsigned char padding;
57239+ unsigned char severity__reserved;
57240+ } status_block;
57241+ unsigned char err_specific_desc[98];
57242+ struct {
57243+ unsigned char size_header;
57244+ unsigned short request_id;
57245+ unsigned char size_sense;
57246+ } header_desc;
57247+} TW_Command_Apache_Header;
57248+
57249+/* This struct is a union of the 2 command packets */
57250+typedef struct TAG_TW_Command_Full {
57251+ TW_Command_Apache_Header header;
57252+ union {
57253+ TW_Command oldcommand;
57254+ TW_Command_Apache newcommand;
57255+ } command;
57256+} TW_Command_Full;
57257+
57258+/* Initconnection structure */
57259+typedef struct TAG_TW_Initconnect {
57260+ unsigned char opcode__reserved;
57261+ unsigned char size;
57262+ unsigned char request_id;
57263+ unsigned char res2;
57264+ unsigned char status;
57265+ unsigned char flags;
57266+ unsigned short message_credits;
57267+ u32 features;
57268+ unsigned short fw_srl;
57269+ unsigned short fw_arch_id;
57270+ unsigned short fw_branch;
57271+ unsigned short fw_build;
57272+ u32 result;
57273+} TW_Initconnect;
57274+
57275+/* Event info structure */
57276+typedef struct TAG_TW_Event
57277+{
57278+ unsigned int sequence_id;
57279+ unsigned int time_stamp_sec;
57280+ unsigned short aen_code;
57281+ unsigned char severity;
57282+ unsigned char retrieved;
57283+ unsigned char repeat_count;
57284+ unsigned char parameter_len;
57285+ unsigned char parameter_data[98];
57286+} TW_Event;
57287+
57288+typedef struct TAG_TW_Ioctl_Driver_Command {
57289+ unsigned int control_code;
57290+ unsigned int status;
57291+ unsigned int unique_id;
57292+ unsigned int sequence_id;
57293+ unsigned int os_specific;
57294+ unsigned int buffer_length;
57295+} TW_Ioctl_Driver_Command;
57296+
57297+typedef struct TAG_TW_Ioctl_Apache {
57298+ TW_Ioctl_Driver_Command driver_command;
57299+ char padding[488];
57300+ TW_Command_Full firmware_command;
57301+ char data_buffer[1];
57302+} TW_Ioctl_Buf_Apache;
57303+
57304+/* GetParam descriptor */
57305+typedef struct {
57306+ unsigned short table_id;
57307+ unsigned short parameter_id;
57308+ unsigned short parameter_size_bytes;
57309+ unsigned short actual_parameter_size_bytes;
57310+ unsigned char data[1];
57311+} TW_Param_Apache;
57312+
57313+/* Compatibility information structure */
57314+typedef struct TAG_TW_Compatibility_Info
57315+{
57316+ char driver_version[32];
57317+ unsigned short working_srl;
57318+ unsigned short working_branch;
57319+ unsigned short working_build;
57320+ unsigned short driver_srl_high;
57321+ unsigned short driver_branch_high;
57322+ unsigned short driver_build_high;
57323+ unsigned short driver_srl_low;
57324+ unsigned short driver_branch_low;
57325+ unsigned short driver_build_low;
57326+ unsigned short fw_on_ctlr_srl;
57327+ unsigned short fw_on_ctlr_branch;
57328+ unsigned short fw_on_ctlr_build;
57329+} TW_Compatibility_Info;
57330+
57331+#pragma pack()
57332+
57333+typedef struct TAG_TW_Device_Extension {
57334+ void __iomem *base_addr;
57335+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
57336+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
57337+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
57338+ dma_addr_t command_packet_phys[TW_Q_LENGTH];
57339+ TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
57340+ dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
57341+ struct pci_dev *tw_pci_dev;
57342+ struct scsi_cmnd *srb[TW_Q_LENGTH];
57343+ unsigned char free_queue[TW_Q_LENGTH];
57344+ unsigned char free_head;
57345+ unsigned char free_tail;
57346+ int state[TW_Q_LENGTH];
57347+ unsigned int posted_request_count;
57348+ unsigned int max_posted_request_count;
57349+ unsigned int max_sgl_entries;
57350+ unsigned int sgl_entries;
57351+ unsigned int num_resets;
57352+ unsigned int sector_count;
57353+ unsigned int max_sector_count;
57354+ unsigned int aen_count;
57355+ struct Scsi_Host *host;
57356+ long flags;
57357+ TW_Event *event_queue[TW_Q_LENGTH];
57358+ unsigned char error_index;
57359+ unsigned int error_sequence_id;
57360+ int chrdev_request_id;
57361+ wait_queue_head_t ioctl_wqueue;
57362+ struct mutex ioctl_lock;
57363+ TW_Compatibility_Info tw_compat_info;
57364+ char online;
57365+} TW_Device_Extension;
57366+
57367+#endif /* _3W_SAS_H */
57368+
57369diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
57370index 1ddcf40..a85f062 100644
57371--- a/drivers/scsi/BusLogic.c
57372+++ b/drivers/scsi/BusLogic.c
57373@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
57374 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
57375 *PrototypeHostAdapter)
57376 {
57377+ pax_track_stack();
57378+
57379 /*
57380 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
57381 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
57382diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
57383index e11cca4..4295679 100644
57384--- a/drivers/scsi/Kconfig
57385+++ b/drivers/scsi/Kconfig
57386@@ -399,6 +399,17 @@ config SCSI_3W_9XXX
57387 Please read the comments at the top of
57388 <file:drivers/scsi/3w-9xxx.c>.
57389
57390+config SCSI_3W_SAS
57391+ tristate "3ware 97xx SAS/SATA-RAID support"
57392+ depends on PCI && SCSI
57393+ help
57394+ This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
57395+
57396+ <http://www.lsi.com>
57397+
57398+ Please read the comments at the top of
57399+ <file:drivers/scsi/3w-sas.c>.
57400+
57401 config SCSI_7000FASST
57402 tristate "7000FASST SCSI support"
57403 depends on ISA && SCSI && ISA_DMA_API
57404@@ -621,6 +632,14 @@ config SCSI_FLASHPOINT
57405 substantial, so users of MultiMaster Host Adapters may not
57406 wish to include it.
57407
57408+config VMWARE_PVSCSI
57409+ tristate "VMware PVSCSI driver support"
57410+ depends on PCI && SCSI && X86
57411+ help
57412+ This driver supports VMware's para virtualized SCSI HBA.
57413+ To compile this driver as a module, choose M here: the
57414+ module will be called vmw_pvscsi.
57415+
57416 config LIBFC
57417 tristate "LibFC module"
57418 select SCSI_FC_ATTRS
57419diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
57420index 3ad61db..c938975 100644
57421--- a/drivers/scsi/Makefile
57422+++ b/drivers/scsi/Makefile
57423@@ -113,6 +113,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o
57424 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
57425 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
57426 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
57427+obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
57428 obj-$(CONFIG_SCSI_PPA) += ppa.o
57429 obj-$(CONFIG_SCSI_IMM) += imm.o
57430 obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
57431@@ -133,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
57432 obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
57433 obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
57434 obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
57435+obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
57436
57437 obj-$(CONFIG_ARM) += arm/
57438
57439diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
57440index cdbdec9..b7d560b 100644
57441--- a/drivers/scsi/aacraid/aacraid.h
57442+++ b/drivers/scsi/aacraid/aacraid.h
57443@@ -471,7 +471,7 @@ struct adapter_ops
57444 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
57445 /* Administrative operations */
57446 int (*adapter_comm)(struct aac_dev * dev, int comm);
57447-};
57448+} __no_const;
57449
57450 /*
57451 * Define which interrupt handler needs to be installed
57452diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
57453index a5b8e7b..a6a0e43 100644
57454--- a/drivers/scsi/aacraid/commctrl.c
57455+++ b/drivers/scsi/aacraid/commctrl.c
57456@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
57457 u32 actual_fibsize64, actual_fibsize = 0;
57458 int i;
57459
57460+ pax_track_stack();
57461
57462 if (dev->in_reset) {
57463 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
57464diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
57465index 9b97c3e..f099725 100644
57466--- a/drivers/scsi/aacraid/linit.c
57467+++ b/drivers/scsi/aacraid/linit.c
57468@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
57469 #elif defined(__devinitconst)
57470 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57471 #else
57472-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
57473+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57474 #endif
57475 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
57476 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
57477diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
57478index 996f722..9127845 100644
57479--- a/drivers/scsi/aic94xx/aic94xx_init.c
57480+++ b/drivers/scsi/aic94xx/aic94xx_init.c
57481@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
57482 flash_error_table[i].reason);
57483 }
57484
57485-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
57486+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
57487 asd_show_update_bios, asd_store_update_bios);
57488
57489 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
57490@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
57491 .lldd_control_phy = asd_control_phy,
57492 };
57493
57494-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
57495+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
57496 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
57497 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
57498 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
57499diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
57500index 58efd4b..cb48dc7 100644
57501--- a/drivers/scsi/bfa/bfa_ioc.h
57502+++ b/drivers/scsi/bfa/bfa_ioc.h
57503@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
57504 bfa_ioc_disable_cbfn_t disable_cbfn;
57505 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
57506 bfa_ioc_reset_cbfn_t reset_cbfn;
57507-};
57508+} __no_const;
57509
57510 /**
57511 * Heartbeat failure notification queue element.
57512diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
57513index 7ad177e..5503586 100644
57514--- a/drivers/scsi/bfa/bfa_iocfc.h
57515+++ b/drivers/scsi/bfa/bfa_iocfc.h
57516@@ -61,7 +61,7 @@ struct bfa_hwif_s {
57517 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
57518 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
57519 u32 *nvecs, u32 *maxvec);
57520-};
57521+} __no_const;
57522 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
57523
57524 struct bfa_iocfc_s {
57525diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
57526index 4967643..cbec06b 100644
57527--- a/drivers/scsi/dpt_i2o.c
57528+++ b/drivers/scsi/dpt_i2o.c
57529@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
57530 dma_addr_t addr;
57531 ulong flags = 0;
57532
57533+ pax_track_stack();
57534+
57535 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
57536 // get user msg size in u32s
57537 if(get_user(size, &user_msg[0])){
57538@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
57539 s32 rcode;
57540 dma_addr_t addr;
57541
57542+ pax_track_stack();
57543+
57544 memset(msg, 0 , sizeof(msg));
57545 len = scsi_bufflen(cmd);
57546 direction = 0x00000000;
57547diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
57548index c7076ce..e20c67c 100644
57549--- a/drivers/scsi/eata.c
57550+++ b/drivers/scsi/eata.c
57551@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
57552 struct hostdata *ha;
57553 char name[16];
57554
57555+ pax_track_stack();
57556+
57557 sprintf(name, "%s%d", driver_name, j);
57558
57559 if (!request_region(port_base, REGION_SIZE, driver_name)) {
57560diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
57561index 11ae5c9..891daec 100644
57562--- a/drivers/scsi/fcoe/libfcoe.c
57563+++ b/drivers/scsi/fcoe/libfcoe.c
57564@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
57565 size_t rlen;
57566 size_t dlen;
57567
57568+ pax_track_stack();
57569+
57570 fiph = (struct fip_header *)skb->data;
57571 sub = fiph->fip_subcode;
57572 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
57573diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
57574index 71c7bbe..e93088a 100644
57575--- a/drivers/scsi/fnic/fnic_main.c
57576+++ b/drivers/scsi/fnic/fnic_main.c
57577@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
57578 /* Start local port initiatialization */
57579
57580 lp->link_up = 0;
57581- lp->tt = fnic_transport_template;
57582+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
57583
57584 lp->max_retry_count = fnic->config.flogi_retries;
57585 lp->max_rport_retry_count = fnic->config.plogi_retries;
57586diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
57587index bb96d74..9ec3ce4 100644
57588--- a/drivers/scsi/gdth.c
57589+++ b/drivers/scsi/gdth.c
57590@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
57591 ulong flags;
57592 gdth_ha_str *ha;
57593
57594+ pax_track_stack();
57595+
57596 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
57597 return -EFAULT;
57598 ha = gdth_find_ha(ldrv.ionode);
57599@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
57600 gdth_ha_str *ha;
57601 int rval;
57602
57603+ pax_track_stack();
57604+
57605 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
57606 res.number >= MAX_HDRIVES)
57607 return -EFAULT;
57608@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
57609 gdth_ha_str *ha;
57610 int rval;
57611
57612+ pax_track_stack();
57613+
57614 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
57615 return -EFAULT;
57616 ha = gdth_find_ha(gen.ionode);
57617@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
57618 int i;
57619 gdth_cmd_str gdtcmd;
57620 char cmnd[MAX_COMMAND_SIZE];
57621+
57622+ pax_track_stack();
57623+
57624 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
57625
57626 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
57627diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
57628index 1258da3..20d8ae6 100644
57629--- a/drivers/scsi/gdth_proc.c
57630+++ b/drivers/scsi/gdth_proc.c
57631@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
57632 ulong64 paddr;
57633
57634 char cmnd[MAX_COMMAND_SIZE];
57635+
57636+ pax_track_stack();
57637+
57638 memset(cmnd, 0xff, 12);
57639 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
57640
57641@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
57642 gdth_hget_str *phg;
57643 char cmnd[MAX_COMMAND_SIZE];
57644
57645+ pax_track_stack();
57646+
57647 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
57648 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
57649 if (!gdtcmd || !estr)
57650diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
57651index d03a926..f324286 100644
57652--- a/drivers/scsi/hosts.c
57653+++ b/drivers/scsi/hosts.c
57654@@ -40,7 +40,7 @@
57655 #include "scsi_logging.h"
57656
57657
57658-static atomic_t scsi_host_next_hn; /* host_no for next new host */
57659+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
57660
57661
57662 static void scsi_host_cls_release(struct device *dev)
57663@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
57664 * subtract one because we increment first then return, but we need to
57665 * know what the next host number was before increment
57666 */
57667- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
57668+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
57669 shost->dma_channel = 0xff;
57670
57671 /* These three are default values which can be overridden */
57672diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
57673index a601159..55e19d2 100644
57674--- a/drivers/scsi/ipr.c
57675+++ b/drivers/scsi/ipr.c
57676@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
57677 return true;
57678 }
57679
57680-static struct ata_port_operations ipr_sata_ops = {
57681+static const struct ata_port_operations ipr_sata_ops = {
57682 .phy_reset = ipr_ata_phy_reset,
57683 .hardreset = ipr_sata_reset,
57684 .post_internal_cmd = ipr_ata_post_internal,
57685diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
57686index 4e49fbc..97907ff 100644
57687--- a/drivers/scsi/ips.h
57688+++ b/drivers/scsi/ips.h
57689@@ -1027,7 +1027,7 @@ typedef struct {
57690 int (*intr)(struct ips_ha *);
57691 void (*enableint)(struct ips_ha *);
57692 uint32_t (*statupd)(struct ips_ha *);
57693-} ips_hw_func_t;
57694+} __no_const ips_hw_func_t;
57695
57696 typedef struct ips_ha {
57697 uint8_t ha_id[IPS_MAX_CHANNELS+1];
57698diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
57699index c1c1574..a9c9348 100644
57700--- a/drivers/scsi/libfc/fc_exch.c
57701+++ b/drivers/scsi/libfc/fc_exch.c
57702@@ -86,12 +86,12 @@ struct fc_exch_mgr {
57703 * all together if not used XXX
57704 */
57705 struct {
57706- atomic_t no_free_exch;
57707- atomic_t no_free_exch_xid;
57708- atomic_t xid_not_found;
57709- atomic_t xid_busy;
57710- atomic_t seq_not_found;
57711- atomic_t non_bls_resp;
57712+ atomic_unchecked_t no_free_exch;
57713+ atomic_unchecked_t no_free_exch_xid;
57714+ atomic_unchecked_t xid_not_found;
57715+ atomic_unchecked_t xid_busy;
57716+ atomic_unchecked_t seq_not_found;
57717+ atomic_unchecked_t non_bls_resp;
57718 } stats;
57719 };
57720 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
57721@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
57722 /* allocate memory for exchange */
57723 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
57724 if (!ep) {
57725- atomic_inc(&mp->stats.no_free_exch);
57726+ atomic_inc_unchecked(&mp->stats.no_free_exch);
57727 goto out;
57728 }
57729 memset(ep, 0, sizeof(*ep));
57730@@ -557,7 +557,7 @@ out:
57731 return ep;
57732 err:
57733 spin_unlock_bh(&pool->lock);
57734- atomic_inc(&mp->stats.no_free_exch_xid);
57735+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
57736 mempool_free(ep, mp->ep_pool);
57737 return NULL;
57738 }
57739@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57740 xid = ntohs(fh->fh_ox_id); /* we originated exch */
57741 ep = fc_exch_find(mp, xid);
57742 if (!ep) {
57743- atomic_inc(&mp->stats.xid_not_found);
57744+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57745 reject = FC_RJT_OX_ID;
57746 goto out;
57747 }
57748@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57749 ep = fc_exch_find(mp, xid);
57750 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
57751 if (ep) {
57752- atomic_inc(&mp->stats.xid_busy);
57753+ atomic_inc_unchecked(&mp->stats.xid_busy);
57754 reject = FC_RJT_RX_ID;
57755 goto rel;
57756 }
57757@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57758 }
57759 xid = ep->xid; /* get our XID */
57760 } else if (!ep) {
57761- atomic_inc(&mp->stats.xid_not_found);
57762+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57763 reject = FC_RJT_RX_ID; /* XID not found */
57764 goto out;
57765 }
57766@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57767 } else {
57768 sp = &ep->seq;
57769 if (sp->id != fh->fh_seq_id) {
57770- atomic_inc(&mp->stats.seq_not_found);
57771+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57772 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
57773 goto rel;
57774 }
57775@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57776
57777 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
57778 if (!ep) {
57779- atomic_inc(&mp->stats.xid_not_found);
57780+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57781 goto out;
57782 }
57783 if (ep->esb_stat & ESB_ST_COMPLETE) {
57784- atomic_inc(&mp->stats.xid_not_found);
57785+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57786 goto out;
57787 }
57788 if (ep->rxid == FC_XID_UNKNOWN)
57789 ep->rxid = ntohs(fh->fh_rx_id);
57790 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
57791- atomic_inc(&mp->stats.xid_not_found);
57792+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57793 goto rel;
57794 }
57795 if (ep->did != ntoh24(fh->fh_s_id) &&
57796 ep->did != FC_FID_FLOGI) {
57797- atomic_inc(&mp->stats.xid_not_found);
57798+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57799 goto rel;
57800 }
57801 sof = fr_sof(fp);
57802@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57803 } else {
57804 sp = &ep->seq;
57805 if (sp->id != fh->fh_seq_id) {
57806- atomic_inc(&mp->stats.seq_not_found);
57807+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57808 goto rel;
57809 }
57810 }
57811@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57812 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
57813
57814 if (!sp)
57815- atomic_inc(&mp->stats.xid_not_found);
57816+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57817 else
57818- atomic_inc(&mp->stats.non_bls_resp);
57819+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
57820
57821 fc_frame_free(fp);
57822 }
57823diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
57824index 0ee989f..a582241 100644
57825--- a/drivers/scsi/libsas/sas_ata.c
57826+++ b/drivers/scsi/libsas/sas_ata.c
57827@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
57828 }
57829 }
57830
57831-static struct ata_port_operations sas_sata_ops = {
57832+static const struct ata_port_operations sas_sata_ops = {
57833 .phy_reset = sas_ata_phy_reset,
57834 .post_internal_cmd = sas_ata_post_internal,
57835 .qc_defer = ata_std_qc_defer,
57836diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
57837index aa10f79..5cc79e4 100644
57838--- a/drivers/scsi/lpfc/lpfc.h
57839+++ b/drivers/scsi/lpfc/lpfc.h
57840@@ -400,7 +400,7 @@ struct lpfc_vport {
57841 struct dentry *debug_nodelist;
57842 struct dentry *vport_debugfs_root;
57843 struct lpfc_debugfs_trc *disc_trc;
57844- atomic_t disc_trc_cnt;
57845+ atomic_unchecked_t disc_trc_cnt;
57846 #endif
57847 uint8_t stat_data_enabled;
57848 uint8_t stat_data_blocked;
57849@@ -725,8 +725,8 @@ struct lpfc_hba {
57850 struct timer_list fabric_block_timer;
57851 unsigned long bit_flags;
57852 #define FABRIC_COMANDS_BLOCKED 0
57853- atomic_t num_rsrc_err;
57854- atomic_t num_cmd_success;
57855+ atomic_unchecked_t num_rsrc_err;
57856+ atomic_unchecked_t num_cmd_success;
57857 unsigned long last_rsrc_error_time;
57858 unsigned long last_ramp_down_time;
57859 unsigned long last_ramp_up_time;
57860@@ -740,7 +740,7 @@ struct lpfc_hba {
57861 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
57862 struct dentry *debug_slow_ring_trc;
57863 struct lpfc_debugfs_trc *slow_ring_trc;
57864- atomic_t slow_ring_trc_cnt;
57865+ atomic_unchecked_t slow_ring_trc_cnt;
57866 #endif
57867
57868 /* Used for deferred freeing of ELS data buffers */
57869diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
57870index 8d0f0de..7c77a62 100644
57871--- a/drivers/scsi/lpfc/lpfc_debugfs.c
57872+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
57873@@ -124,7 +124,7 @@ struct lpfc_debug {
57874 int len;
57875 };
57876
57877-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57878+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57879 static unsigned long lpfc_debugfs_start_time = 0L;
57880
57881 /**
57882@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
57883 lpfc_debugfs_enable = 0;
57884
57885 len = 0;
57886- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
57887+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
57888 (lpfc_debugfs_max_disc_trc - 1);
57889 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
57890 dtp = vport->disc_trc + i;
57891@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
57892 lpfc_debugfs_enable = 0;
57893
57894 len = 0;
57895- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
57896+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
57897 (lpfc_debugfs_max_slow_ring_trc - 1);
57898 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
57899 dtp = phba->slow_ring_trc + i;
57900@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
57901 uint32_t *ptr;
57902 char buffer[1024];
57903
57904+ pax_track_stack();
57905+
57906 off = 0;
57907 spin_lock_irq(&phba->hbalock);
57908
57909@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
57910 !vport || !vport->disc_trc)
57911 return;
57912
57913- index = atomic_inc_return(&vport->disc_trc_cnt) &
57914+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
57915 (lpfc_debugfs_max_disc_trc - 1);
57916 dtp = vport->disc_trc + index;
57917 dtp->fmt = fmt;
57918 dtp->data1 = data1;
57919 dtp->data2 = data2;
57920 dtp->data3 = data3;
57921- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57922+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57923 dtp->jif = jiffies;
57924 #endif
57925 return;
57926@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
57927 !phba || !phba->slow_ring_trc)
57928 return;
57929
57930- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
57931+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
57932 (lpfc_debugfs_max_slow_ring_trc - 1);
57933 dtp = phba->slow_ring_trc + index;
57934 dtp->fmt = fmt;
57935 dtp->data1 = data1;
57936 dtp->data2 = data2;
57937 dtp->data3 = data3;
57938- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57939+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57940 dtp->jif = jiffies;
57941 #endif
57942 return;
57943@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57944 "slow_ring buffer\n");
57945 goto debug_failed;
57946 }
57947- atomic_set(&phba->slow_ring_trc_cnt, 0);
57948+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
57949 memset(phba->slow_ring_trc, 0,
57950 (sizeof(struct lpfc_debugfs_trc) *
57951 lpfc_debugfs_max_slow_ring_trc));
57952@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57953 "buffer\n");
57954 goto debug_failed;
57955 }
57956- atomic_set(&vport->disc_trc_cnt, 0);
57957+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
57958
57959 snprintf(name, sizeof(name), "discovery_trace");
57960 vport->debug_disc_trc =
57961diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
57962index 549bc7d..8189dbb 100644
57963--- a/drivers/scsi/lpfc/lpfc_init.c
57964+++ b/drivers/scsi/lpfc/lpfc_init.c
57965@@ -8021,8 +8021,10 @@ lpfc_init(void)
57966 printk(LPFC_COPYRIGHT "\n");
57967
57968 if (lpfc_enable_npiv) {
57969- lpfc_transport_functions.vport_create = lpfc_vport_create;
57970- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57971+ pax_open_kernel();
57972+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
57973+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57974+ pax_close_kernel();
57975 }
57976 lpfc_transport_template =
57977 fc_attach_transport(&lpfc_transport_functions);
57978diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
57979index c88f59f..ff2a42f 100644
57980--- a/drivers/scsi/lpfc/lpfc_scsi.c
57981+++ b/drivers/scsi/lpfc/lpfc_scsi.c
57982@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
57983 uint32_t evt_posted;
57984
57985 spin_lock_irqsave(&phba->hbalock, flags);
57986- atomic_inc(&phba->num_rsrc_err);
57987+ atomic_inc_unchecked(&phba->num_rsrc_err);
57988 phba->last_rsrc_error_time = jiffies;
57989
57990 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
57991@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
57992 unsigned long flags;
57993 struct lpfc_hba *phba = vport->phba;
57994 uint32_t evt_posted;
57995- atomic_inc(&phba->num_cmd_success);
57996+ atomic_inc_unchecked(&phba->num_cmd_success);
57997
57998 if (vport->cfg_lun_queue_depth <= queue_depth)
57999 return;
58000@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
58001 int i;
58002 struct lpfc_rport_data *rdata;
58003
58004- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
58005- num_cmd_success = atomic_read(&phba->num_cmd_success);
58006+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
58007+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
58008
58009 vports = lpfc_create_vport_work_array(phba);
58010 if (vports != NULL)
58011@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
58012 }
58013 }
58014 lpfc_destroy_vport_work_array(phba, vports);
58015- atomic_set(&phba->num_rsrc_err, 0);
58016- atomic_set(&phba->num_cmd_success, 0);
58017+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
58018+ atomic_set_unchecked(&phba->num_cmd_success, 0);
58019 }
58020
58021 /**
58022@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
58023 }
58024 }
58025 lpfc_destroy_vport_work_array(phba, vports);
58026- atomic_set(&phba->num_rsrc_err, 0);
58027- atomic_set(&phba->num_cmd_success, 0);
58028+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
58029+ atomic_set_unchecked(&phba->num_cmd_success, 0);
58030 }
58031
58032 /**
58033diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
58034index 234f0b7..3020aea 100644
58035--- a/drivers/scsi/megaraid/megaraid_mbox.c
58036+++ b/drivers/scsi/megaraid/megaraid_mbox.c
58037@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
58038 int rval;
58039 int i;
58040
58041+ pax_track_stack();
58042+
58043 // Allocate memory for the base list of scb for management module.
58044 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
58045
58046diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
58047index 7a117c1..ee01e9e 100644
58048--- a/drivers/scsi/osd/osd_initiator.c
58049+++ b/drivers/scsi/osd/osd_initiator.c
58050@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
58051 int nelem = ARRAY_SIZE(get_attrs), a = 0;
58052 int ret;
58053
58054+ pax_track_stack();
58055+
58056 or = osd_start_request(od, GFP_KERNEL);
58057 if (!or)
58058 return -ENOMEM;
58059diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
58060index 9ab8c86..9425ad3 100644
58061--- a/drivers/scsi/pmcraid.c
58062+++ b/drivers/scsi/pmcraid.c
58063@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
58064 res->scsi_dev = scsi_dev;
58065 scsi_dev->hostdata = res;
58066 res->change_detected = 0;
58067- atomic_set(&res->read_failures, 0);
58068- atomic_set(&res->write_failures, 0);
58069+ atomic_set_unchecked(&res->read_failures, 0);
58070+ atomic_set_unchecked(&res->write_failures, 0);
58071 rc = 0;
58072 }
58073 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
58074@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
58075
58076 /* If this was a SCSI read/write command keep count of errors */
58077 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
58078- atomic_inc(&res->read_failures);
58079+ atomic_inc_unchecked(&res->read_failures);
58080 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
58081- atomic_inc(&res->write_failures);
58082+ atomic_inc_unchecked(&res->write_failures);
58083
58084 if (!RES_IS_GSCSI(res->cfg_entry) &&
58085 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
58086@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
58087
58088 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
58089 /* add resources only after host is added into system */
58090- if (!atomic_read(&pinstance->expose_resources))
58091+ if (!atomic_read_unchecked(&pinstance->expose_resources))
58092 return;
58093
58094 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
58095@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
58096 init_waitqueue_head(&pinstance->reset_wait_q);
58097
58098 atomic_set(&pinstance->outstanding_cmds, 0);
58099- atomic_set(&pinstance->expose_resources, 0);
58100+ atomic_set_unchecked(&pinstance->expose_resources, 0);
58101
58102 INIT_LIST_HEAD(&pinstance->free_res_q);
58103 INIT_LIST_HEAD(&pinstance->used_res_q);
58104@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
58105 /* Schedule worker thread to handle CCN and take care of adding and
58106 * removing devices to OS
58107 */
58108- atomic_set(&pinstance->expose_resources, 1);
58109+ atomic_set_unchecked(&pinstance->expose_resources, 1);
58110 schedule_work(&pinstance->worker_q);
58111 return rc;
58112
58113diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
58114index 3441b3f..6cbe8f7 100644
58115--- a/drivers/scsi/pmcraid.h
58116+++ b/drivers/scsi/pmcraid.h
58117@@ -690,7 +690,7 @@ struct pmcraid_instance {
58118 atomic_t outstanding_cmds;
58119
58120 /* should add/delete resources to mid-layer now ?*/
58121- atomic_t expose_resources;
58122+ atomic_unchecked_t expose_resources;
58123
58124 /* Tasklet to handle deferred processing */
58125 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
58126@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
58127 struct list_head queue; /* link to "to be exposed" resources */
58128 struct pmcraid_config_table_entry cfg_entry;
58129 struct scsi_device *scsi_dev; /* Link scsi_device structure */
58130- atomic_t read_failures; /* count of failed READ commands */
58131- atomic_t write_failures; /* count of failed WRITE commands */
58132+ atomic_unchecked_t read_failures; /* count of failed READ commands */
58133+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
58134
58135 /* To indicate add/delete/modify during CCN */
58136 u8 change_detected;
58137diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
58138index 2150618..7034215 100644
58139--- a/drivers/scsi/qla2xxx/qla_def.h
58140+++ b/drivers/scsi/qla2xxx/qla_def.h
58141@@ -2089,7 +2089,7 @@ struct isp_operations {
58142
58143 int (*get_flash_version) (struct scsi_qla_host *, void *);
58144 int (*start_scsi) (srb_t *);
58145-};
58146+} __no_const;
58147
58148 /* MSI-X Support *************************************************************/
58149
58150diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
58151index 81b5f29..2ae1fad 100644
58152--- a/drivers/scsi/qla4xxx/ql4_def.h
58153+++ b/drivers/scsi/qla4xxx/ql4_def.h
58154@@ -240,7 +240,7 @@ struct ddb_entry {
58155 atomic_t retry_relogin_timer; /* Min Time between relogins
58156 * (4000 only) */
58157 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
58158- atomic_t relogin_retry_count; /* Num of times relogin has been
58159+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
58160 * retried */
58161
58162 uint16_t port;
58163diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
58164index af8c323..515dd51 100644
58165--- a/drivers/scsi/qla4xxx/ql4_init.c
58166+++ b/drivers/scsi/qla4xxx/ql4_init.c
58167@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
58168 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
58169 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
58170 atomic_set(&ddb_entry->relogin_timer, 0);
58171- atomic_set(&ddb_entry->relogin_retry_count, 0);
58172+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58173 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58174 list_add_tail(&ddb_entry->list, &ha->ddb_list);
58175 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
58176@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
58177 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58178 atomic_set(&ddb_entry->port_down_timer,
58179 ha->port_down_retry_count);
58180- atomic_set(&ddb_entry->relogin_retry_count, 0);
58181+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58182 atomic_set(&ddb_entry->relogin_timer, 0);
58183 clear_bit(DF_RELOGIN, &ddb_entry->flags);
58184 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
58185diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
58186index 83c8b5e..a82b348 100644
58187--- a/drivers/scsi/qla4xxx/ql4_os.c
58188+++ b/drivers/scsi/qla4xxx/ql4_os.c
58189@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
58190 ddb_entry->fw_ddb_device_state ==
58191 DDB_DS_SESSION_FAILED) {
58192 /* Reset retry relogin timer */
58193- atomic_inc(&ddb_entry->relogin_retry_count);
58194+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
58195 DEBUG2(printk("scsi%ld: index[%d] relogin"
58196 " timed out-retrying"
58197 " relogin (%d)\n",
58198 ha->host_no,
58199 ddb_entry->fw_ddb_index,
58200- atomic_read(&ddb_entry->
58201+ atomic_read_unchecked(&ddb_entry->
58202 relogin_retry_count))
58203 );
58204 start_dpc++;
58205diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
58206index dd098ca..686ce01 100644
58207--- a/drivers/scsi/scsi.c
58208+++ b/drivers/scsi/scsi.c
58209@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
58210 unsigned long timeout;
58211 int rtn = 0;
58212
58213- atomic_inc(&cmd->device->iorequest_cnt);
58214+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58215
58216 /* check if the device is still usable */
58217 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
58218diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
58219index bc3e363..e1a8e50 100644
58220--- a/drivers/scsi/scsi_debug.c
58221+++ b/drivers/scsi/scsi_debug.c
58222@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
58223 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
58224 unsigned char *cmd = (unsigned char *)scp->cmnd;
58225
58226+ pax_track_stack();
58227+
58228 if ((errsts = check_readiness(scp, 1, devip)))
58229 return errsts;
58230 memset(arr, 0, sizeof(arr));
58231@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
58232 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
58233 unsigned char *cmd = (unsigned char *)scp->cmnd;
58234
58235+ pax_track_stack();
58236+
58237 if ((errsts = check_readiness(scp, 1, devip)))
58238 return errsts;
58239 memset(arr, 0, sizeof(arr));
58240diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
58241index 8df12522..c4c1472 100644
58242--- a/drivers/scsi/scsi_lib.c
58243+++ b/drivers/scsi/scsi_lib.c
58244@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
58245 shost = sdev->host;
58246 scsi_init_cmd_errh(cmd);
58247 cmd->result = DID_NO_CONNECT << 16;
58248- atomic_inc(&cmd->device->iorequest_cnt);
58249+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58250
58251 /*
58252 * SCSI request completion path will do scsi_device_unbusy(),
58253@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
58254 */
58255 cmd->serial_number = 0;
58256
58257- atomic_inc(&cmd->device->iodone_cnt);
58258+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
58259 if (cmd->result)
58260- atomic_inc(&cmd->device->ioerr_cnt);
58261+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
58262
58263 disposition = scsi_decide_disposition(cmd);
58264 if (disposition != SUCCESS &&
58265diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
58266index 91a93e0..eae0fe3 100644
58267--- a/drivers/scsi/scsi_sysfs.c
58268+++ b/drivers/scsi/scsi_sysfs.c
58269@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
58270 char *buf) \
58271 { \
58272 struct scsi_device *sdev = to_scsi_device(dev); \
58273- unsigned long long count = atomic_read(&sdev->field); \
58274+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
58275 return snprintf(buf, 20, "0x%llx\n", count); \
58276 } \
58277 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
58278diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
58279index 1030327..f91fd30 100644
58280--- a/drivers/scsi/scsi_tgt_lib.c
58281+++ b/drivers/scsi/scsi_tgt_lib.c
58282@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
58283 int err;
58284
58285 dprintk("%lx %u\n", uaddr, len);
58286- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
58287+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
58288 if (err) {
58289 /*
58290 * TODO: need to fixup sg_tablesize, max_segment_size,
58291diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
58292index db02e31..1b42ea9 100644
58293--- a/drivers/scsi/scsi_transport_fc.c
58294+++ b/drivers/scsi/scsi_transport_fc.c
58295@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
58296 * Netlink Infrastructure
58297 */
58298
58299-static atomic_t fc_event_seq;
58300+static atomic_unchecked_t fc_event_seq;
58301
58302 /**
58303 * fc_get_event_number - Obtain the next sequential FC event number
58304@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
58305 u32
58306 fc_get_event_number(void)
58307 {
58308- return atomic_add_return(1, &fc_event_seq);
58309+ return atomic_add_return_unchecked(1, &fc_event_seq);
58310 }
58311 EXPORT_SYMBOL(fc_get_event_number);
58312
58313@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
58314 {
58315 int error;
58316
58317- atomic_set(&fc_event_seq, 0);
58318+ atomic_set_unchecked(&fc_event_seq, 0);
58319
58320 error = transport_class_register(&fc_host_class);
58321 if (error)
58322diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
58323index de2f8c4..63c5278 100644
58324--- a/drivers/scsi/scsi_transport_iscsi.c
58325+++ b/drivers/scsi/scsi_transport_iscsi.c
58326@@ -81,7 +81,7 @@ struct iscsi_internal {
58327 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
58328 };
58329
58330-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
58331+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
58332 static struct workqueue_struct *iscsi_eh_timer_workq;
58333
58334 /*
58335@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
58336 int err;
58337
58338 ihost = shost->shost_data;
58339- session->sid = atomic_add_return(1, &iscsi_session_nr);
58340+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
58341
58342 if (id == ISCSI_MAX_TARGET) {
58343 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
58344@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
58345 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
58346 ISCSI_TRANSPORT_VERSION);
58347
58348- atomic_set(&iscsi_session_nr, 0);
58349+ atomic_set_unchecked(&iscsi_session_nr, 0);
58350
58351 err = class_register(&iscsi_transport_class);
58352 if (err)
58353diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
58354index 21a045e..ec89e03 100644
58355--- a/drivers/scsi/scsi_transport_srp.c
58356+++ b/drivers/scsi/scsi_transport_srp.c
58357@@ -33,7 +33,7 @@
58358 #include "scsi_transport_srp_internal.h"
58359
58360 struct srp_host_attrs {
58361- atomic_t next_port_id;
58362+ atomic_unchecked_t next_port_id;
58363 };
58364 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
58365
58366@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
58367 struct Scsi_Host *shost = dev_to_shost(dev);
58368 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
58369
58370- atomic_set(&srp_host->next_port_id, 0);
58371+ atomic_set_unchecked(&srp_host->next_port_id, 0);
58372 return 0;
58373 }
58374
58375@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
58376 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
58377 rport->roles = ids->roles;
58378
58379- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
58380+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
58381 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
58382
58383 transport_setup_device(&rport->dev);
58384diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
58385index 040f751..98a5ed2 100644
58386--- a/drivers/scsi/sg.c
58387+++ b/drivers/scsi/sg.c
58388@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
58389 sdp->disk->disk_name,
58390 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
58391 NULL,
58392- (char *)arg);
58393+ (char __user *)arg);
58394 case BLKTRACESTART:
58395 return blk_trace_startstop(sdp->device->request_queue, 1);
58396 case BLKTRACESTOP:
58397@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
58398 const struct file_operations * fops;
58399 };
58400
58401-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
58402+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
58403 {"allow_dio", &adio_fops},
58404 {"debug", &debug_fops},
58405 {"def_reserved_size", &dressz_fops},
58406@@ -2307,7 +2307,7 @@ sg_proc_init(void)
58407 {
58408 int k, mask;
58409 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
58410- struct sg_proc_leaf * leaf;
58411+ const struct sg_proc_leaf * leaf;
58412
58413 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
58414 if (!sg_proc_sgp)
58415diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
58416index c19ca5e..3eb5959 100644
58417--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
58418+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
58419@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
58420 int do_iounmap = 0;
58421 int do_disable_device = 1;
58422
58423+ pax_track_stack();
58424+
58425 memset(&sym_dev, 0, sizeof(sym_dev));
58426 memset(&nvram, 0, sizeof(nvram));
58427 sym_dev.pdev = pdev;
58428diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
58429new file mode 100644
58430index 0000000..eabb432
58431--- /dev/null
58432+++ b/drivers/scsi/vmw_pvscsi.c
58433@@ -0,0 +1,1401 @@
58434+/*
58435+ * Linux driver for VMware's para-virtualized SCSI HBA.
58436+ *
58437+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
58438+ *
58439+ * This program is free software; you can redistribute it and/or modify it
58440+ * under the terms of the GNU General Public License as published by the
58441+ * Free Software Foundation; version 2 of the License and no later version.
58442+ *
58443+ * This program is distributed in the hope that it will be useful, but
58444+ * WITHOUT ANY WARRANTY; without even the implied warranty of
58445+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
58446+ * NON INFRINGEMENT. See the GNU General Public License for more
58447+ * details.
58448+ *
58449+ * You should have received a copy of the GNU General Public License
58450+ * along with this program; if not, write to the Free Software
58451+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
58452+ *
58453+ * Maintained by: Alok N Kataria <akataria@vmware.com>
58454+ *
58455+ */
58456+
58457+#include <linux/kernel.h>
58458+#include <linux/module.h>
58459+#include <linux/moduleparam.h>
58460+#include <linux/types.h>
58461+#include <linux/interrupt.h>
58462+#include <linux/workqueue.h>
58463+#include <linux/pci.h>
58464+
58465+#include <scsi/scsi.h>
58466+#include <scsi/scsi_host.h>
58467+#include <scsi/scsi_cmnd.h>
58468+#include <scsi/scsi_device.h>
58469+
58470+#include "vmw_pvscsi.h"
58471+
58472+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
58473+
58474+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
58475+MODULE_AUTHOR("VMware, Inc.");
58476+MODULE_LICENSE("GPL");
58477+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
58478+
58479+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
58480+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
58481+#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
58482+#define SGL_SIZE PAGE_SIZE
58483+
58484+#define pvscsi_dev(adapter) (&(adapter->dev->dev))
58485+
58486+struct pvscsi_sg_list {
58487+ struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
58488+};
58489+
58490+struct pvscsi_ctx {
58491+ /*
58492+ * The index of the context in cmd_map serves as the context ID for a
58493+ * 1-to-1 mapping completions back to requests.
58494+ */
58495+ struct scsi_cmnd *cmd;
58496+ struct pvscsi_sg_list *sgl;
58497+ struct list_head list;
58498+ dma_addr_t dataPA;
58499+ dma_addr_t sensePA;
58500+ dma_addr_t sglPA;
58501+};
58502+
58503+struct pvscsi_adapter {
58504+ char *mmioBase;
58505+ unsigned int irq;
58506+ u8 rev;
58507+ bool use_msi;
58508+ bool use_msix;
58509+ bool use_msg;
58510+
58511+ spinlock_t hw_lock;
58512+
58513+ struct workqueue_struct *workqueue;
58514+ struct work_struct work;
58515+
58516+ struct PVSCSIRingReqDesc *req_ring;
58517+ unsigned req_pages;
58518+ unsigned req_depth;
58519+ dma_addr_t reqRingPA;
58520+
58521+ struct PVSCSIRingCmpDesc *cmp_ring;
58522+ unsigned cmp_pages;
58523+ dma_addr_t cmpRingPA;
58524+
58525+ struct PVSCSIRingMsgDesc *msg_ring;
58526+ unsigned msg_pages;
58527+ dma_addr_t msgRingPA;
58528+
58529+ struct PVSCSIRingsState *rings_state;
58530+ dma_addr_t ringStatePA;
58531+
58532+ struct pci_dev *dev;
58533+ struct Scsi_Host *host;
58534+
58535+ struct list_head cmd_pool;
58536+ struct pvscsi_ctx *cmd_map;
58537+};
58538+
58539+
58540+/* Command line parameters */
58541+static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
58542+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
58543+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
58544+static bool pvscsi_disable_msi;
58545+static bool pvscsi_disable_msix;
58546+static bool pvscsi_use_msg = true;
58547+
58548+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
58549+
58550+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
58551+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
58552+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
58553+
58554+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
58555+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
58556+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
58557+
58558+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
58559+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
58560+ __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
58561+
58562+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
58563+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
58564+
58565+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
58566+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
58567+
58568+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
58569+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
58570+
58571+static const struct pci_device_id pvscsi_pci_tbl[] = {
58572+ { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
58573+ { 0 }
58574+};
58575+
58576+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
58577+
58578+static struct pvscsi_ctx *
58579+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58580+{
58581+ struct pvscsi_ctx *ctx, *end;
58582+
58583+ end = &adapter->cmd_map[adapter->req_depth];
58584+ for (ctx = adapter->cmd_map; ctx < end; ctx++)
58585+ if (ctx->cmd == cmd)
58586+ return ctx;
58587+
58588+ return NULL;
58589+}
58590+
58591+static struct pvscsi_ctx *
58592+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58593+{
58594+ struct pvscsi_ctx *ctx;
58595+
58596+ if (list_empty(&adapter->cmd_pool))
58597+ return NULL;
58598+
58599+ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
58600+ ctx->cmd = cmd;
58601+ list_del(&ctx->list);
58602+
58603+ return ctx;
58604+}
58605+
58606+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
58607+ struct pvscsi_ctx *ctx)
58608+{
58609+ ctx->cmd = NULL;
58610+ list_add(&ctx->list, &adapter->cmd_pool);
58611+}
58612+
58613+/*
58614+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
58615+ * non-zero integer. ctx always points to an entry in cmd_map array, hence
58616+ * the return value is always >=1.
58617+ */
58618+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
58619+ const struct pvscsi_ctx *ctx)
58620+{
58621+ return ctx - adapter->cmd_map + 1;
58622+}
58623+
58624+static struct pvscsi_ctx *
58625+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
58626+{
58627+ return &adapter->cmd_map[context - 1];
58628+}
58629+
58630+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
58631+ u32 offset, u32 val)
58632+{
58633+ writel(val, adapter->mmioBase + offset);
58634+}
58635+
58636+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
58637+{
58638+ return readl(adapter->mmioBase + offset);
58639+}
58640+
58641+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
58642+{
58643+ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
58644+}
58645+
58646+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
58647+ u32 val)
58648+{
58649+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
58650+}
58651+
58652+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
58653+{
58654+ u32 intr_bits;
58655+
58656+ intr_bits = PVSCSI_INTR_CMPL_MASK;
58657+ if (adapter->use_msg)
58658+ intr_bits |= PVSCSI_INTR_MSG_MASK;
58659+
58660+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
58661+}
58662+
58663+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
58664+{
58665+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
58666+}
58667+
58668+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
58669+ u32 cmd, const void *desc, size_t len)
58670+{
58671+ const u32 *ptr = desc;
58672+ size_t i;
58673+
58674+ len /= sizeof(*ptr);
58675+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
58676+ for (i = 0; i < len; i++)
58677+ pvscsi_reg_write(adapter,
58678+ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
58679+}
58680+
58681+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
58682+ const struct pvscsi_ctx *ctx)
58683+{
58684+ struct PVSCSICmdDescAbortCmd cmd = { 0 };
58685+
58686+ cmd.target = ctx->cmd->device->id;
58687+ cmd.context = pvscsi_map_context(adapter, ctx);
58688+
58689+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
58690+}
58691+
58692+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
58693+{
58694+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
58695+}
58696+
58697+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
58698+{
58699+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
58700+}
58701+
58702+static int scsi_is_rw(unsigned char op)
58703+{
58704+ return op == READ_6 || op == WRITE_6 ||
58705+ op == READ_10 || op == WRITE_10 ||
58706+ op == READ_12 || op == WRITE_12 ||
58707+ op == READ_16 || op == WRITE_16;
58708+}
58709+
58710+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
58711+ unsigned char op)
58712+{
58713+ if (scsi_is_rw(op))
58714+ pvscsi_kick_rw_io(adapter);
58715+ else
58716+ pvscsi_process_request_ring(adapter);
58717+}
58718+
58719+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
58720+{
58721+ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
58722+
58723+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
58724+}
58725+
58726+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
58727+{
58728+ dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
58729+
58730+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
58731+}
58732+
58733+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
58734+{
58735+ struct PVSCSICmdDescResetDevice cmd = { 0 };
58736+
58737+ dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
58738+
58739+ cmd.target = target;
58740+
58741+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
58742+ &cmd, sizeof(cmd));
58743+}
58744+
58745+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
58746+ struct scatterlist *sg, unsigned count)
58747+{
58748+ unsigned i;
58749+ struct PVSCSISGElement *sge;
58750+
58751+ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
58752+
58753+ sge = &ctx->sgl->sge[0];
58754+ for (i = 0; i < count; i++, sg++) {
58755+ sge[i].addr = sg_dma_address(sg);
58756+ sge[i].length = sg_dma_len(sg);
58757+ sge[i].flags = 0;
58758+ }
58759+}
58760+
58761+/*
58762+ * Map all data buffers for a command into PCI space and
58763+ * setup the scatter/gather list if needed.
58764+ */
58765+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
58766+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
58767+ struct PVSCSIRingReqDesc *e)
58768+{
58769+ unsigned count;
58770+ unsigned bufflen = scsi_bufflen(cmd);
58771+ struct scatterlist *sg;
58772+
58773+ e->dataLen = bufflen;
58774+ e->dataAddr = 0;
58775+ if (bufflen == 0)
58776+ return;
58777+
58778+ sg = scsi_sglist(cmd);
58779+ count = scsi_sg_count(cmd);
58780+ if (count != 0) {
58781+ int segs = scsi_dma_map(cmd);
58782+ if (segs > 1) {
58783+ pvscsi_create_sg(ctx, sg, segs);
58784+
58785+ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
58786+ ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
58787+ SGL_SIZE, PCI_DMA_TODEVICE);
58788+ e->dataAddr = ctx->sglPA;
58789+ } else
58790+ e->dataAddr = sg_dma_address(sg);
58791+ } else {
58792+ /*
58793+ * In case there is no S/G list, scsi_sglist points
58794+ * directly to the buffer.
58795+ */
58796+ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
58797+ cmd->sc_data_direction);
58798+ e->dataAddr = ctx->dataPA;
58799+ }
58800+}
58801+
58802+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
58803+ struct pvscsi_ctx *ctx)
58804+{
58805+ struct scsi_cmnd *cmd;
58806+ unsigned bufflen;
58807+
58808+ cmd = ctx->cmd;
58809+ bufflen = scsi_bufflen(cmd);
58810+
58811+ if (bufflen != 0) {
58812+ unsigned count = scsi_sg_count(cmd);
58813+
58814+ if (count != 0) {
58815+ scsi_dma_unmap(cmd);
58816+ if (ctx->sglPA) {
58817+ pci_unmap_single(adapter->dev, ctx->sglPA,
58818+ SGL_SIZE, PCI_DMA_TODEVICE);
58819+ ctx->sglPA = 0;
58820+ }
58821+ } else
58822+ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
58823+ cmd->sc_data_direction);
58824+ }
58825+ if (cmd->sense_buffer)
58826+ pci_unmap_single(adapter->dev, ctx->sensePA,
58827+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
58828+}
58829+
58830+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
58831+{
58832+ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
58833+ &adapter->ringStatePA);
58834+ if (!adapter->rings_state)
58835+ return -ENOMEM;
58836+
58837+ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
58838+ pvscsi_ring_pages);
58839+ adapter->req_depth = adapter->req_pages
58840+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
58841+ adapter->req_ring = pci_alloc_consistent(adapter->dev,
58842+ adapter->req_pages * PAGE_SIZE,
58843+ &adapter->reqRingPA);
58844+ if (!adapter->req_ring)
58845+ return -ENOMEM;
58846+
58847+ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
58848+ pvscsi_ring_pages);
58849+ adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
58850+ adapter->cmp_pages * PAGE_SIZE,
58851+ &adapter->cmpRingPA);
58852+ if (!adapter->cmp_ring)
58853+ return -ENOMEM;
58854+
58855+ BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
58856+ BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
58857+ BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
58858+
58859+ if (!adapter->use_msg)
58860+ return 0;
58861+
58862+ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
58863+ pvscsi_msg_ring_pages);
58864+ adapter->msg_ring = pci_alloc_consistent(adapter->dev,
58865+ adapter->msg_pages * PAGE_SIZE,
58866+ &adapter->msgRingPA);
58867+ if (!adapter->msg_ring)
58868+ return -ENOMEM;
58869+ BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
58870+
58871+ return 0;
58872+}
58873+
58874+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
58875+{
58876+ struct PVSCSICmdDescSetupRings cmd = { 0 };
58877+ dma_addr_t base;
58878+ unsigned i;
58879+
58880+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
58881+ cmd.reqRingNumPages = adapter->req_pages;
58882+ cmd.cmpRingNumPages = adapter->cmp_pages;
58883+
58884+ base = adapter->reqRingPA;
58885+ for (i = 0; i < adapter->req_pages; i++) {
58886+ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
58887+ base += PAGE_SIZE;
58888+ }
58889+
58890+ base = adapter->cmpRingPA;
58891+ for (i = 0; i < adapter->cmp_pages; i++) {
58892+ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
58893+ base += PAGE_SIZE;
58894+ }
58895+
58896+ memset(adapter->rings_state, 0, PAGE_SIZE);
58897+ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
58898+ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
58899+
58900+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
58901+ &cmd, sizeof(cmd));
58902+
58903+ if (adapter->use_msg) {
58904+ struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
58905+
58906+ cmd_msg.numPages = adapter->msg_pages;
58907+
58908+ base = adapter->msgRingPA;
58909+ for (i = 0; i < adapter->msg_pages; i++) {
58910+ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
58911+ base += PAGE_SIZE;
58912+ }
58913+ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
58914+
58915+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
58916+ &cmd_msg, sizeof(cmd_msg));
58917+ }
58918+}
58919+
58920+/*
58921+ * Pull a completion descriptor off and pass the completion back
58922+ * to the SCSI mid layer.
58923+ */
58924+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
58925+ const struct PVSCSIRingCmpDesc *e)
58926+{
58927+ struct pvscsi_ctx *ctx;
58928+ struct scsi_cmnd *cmd;
58929+ u32 btstat = e->hostStatus;
58930+ u32 sdstat = e->scsiStatus;
58931+
58932+ ctx = pvscsi_get_context(adapter, e->context);
58933+ cmd = ctx->cmd;
58934+ pvscsi_unmap_buffers(adapter, ctx);
58935+ pvscsi_release_context(adapter, ctx);
58936+ cmd->result = 0;
58937+
58938+ if (sdstat != SAM_STAT_GOOD &&
58939+ (btstat == BTSTAT_SUCCESS ||
58940+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
58941+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
58942+ cmd->result = (DID_OK << 16) | sdstat;
58943+ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
58944+ cmd->result |= (DRIVER_SENSE << 24);
58945+ } else
58946+ switch (btstat) {
58947+ case BTSTAT_SUCCESS:
58948+ case BTSTAT_LINKED_COMMAND_COMPLETED:
58949+ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
58950+ /* If everything went fine, let's move on.. */
58951+ cmd->result = (DID_OK << 16);
58952+ break;
58953+
58954+ case BTSTAT_DATARUN:
58955+ case BTSTAT_DATA_UNDERRUN:
58956+ /* Report residual data in underruns */
58957+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
58958+ cmd->result = (DID_ERROR << 16);
58959+ break;
58960+
58961+ case BTSTAT_SELTIMEO:
58962+ /* Our emulation returns this for non-connected devs */
58963+ cmd->result = (DID_BAD_TARGET << 16);
58964+ break;
58965+
58966+ case BTSTAT_LUNMISMATCH:
58967+ case BTSTAT_TAGREJECT:
58968+ case BTSTAT_BADMSG:
58969+ cmd->result = (DRIVER_INVALID << 24);
58970+ /* fall through */
58971+
58972+ case BTSTAT_HAHARDWARE:
58973+ case BTSTAT_INVPHASE:
58974+ case BTSTAT_HATIMEOUT:
58975+ case BTSTAT_NORESPONSE:
58976+ case BTSTAT_DISCONNECT:
58977+ case BTSTAT_HASOFTWARE:
58978+ case BTSTAT_BUSFREE:
58979+ case BTSTAT_SENSFAILED:
58980+ cmd->result |= (DID_ERROR << 16);
58981+ break;
58982+
58983+ case BTSTAT_SENTRST:
58984+ case BTSTAT_RECVRST:
58985+ case BTSTAT_BUSRESET:
58986+ cmd->result = (DID_RESET << 16);
58987+ break;
58988+
58989+ case BTSTAT_ABORTQUEUE:
58990+ cmd->result = (DID_ABORT << 16);
58991+ break;
58992+
58993+ case BTSTAT_SCSIPARITY:
58994+ cmd->result = (DID_PARITY << 16);
58995+ break;
58996+
58997+ default:
58998+ cmd->result = (DID_ERROR << 16);
58999+ scmd_printk(KERN_DEBUG, cmd,
59000+ "Unknown completion status: 0x%x\n",
59001+ btstat);
59002+ }
59003+
59004+ dev_dbg(&cmd->device->sdev_gendev,
59005+ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
59006+ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
59007+
59008+ cmd->scsi_done(cmd);
59009+}
59010+
59011+/*
59012+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
59013+ * where we may want to serialize some accesses between the driver and the
59014+ * emulation layer. We use compiler barriers instead of the more expensive
59015+ * memory barriers because PVSCSI is only supported on X86 which has strong
59016+ * memory access ordering.
59017+ */
59018+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
59019+{
59020+ struct PVSCSIRingsState *s = adapter->rings_state;
59021+ struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
59022+ u32 cmp_entries = s->cmpNumEntriesLog2;
59023+
59024+ while (s->cmpConsIdx != s->cmpProdIdx) {
59025+ struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
59026+ MASK(cmp_entries));
59027+ /*
59028+ * This barrier() ensures that *e is not dereferenced while
59029+ * the device emulation still writes data into the slot.
59030+ * Since the device emulation advances s->cmpProdIdx only after
59031+ * updating the slot we want to check it first.
59032+ */
59033+ barrier();
59034+ pvscsi_complete_request(adapter, e);
59035+ /*
59036+ * This barrier() ensures that compiler doesn't reorder write
59037+ * to s->cmpConsIdx before the read of (*e) inside
59038+ * pvscsi_complete_request. Otherwise, device emulation may
59039+ * overwrite *e before we had a chance to read it.
59040+ */
59041+ barrier();
59042+ s->cmpConsIdx++;
59043+ }
59044+}
59045+
59046+/*
59047+ * Translate a Linux SCSI request into a request ring entry.
59048+ */
59049+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
59050+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
59051+{
59052+ struct PVSCSIRingsState *s;
59053+ struct PVSCSIRingReqDesc *e;
59054+ struct scsi_device *sdev;
59055+ u32 req_entries;
59056+
59057+ s = adapter->rings_state;
59058+ sdev = cmd->device;
59059+ req_entries = s->reqNumEntriesLog2;
59060+
59061+ /*
59062+ * If this condition holds, we might have room on the request ring, but
59063+ * we might not have room on the completion ring for the response.
59064+ * However, we have already ruled out this possibility - we would not
59065+ * have successfully allocated a context if it were true, since we only
59066+ * have one context per request entry. Check for it anyway, since it
59067+ * would be a serious bug.
59068+ */
59069+ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
59070+ scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
59071+ "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
59072+ s->reqProdIdx, s->cmpConsIdx);
59073+ return -1;
59074+ }
59075+
59076+ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
59077+
59078+ e->bus = sdev->channel;
59079+ e->target = sdev->id;
59080+ memset(e->lun, 0, sizeof(e->lun));
59081+ e->lun[1] = sdev->lun;
59082+
59083+ if (cmd->sense_buffer) {
59084+ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
59085+ SCSI_SENSE_BUFFERSIZE,
59086+ PCI_DMA_FROMDEVICE);
59087+ e->senseAddr = ctx->sensePA;
59088+ e->senseLen = SCSI_SENSE_BUFFERSIZE;
59089+ } else {
59090+ e->senseLen = 0;
59091+ e->senseAddr = 0;
59092+ }
59093+ e->cdbLen = cmd->cmd_len;
59094+ e->vcpuHint = smp_processor_id();
59095+ memcpy(e->cdb, cmd->cmnd, e->cdbLen);
59096+
59097+ e->tag = SIMPLE_QUEUE_TAG;
59098+ if (sdev->tagged_supported &&
59099+ (cmd->tag == HEAD_OF_QUEUE_TAG ||
59100+ cmd->tag == ORDERED_QUEUE_TAG))
59101+ e->tag = cmd->tag;
59102+
59103+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
59104+ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
59105+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
59106+ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
59107+ else if (cmd->sc_data_direction == DMA_NONE)
59108+ e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
59109+ else
59110+ e->flags = 0;
59111+
59112+ pvscsi_map_buffers(adapter, ctx, cmd, e);
59113+
59114+ e->context = pvscsi_map_context(adapter, ctx);
59115+
59116+ barrier();
59117+
59118+ s->reqProdIdx++;
59119+
59120+ return 0;
59121+}
59122+
59123+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
59124+{
59125+ struct Scsi_Host *host = cmd->device->host;
59126+ struct pvscsi_adapter *adapter = shost_priv(host);
59127+ struct pvscsi_ctx *ctx;
59128+ unsigned long flags;
59129+
59130+ spin_lock_irqsave(&adapter->hw_lock, flags);
59131+
59132+ ctx = pvscsi_acquire_context(adapter, cmd);
59133+ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
59134+ if (ctx)
59135+ pvscsi_release_context(adapter, ctx);
59136+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59137+ return SCSI_MLQUEUE_HOST_BUSY;
59138+ }
59139+
59140+ cmd->scsi_done = done;
59141+
59142+ dev_dbg(&cmd->device->sdev_gendev,
59143+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
59144+
59145+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59146+
59147+ pvscsi_kick_io(adapter, cmd->cmnd[0]);
59148+
59149+ return 0;
59150+}
59151+
59152+static int pvscsi_abort(struct scsi_cmnd *cmd)
59153+{
59154+ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
59155+ struct pvscsi_ctx *ctx;
59156+ unsigned long flags;
59157+
59158+ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
59159+ adapter->host->host_no, cmd);
59160+
59161+ spin_lock_irqsave(&adapter->hw_lock, flags);
59162+
59163+ /*
59164+ * Poll the completion ring first - we might be trying to abort
59165+ * a command that is waiting to be dispatched in the completion ring.
59166+ */
59167+ pvscsi_process_completion_ring(adapter);
59168+
59169+ /*
59170+ * If there is no context for the command, it either already succeeded
59171+ * or else was never properly issued. Not our problem.
59172+ */
59173+ ctx = pvscsi_find_context(adapter, cmd);
59174+ if (!ctx) {
59175+ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
59176+ goto out;
59177+ }
59178+
59179+ pvscsi_abort_cmd(adapter, ctx);
59180+
59181+ pvscsi_process_completion_ring(adapter);
59182+
59183+out:
59184+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59185+ return SUCCESS;
59186+}
59187+
59188+/*
59189+ * Abort all outstanding requests. This is only safe to use if the completion
59190+ * ring will never be walked again or the device has been reset, because it
59191+ * destroys the 1-1 mapping between context field passed to emulation and our
59192+ * request structure.
59193+ */
59194+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
59195+{
59196+ unsigned i;
59197+
59198+ for (i = 0; i < adapter->req_depth; i++) {
59199+ struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
59200+ struct scsi_cmnd *cmd = ctx->cmd;
59201+ if (cmd) {
59202+ scmd_printk(KERN_ERR, cmd,
59203+ "Forced reset on cmd %p\n", cmd);
59204+ pvscsi_unmap_buffers(adapter, ctx);
59205+ pvscsi_release_context(adapter, ctx);
59206+ cmd->result = (DID_RESET << 16);
59207+ cmd->scsi_done(cmd);
59208+ }
59209+ }
59210+}
59211+
59212+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
59213+{
59214+ struct Scsi_Host *host = cmd->device->host;
59215+ struct pvscsi_adapter *adapter = shost_priv(host);
59216+ unsigned long flags;
59217+ bool use_msg;
59218+
59219+ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
59220+
59221+ spin_lock_irqsave(&adapter->hw_lock, flags);
59222+
59223+ use_msg = adapter->use_msg;
59224+
59225+ if (use_msg) {
59226+ adapter->use_msg = 0;
59227+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59228+
59229+ /*
59230+ * Now that we know that the ISR won't add more work on the
59231+ * workqueue we can safely flush any outstanding work.
59232+ */
59233+ flush_workqueue(adapter->workqueue);
59234+ spin_lock_irqsave(&adapter->hw_lock, flags);
59235+ }
59236+
59237+ /*
59238+ * We're going to tear down the entire ring structure and set it back
59239+ * up, so stalling new requests until all completions are flushed and
59240+ * the rings are back in place.
59241+ */
59242+
59243+ pvscsi_process_request_ring(adapter);
59244+
59245+ ll_adapter_reset(adapter);
59246+
59247+ /*
59248+ * Now process any completions. Note we do this AFTER adapter reset,
59249+ * which is strange, but stops races where completions get posted
59250+ * between processing the ring and issuing the reset. The backend will
59251+ * not touch the ring memory after reset, so the immediately pre-reset
59252+ * completion ring state is still valid.
59253+ */
59254+ pvscsi_process_completion_ring(adapter);
59255+
59256+ pvscsi_reset_all(adapter);
59257+ adapter->use_msg = use_msg;
59258+ pvscsi_setup_all_rings(adapter);
59259+ pvscsi_unmask_intr(adapter);
59260+
59261+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59262+
59263+ return SUCCESS;
59264+}
59265+
59266+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
59267+{
59268+ struct Scsi_Host *host = cmd->device->host;
59269+ struct pvscsi_adapter *adapter = shost_priv(host);
59270+ unsigned long flags;
59271+
59272+ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
59273+
59274+ /*
59275+ * We don't want to queue new requests for this bus after
59276+ * flushing all pending requests to emulation, since new
59277+ * requests could then sneak in during this bus reset phase,
59278+ * so take the lock now.
59279+ */
59280+ spin_lock_irqsave(&adapter->hw_lock, flags);
59281+
59282+ pvscsi_process_request_ring(adapter);
59283+ ll_bus_reset(adapter);
59284+ pvscsi_process_completion_ring(adapter);
59285+
59286+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59287+
59288+ return SUCCESS;
59289+}
59290+
59291+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
59292+{
59293+ struct Scsi_Host *host = cmd->device->host;
59294+ struct pvscsi_adapter *adapter = shost_priv(host);
59295+ unsigned long flags;
59296+
59297+ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
59298+ host->host_no, cmd->device->id);
59299+
59300+ /*
59301+ * We don't want to queue new requests for this device after flushing
59302+ * all pending requests to emulation, since new requests could then
59303+ * sneak in during this device reset phase, so take the lock now.
59304+ */
59305+ spin_lock_irqsave(&adapter->hw_lock, flags);
59306+
59307+ pvscsi_process_request_ring(adapter);
59308+ ll_device_reset(adapter, cmd->device->id);
59309+ pvscsi_process_completion_ring(adapter);
59310+
59311+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59312+
59313+ return SUCCESS;
59314+}
59315+
59316+static struct scsi_host_template pvscsi_template;
59317+
59318+static const char *pvscsi_info(struct Scsi_Host *host)
59319+{
59320+ struct pvscsi_adapter *adapter = shost_priv(host);
59321+ static char buf[256];
59322+
59323+ sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
59324+ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
59325+ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
59326+ pvscsi_template.cmd_per_lun);
59327+
59328+ return buf;
59329+}
59330+
59331+static struct scsi_host_template pvscsi_template = {
59332+ .module = THIS_MODULE,
59333+ .name = "VMware PVSCSI Host Adapter",
59334+ .proc_name = "vmw_pvscsi",
59335+ .info = pvscsi_info,
59336+ .queuecommand = pvscsi_queue,
59337+ .this_id = -1,
59338+ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
59339+ .dma_boundary = UINT_MAX,
59340+ .max_sectors = 0xffff,
59341+ .use_clustering = ENABLE_CLUSTERING,
59342+ .eh_abort_handler = pvscsi_abort,
59343+ .eh_device_reset_handler = pvscsi_device_reset,
59344+ .eh_bus_reset_handler = pvscsi_bus_reset,
59345+ .eh_host_reset_handler = pvscsi_host_reset,
59346+};
59347+
59348+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
59349+ const struct PVSCSIRingMsgDesc *e)
59350+{
59351+ struct PVSCSIRingsState *s = adapter->rings_state;
59352+ struct Scsi_Host *host = adapter->host;
59353+ struct scsi_device *sdev;
59354+
59355+ printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
59356+ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
59357+
59358+ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
59359+
59360+ if (e->type == PVSCSI_MSG_DEV_ADDED) {
59361+ struct PVSCSIMsgDescDevStatusChanged *desc;
59362+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59363+
59364+ printk(KERN_INFO
59365+ "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
59366+ desc->bus, desc->target, desc->lun[1]);
59367+
59368+ if (!scsi_host_get(host))
59369+ return;
59370+
59371+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
59372+ desc->lun[1]);
59373+ if (sdev) {
59374+ printk(KERN_INFO "vmw_pvscsi: device already exists\n");
59375+ scsi_device_put(sdev);
59376+ } else
59377+ scsi_add_device(adapter->host, desc->bus,
59378+ desc->target, desc->lun[1]);
59379+
59380+ scsi_host_put(host);
59381+ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
59382+ struct PVSCSIMsgDescDevStatusChanged *desc;
59383+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59384+
59385+ printk(KERN_INFO
59386+ "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
59387+ desc->bus, desc->target, desc->lun[1]);
59388+
59389+ if (!scsi_host_get(host))
59390+ return;
59391+
59392+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
59393+ desc->lun[1]);
59394+ if (sdev) {
59395+ scsi_remove_device(sdev);
59396+ scsi_device_put(sdev);
59397+ } else
59398+ printk(KERN_INFO
59399+ "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
59400+ desc->bus, desc->target, desc->lun[1]);
59401+
59402+ scsi_host_put(host);
59403+ }
59404+}
59405+
59406+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
59407+{
59408+ struct PVSCSIRingsState *s = adapter->rings_state;
59409+
59410+ return s->msgProdIdx != s->msgConsIdx;
59411+}
59412+
59413+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
59414+{
59415+ struct PVSCSIRingsState *s = adapter->rings_state;
59416+ struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
59417+ u32 msg_entries = s->msgNumEntriesLog2;
59418+
59419+ while (pvscsi_msg_pending(adapter)) {
59420+ struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
59421+ MASK(msg_entries));
59422+
59423+ barrier();
59424+ pvscsi_process_msg(adapter, e);
59425+ barrier();
59426+ s->msgConsIdx++;
59427+ }
59428+}
59429+
59430+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
59431+{
59432+ struct pvscsi_adapter *adapter;
59433+
59434+ adapter = container_of(data, struct pvscsi_adapter, work);
59435+
59436+ pvscsi_process_msg_ring(adapter);
59437+}
59438+
59439+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
59440+{
59441+ char name[32];
59442+
59443+ if (!pvscsi_use_msg)
59444+ return 0;
59445+
59446+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
59447+ PVSCSI_CMD_SETUP_MSG_RING);
59448+
59449+ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
59450+ return 0;
59451+
59452+ snprintf(name, sizeof(name),
59453+ "vmw_pvscsi_wq_%u", adapter->host->host_no);
59454+
59455+ adapter->workqueue = create_singlethread_workqueue(name);
59456+ if (!adapter->workqueue) {
59457+ printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
59458+ return 0;
59459+ }
59460+ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
59461+
59462+ return 1;
59463+}
59464+
59465+static irqreturn_t pvscsi_isr(int irq, void *devp)
59466+{
59467+ struct pvscsi_adapter *adapter = devp;
59468+ int handled;
59469+
59470+ if (adapter->use_msi || adapter->use_msix)
59471+ handled = true;
59472+ else {
59473+ u32 val = pvscsi_read_intr_status(adapter);
59474+ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
59475+ if (handled)
59476+ pvscsi_write_intr_status(devp, val);
59477+ }
59478+
59479+ if (handled) {
59480+ unsigned long flags;
59481+
59482+ spin_lock_irqsave(&adapter->hw_lock, flags);
59483+
59484+ pvscsi_process_completion_ring(adapter);
59485+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
59486+ queue_work(adapter->workqueue, &adapter->work);
59487+
59488+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59489+ }
59490+
59491+ return IRQ_RETVAL(handled);
59492+}
59493+
59494+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
59495+{
59496+ struct pvscsi_ctx *ctx = adapter->cmd_map;
59497+ unsigned i;
59498+
59499+ for (i = 0; i < adapter->req_depth; ++i, ++ctx)
59500+ kfree(ctx->sgl);
59501+}
59502+
59503+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
59504+{
59505+ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
59506+ int ret;
59507+
59508+ ret = pci_enable_msix(adapter->dev, &entry, 1);
59509+ if (ret)
59510+ return ret;
59511+
59512+ *irq = entry.vector;
59513+
59514+ return 0;
59515+}
59516+
59517+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
59518+{
59519+ if (adapter->irq) {
59520+ free_irq(adapter->irq, adapter);
59521+ adapter->irq = 0;
59522+ }
59523+ if (adapter->use_msi) {
59524+ pci_disable_msi(adapter->dev);
59525+ adapter->use_msi = 0;
59526+ } else if (adapter->use_msix) {
59527+ pci_disable_msix(adapter->dev);
59528+ adapter->use_msix = 0;
59529+ }
59530+}
59531+
59532+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
59533+{
59534+ pvscsi_shutdown_intr(adapter);
59535+
59536+ if (adapter->workqueue)
59537+ destroy_workqueue(adapter->workqueue);
59538+
59539+ if (adapter->mmioBase)
59540+ pci_iounmap(adapter->dev, adapter->mmioBase);
59541+
59542+ pci_release_regions(adapter->dev);
59543+
59544+ if (adapter->cmd_map) {
59545+ pvscsi_free_sgls(adapter);
59546+ kfree(adapter->cmd_map);
59547+ }
59548+
59549+ if (adapter->rings_state)
59550+ pci_free_consistent(adapter->dev, PAGE_SIZE,
59551+ adapter->rings_state, adapter->ringStatePA);
59552+
59553+ if (adapter->req_ring)
59554+ pci_free_consistent(adapter->dev,
59555+ adapter->req_pages * PAGE_SIZE,
59556+ adapter->req_ring, adapter->reqRingPA);
59557+
59558+ if (adapter->cmp_ring)
59559+ pci_free_consistent(adapter->dev,
59560+ adapter->cmp_pages * PAGE_SIZE,
59561+ adapter->cmp_ring, adapter->cmpRingPA);
59562+
59563+ if (adapter->msg_ring)
59564+ pci_free_consistent(adapter->dev,
59565+ adapter->msg_pages * PAGE_SIZE,
59566+ adapter->msg_ring, adapter->msgRingPA);
59567+}
59568+
59569+/*
59570+ * Allocate scatter gather lists.
59571+ *
59572+ * These are statically allocated. Trying to be clever was not worth it.
59573+ *
59574+ * Dynamic allocation can fail, and we can't go deeep into the memory
59575+ * allocator, since we're a SCSI driver, and trying too hard to allocate
59576+ * memory might generate disk I/O. We also don't want to fail disk I/O
59577+ * in that case because we can't get an allocation - the I/O could be
59578+ * trying to swap out data to free memory. Since that is pathological,
59579+ * just use a statically allocated scatter list.
59580+ *
59581+ */
59582+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
59583+{
59584+ struct pvscsi_ctx *ctx;
59585+ int i;
59586+
59587+ ctx = adapter->cmd_map;
59588+ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
59589+
59590+ for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
59591+ ctx->sgl = kmalloc(SGL_SIZE, GFP_KERNEL);
59592+ ctx->sglPA = 0;
59593+ BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
59594+ if (!ctx->sgl) {
59595+ for (; i >= 0; --i, --ctx) {
59596+ kfree(ctx->sgl);
59597+ ctx->sgl = NULL;
59598+ }
59599+ return -ENOMEM;
59600+ }
59601+ }
59602+
59603+ return 0;
59604+}
59605+
59606+static int __devinit pvscsi_probe(struct pci_dev *pdev,
59607+ const struct pci_device_id *id)
59608+{
59609+ struct pvscsi_adapter *adapter;
59610+ struct Scsi_Host *host;
59611+ unsigned int i;
59612+ int error;
59613+
59614+ error = -ENODEV;
59615+
59616+ if (pci_enable_device(pdev))
59617+ return error;
59618+
59619+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
59620+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
59621+ printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
59622+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
59623+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
59624+ printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
59625+ } else {
59626+ printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
59627+ goto out_disable_device;
59628+ }
59629+
59630+ pvscsi_template.can_queue =
59631+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
59632+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
59633+ pvscsi_template.cmd_per_lun =
59634+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
59635+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
59636+ if (!host) {
59637+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
59638+ goto out_disable_device;
59639+ }
59640+
59641+ adapter = shost_priv(host);
59642+ memset(adapter, 0, sizeof(*adapter));
59643+ adapter->dev = pdev;
59644+ adapter->host = host;
59645+
59646+ spin_lock_init(&adapter->hw_lock);
59647+
59648+ host->max_channel = 0;
59649+ host->max_id = 16;
59650+ host->max_lun = 1;
59651+ host->max_cmd_len = 16;
59652+
59653+ adapter->rev = pdev->revision;
59654+
59655+ if (pci_request_regions(pdev, "vmw_pvscsi")) {
59656+ printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
59657+ goto out_free_host;
59658+ }
59659+
59660+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
59661+ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
59662+ continue;
59663+
59664+ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
59665+ continue;
59666+
59667+ break;
59668+ }
59669+
59670+ if (i == DEVICE_COUNT_RESOURCE) {
59671+ printk(KERN_ERR
59672+ "vmw_pvscsi: adapter has no suitable MMIO region\n");
59673+ goto out_release_resources;
59674+ }
59675+
59676+ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
59677+
59678+ if (!adapter->mmioBase) {
59679+ printk(KERN_ERR
59680+ "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
59681+ i, PVSCSI_MEM_SPACE_SIZE);
59682+ goto out_release_resources;
59683+ }
59684+
59685+ pci_set_master(pdev);
59686+ pci_set_drvdata(pdev, host);
59687+
59688+ ll_adapter_reset(adapter);
59689+
59690+ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
59691+
59692+ error = pvscsi_allocate_rings(adapter);
59693+ if (error) {
59694+ printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
59695+ goto out_release_resources;
59696+ }
59697+
59698+ /*
59699+ * From this point on we should reset the adapter if anything goes
59700+ * wrong.
59701+ */
59702+ pvscsi_setup_all_rings(adapter);
59703+
59704+ adapter->cmd_map = kcalloc(adapter->req_depth,
59705+ sizeof(struct pvscsi_ctx), GFP_KERNEL);
59706+ if (!adapter->cmd_map) {
59707+ printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
59708+ error = -ENOMEM;
59709+ goto out_reset_adapter;
59710+ }
59711+
59712+ INIT_LIST_HEAD(&adapter->cmd_pool);
59713+ for (i = 0; i < adapter->req_depth; i++) {
59714+ struct pvscsi_ctx *ctx = adapter->cmd_map + i;
59715+ list_add(&ctx->list, &adapter->cmd_pool);
59716+ }
59717+
59718+ error = pvscsi_allocate_sg(adapter);
59719+ if (error) {
59720+ printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
59721+ goto out_reset_adapter;
59722+ }
59723+
59724+ if (!pvscsi_disable_msix &&
59725+ pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
59726+ printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
59727+ adapter->use_msix = 1;
59728+ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
59729+ printk(KERN_INFO "vmw_pvscsi: using MSI\n");
59730+ adapter->use_msi = 1;
59731+ adapter->irq = pdev->irq;
59732+ } else {
59733+ printk(KERN_INFO "vmw_pvscsi: using INTx\n");
59734+ adapter->irq = pdev->irq;
59735+ }
59736+
59737+ error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
59738+ "vmw_pvscsi", adapter);
59739+ if (error) {
59740+ printk(KERN_ERR
59741+ "vmw_pvscsi: unable to request IRQ: %d\n", error);
59742+ adapter->irq = 0;
59743+ goto out_reset_adapter;
59744+ }
59745+
59746+ error = scsi_add_host(host, &pdev->dev);
59747+ if (error) {
59748+ printk(KERN_ERR
59749+ "vmw_pvscsi: scsi_add_host failed: %d\n", error);
59750+ goto out_reset_adapter;
59751+ }
59752+
59753+ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
59754+ adapter->rev, host->host_no);
59755+
59756+ pvscsi_unmask_intr(adapter);
59757+
59758+ scsi_scan_host(host);
59759+
59760+ return 0;
59761+
59762+out_reset_adapter:
59763+ ll_adapter_reset(adapter);
59764+out_release_resources:
59765+ pvscsi_release_resources(adapter);
59766+out_free_host:
59767+ scsi_host_put(host);
59768+out_disable_device:
59769+ pci_set_drvdata(pdev, NULL);
59770+ pci_disable_device(pdev);
59771+
59772+ return error;
59773+}
59774+
59775+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
59776+{
59777+ pvscsi_mask_intr(adapter);
59778+
59779+ if (adapter->workqueue)
59780+ flush_workqueue(adapter->workqueue);
59781+
59782+ pvscsi_shutdown_intr(adapter);
59783+
59784+ pvscsi_process_request_ring(adapter);
59785+ pvscsi_process_completion_ring(adapter);
59786+ ll_adapter_reset(adapter);
59787+}
59788+
59789+static void pvscsi_shutdown(struct pci_dev *dev)
59790+{
59791+ struct Scsi_Host *host = pci_get_drvdata(dev);
59792+ struct pvscsi_adapter *adapter = shost_priv(host);
59793+
59794+ __pvscsi_shutdown(adapter);
59795+}
59796+
59797+static void pvscsi_remove(struct pci_dev *pdev)
59798+{
59799+ struct Scsi_Host *host = pci_get_drvdata(pdev);
59800+ struct pvscsi_adapter *adapter = shost_priv(host);
59801+
59802+ scsi_remove_host(host);
59803+
59804+ __pvscsi_shutdown(adapter);
59805+ pvscsi_release_resources(adapter);
59806+
59807+ scsi_host_put(host);
59808+
59809+ pci_set_drvdata(pdev, NULL);
59810+ pci_disable_device(pdev);
59811+}
59812+
59813+static struct pci_driver pvscsi_pci_driver = {
59814+ .name = "vmw_pvscsi",
59815+ .id_table = pvscsi_pci_tbl,
59816+ .probe = pvscsi_probe,
59817+ .remove = __devexit_p(pvscsi_remove),
59818+ .shutdown = pvscsi_shutdown,
59819+};
59820+
59821+static int __init pvscsi_init(void)
59822+{
59823+ pr_info("%s - version %s\n",
59824+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
59825+ return pci_register_driver(&pvscsi_pci_driver);
59826+}
59827+
59828+static void __exit pvscsi_exit(void)
59829+{
59830+ pci_unregister_driver(&pvscsi_pci_driver);
59831+}
59832+
59833+module_init(pvscsi_init);
59834+module_exit(pvscsi_exit);
59835diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
59836new file mode 100644
59837index 0000000..62e36e7
59838--- /dev/null
59839+++ b/drivers/scsi/vmw_pvscsi.h
59840@@ -0,0 +1,397 @@
59841+/*
59842+ * VMware PVSCSI header file
59843+ *
59844+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
59845+ *
59846+ * This program is free software; you can redistribute it and/or modify it
59847+ * under the terms of the GNU General Public License as published by the
59848+ * Free Software Foundation; version 2 of the License and no later version.
59849+ *
59850+ * This program is distributed in the hope that it will be useful, but
59851+ * WITHOUT ANY WARRANTY; without even the implied warranty of
59852+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
59853+ * NON INFRINGEMENT. See the GNU General Public License for more
59854+ * details.
59855+ *
59856+ * You should have received a copy of the GNU General Public License
59857+ * along with this program; if not, write to the Free Software
59858+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
59859+ *
59860+ * Maintained by: Alok N Kataria <akataria@vmware.com>
59861+ *
59862+ */
59863+
59864+#ifndef _VMW_PVSCSI_H_
59865+#define _VMW_PVSCSI_H_
59866+
59867+#include <linux/types.h>
59868+
59869+#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
59870+
59871+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
59872+
59873+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
59874+
59875+#define PCI_VENDOR_ID_VMWARE 0x15AD
59876+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
59877+
59878+/*
59879+ * host adapter status/error codes
59880+ */
59881+enum HostBusAdapterStatus {
59882+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
59883+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
59884+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
59885+ BTSTAT_DATA_UNDERRUN = 0x0c,
59886+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
59887+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
59888+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
59889+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
59890+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
59891+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
59892+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
59893+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
59894+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
59895+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
59896+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
59897+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
59898+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
59899+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
59900+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
59901+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
59902+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
59903+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
59904+};
59905+
59906+/*
59907+ * Register offsets.
59908+ *
59909+ * These registers are accessible both via i/o space and mm i/o.
59910+ */
59911+
59912+enum PVSCSIRegOffset {
59913+ PVSCSI_REG_OFFSET_COMMAND = 0x0,
59914+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
59915+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
59916+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
59917+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
59918+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
59919+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
59920+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
59921+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
59922+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
59923+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
59924+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
59925+};
59926+
59927+/*
59928+ * Virtual h/w commands.
59929+ */
59930+
59931+enum PVSCSICommands {
59932+ PVSCSI_CMD_FIRST = 0, /* has to be first */
59933+
59934+ PVSCSI_CMD_ADAPTER_RESET = 1,
59935+ PVSCSI_CMD_ISSUE_SCSI = 2,
59936+ PVSCSI_CMD_SETUP_RINGS = 3,
59937+ PVSCSI_CMD_RESET_BUS = 4,
59938+ PVSCSI_CMD_RESET_DEVICE = 5,
59939+ PVSCSI_CMD_ABORT_CMD = 6,
59940+ PVSCSI_CMD_CONFIG = 7,
59941+ PVSCSI_CMD_SETUP_MSG_RING = 8,
59942+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
59943+
59944+ PVSCSI_CMD_LAST = 10 /* has to be last */
59945+};
59946+
59947+/*
59948+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
59949+ */
59950+
59951+struct PVSCSICmdDescResetDevice {
59952+ u32 target;
59953+ u8 lun[8];
59954+} __packed;
59955+
59956+/*
59957+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
59958+ *
59959+ * - currently does not support specifying the LUN.
59960+ * - _pad should be 0.
59961+ */
59962+
59963+struct PVSCSICmdDescAbortCmd {
59964+ u64 context;
59965+ u32 target;
59966+ u32 _pad;
59967+} __packed;
59968+
59969+/*
59970+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
59971+ *
59972+ * Notes:
59973+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
59974+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
59975+ * - reqRingNumPages and cmpRingNumPages need to be inferior to
59976+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
59977+ */
59978+
59979+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
59980+struct PVSCSICmdDescSetupRings {
59981+ u32 reqRingNumPages;
59982+ u32 cmpRingNumPages;
59983+ u64 ringsStatePPN;
59984+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59985+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59986+} __packed;
59987+
59988+/*
59989+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
59990+ *
59991+ * Notes:
59992+ * - this command was not supported in the initial revision of the h/w
59993+ * interface. Before using it, you need to check that it is supported by
59994+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
59995+ * immediately after read the 'command status' register:
59996+ * * a value of -1 means that the cmd is NOT supported,
59997+ * * a value != -1 means that the cmd IS supported.
59998+ * If it's supported the 'command status' register should return:
59999+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
60000+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
60001+ * RingsState page is already setup. If not, the command is a nop.
60002+ * - numPages needs to be a power of two,
60003+ * - numPages needs to be different from 0,
60004+ * - _pad should be zero.
60005+ */
60006+
60007+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
60008+
60009+struct PVSCSICmdDescSetupMsgRing {
60010+ u32 numPages;
60011+ u32 _pad;
60012+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
60013+} __packed;
60014+
60015+enum PVSCSIMsgType {
60016+ PVSCSI_MSG_DEV_ADDED = 0,
60017+ PVSCSI_MSG_DEV_REMOVED = 1,
60018+ PVSCSI_MSG_LAST = 2,
60019+};
60020+
60021+/*
60022+ * Msg descriptor.
60023+ *
60024+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
60025+ *
60026+ * - type is of type enum PVSCSIMsgType.
60027+ * - the content of args depend on the type of event being delivered.
60028+ */
60029+
60030+struct PVSCSIRingMsgDesc {
60031+ u32 type;
60032+ u32 args[31];
60033+} __packed;
60034+
60035+struct PVSCSIMsgDescDevStatusChanged {
60036+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
60037+ u32 bus;
60038+ u32 target;
60039+ u8 lun[8];
60040+ u32 pad[27];
60041+} __packed;
60042+
60043+/*
60044+ * Rings state.
60045+ *
60046+ * - the fields:
60047+ * . msgProdIdx,
60048+ * . msgConsIdx,
60049+ * . msgNumEntriesLog2,
60050+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
60051+ * - '_pad' helps to ensure that the msg related fields are on their own
60052+ * cache-line.
60053+ */
60054+
60055+struct PVSCSIRingsState {
60056+ u32 reqProdIdx;
60057+ u32 reqConsIdx;
60058+ u32 reqNumEntriesLog2;
60059+
60060+ u32 cmpProdIdx;
60061+ u32 cmpConsIdx;
60062+ u32 cmpNumEntriesLog2;
60063+
60064+ u8 _pad[104];
60065+
60066+ u32 msgProdIdx;
60067+ u32 msgConsIdx;
60068+ u32 msgNumEntriesLog2;
60069+} __packed;
60070+
60071+/*
60072+ * Request descriptor.
60073+ *
60074+ * sizeof(RingReqDesc) = 128
60075+ *
60076+ * - context: is a unique identifier of a command. It could normally be any
60077+ * 64bit value, however we currently store it in the serialNumber variable
60078+ * of struct SCSI_Command, so we have the following restrictions due to the
60079+ * way this field is handled in the vmkernel storage stack:
60080+ * * this value can't be 0,
60081+ * * the upper 32bit need to be 0 since serialNumber is as a u32.
60082+ * Currently tracked as PR 292060.
60083+ * - dataLen: contains the total number of bytes that need to be transferred.
60084+ * - dataAddr:
60085+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
60086+ * s/g table segment, each s/g segment is entirely contained on a single
60087+ * page of physical memory,
60088+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
60089+ * the buffer used for the DMA transfer,
60090+ * - flags:
60091+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
60092+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
60093+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
60094+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
60095+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
60096+ * 16bytes. To be specified.
60097+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
60098+ * completion of the i/o. For guest OSes that use lowest priority message
60099+ * delivery mode (such as windows), we use this "hint" to deliver the
60100+ * completion action to the proper vcpu. For now, we can use the vcpuId of
60101+ * the processor that initiated the i/o as a likely candidate for the vcpu
60102+ * that will be waiting for the completion..
60103+ * - bus should be 0: we currently only support bus 0 for now.
60104+ * - unused should be zero'd.
60105+ */
60106+
60107+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
60108+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
60109+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
60110+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
60111+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
60112+
60113+struct PVSCSIRingReqDesc {
60114+ u64 context;
60115+ u64 dataAddr;
60116+ u64 dataLen;
60117+ u64 senseAddr;
60118+ u32 senseLen;
60119+ u32 flags;
60120+ u8 cdb[16];
60121+ u8 cdbLen;
60122+ u8 lun[8];
60123+ u8 tag;
60124+ u8 bus;
60125+ u8 target;
60126+ u8 vcpuHint;
60127+ u8 unused[59];
60128+} __packed;
60129+
60130+/*
60131+ * Scatter-gather list management.
60132+ *
60133+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
60134+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
60135+ * table segment.
60136+ *
60137+ * - each segment of the s/g table contain a succession of struct
60138+ * PVSCSISGElement.
60139+ * - each segment is entirely contained on a single physical page of memory.
60140+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
60141+ * PVSCSISGElement.flags and in this case:
60142+ * * addr is the PA of the next s/g segment,
60143+ * * length is undefined, assumed to be 0.
60144+ */
60145+
60146+struct PVSCSISGElement {
60147+ u64 addr;
60148+ u32 length;
60149+ u32 flags;
60150+} __packed;
60151+
60152+/*
60153+ * Completion descriptor.
60154+ *
60155+ * sizeof(RingCmpDesc) = 32
60156+ *
60157+ * - context: identifier of the command. The same thing that was specified
60158+ * under "context" as part of struct RingReqDesc at initiation time,
60159+ * - dataLen: number of bytes transferred for the actual i/o operation,
60160+ * - senseLen: number of bytes written into the sense buffer,
60161+ * - hostStatus: adapter status,
60162+ * - scsiStatus: device status,
60163+ * - _pad should be zero.
60164+ */
60165+
60166+struct PVSCSIRingCmpDesc {
60167+ u64 context;
60168+ u64 dataLen;
60169+ u32 senseLen;
60170+ u16 hostStatus;
60171+ u16 scsiStatus;
60172+ u32 _pad[2];
60173+} __packed;
60174+
60175+/*
60176+ * Interrupt status / IRQ bits.
60177+ */
60178+
60179+#define PVSCSI_INTR_CMPL_0 (1 << 0)
60180+#define PVSCSI_INTR_CMPL_1 (1 << 1)
60181+#define PVSCSI_INTR_CMPL_MASK MASK(2)
60182+
60183+#define PVSCSI_INTR_MSG_0 (1 << 2)
60184+#define PVSCSI_INTR_MSG_1 (1 << 3)
60185+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
60186+
60187+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
60188+
60189+/*
60190+ * Number of MSI-X vectors supported.
60191+ */
60192+#define PVSCSI_MAX_INTRS 24
60193+
60194+/*
60195+ * Enumeration of supported MSI-X vectors
60196+ */
60197+#define PVSCSI_VECTOR_COMPLETION 0
60198+
60199+/*
60200+ * Misc constants for the rings.
60201+ */
60202+
60203+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60204+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60205+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
60206+
60207+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
60208+ (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
60209+
60210+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
60211+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
60212+
60213+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
60214+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
60215+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
60216+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
60217+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
60218+
60219+enum PVSCSIMemSpace {
60220+ PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
60221+ PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
60222+ PVSCSI_MEM_SPACE_MISC_PAGE = 2,
60223+ PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
60224+ PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
60225+ PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
60226+};
60227+
60228+#define PVSCSI_MEM_SPACE_NUM_PAGES \
60229+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
60230+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
60231+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
60232+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
60233+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
60234+
60235+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
60236+
60237+#endif /* _VMW_PVSCSI_H_ */
60238diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
60239index eadc1ab..2d81457 100644
60240--- a/drivers/serial/kgdboc.c
60241+++ b/drivers/serial/kgdboc.c
60242@@ -18,7 +18,7 @@
60243
60244 #define MAX_CONFIG_LEN 40
60245
60246-static struct kgdb_io kgdboc_io_ops;
60247+static const struct kgdb_io kgdboc_io_ops;
60248
60249 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
60250 static int configured = -1;
60251@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
60252 module_put(THIS_MODULE);
60253 }
60254
60255-static struct kgdb_io kgdboc_io_ops = {
60256+static const struct kgdb_io kgdboc_io_ops = {
60257 .name = "kgdboc",
60258 .read_char = kgdboc_get_char,
60259 .write_char = kgdboc_put_char,
60260diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
60261index b76f246..7f41af7 100644
60262--- a/drivers/spi/spi.c
60263+++ b/drivers/spi/spi.c
60264@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
60265 EXPORT_SYMBOL_GPL(spi_sync);
60266
60267 /* portable code must never pass more than 32 bytes */
60268-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
60269+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
60270
60271 static u8 *buf;
60272
60273diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
60274index b9b37ff..19dfa23 100644
60275--- a/drivers/staging/android/binder.c
60276+++ b/drivers/staging/android/binder.c
60277@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
60278 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
60279 }
60280
60281-static struct vm_operations_struct binder_vm_ops = {
60282+static const struct vm_operations_struct binder_vm_ops = {
60283 .open = binder_vma_open,
60284 .close = binder_vma_close,
60285 };
60286diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
60287index cda26bb..39fed3f 100644
60288--- a/drivers/staging/b3dfg/b3dfg.c
60289+++ b/drivers/staging/b3dfg/b3dfg.c
60290@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
60291 return VM_FAULT_NOPAGE;
60292 }
60293
60294-static struct vm_operations_struct b3dfg_vm_ops = {
60295+static const struct vm_operations_struct b3dfg_vm_ops = {
60296 .fault = b3dfg_vma_fault,
60297 };
60298
60299@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
60300 return r;
60301 }
60302
60303-static struct file_operations b3dfg_fops = {
60304+static const struct file_operations b3dfg_fops = {
60305 .owner = THIS_MODULE,
60306 .open = b3dfg_open,
60307 .release = b3dfg_release,
60308diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
60309index 908f25a..c9a579b 100644
60310--- a/drivers/staging/comedi/comedi_fops.c
60311+++ b/drivers/staging/comedi/comedi_fops.c
60312@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
60313 mutex_unlock(&dev->mutex);
60314 }
60315
60316-static struct vm_operations_struct comedi_vm_ops = {
60317+static const struct vm_operations_struct comedi_vm_ops = {
60318 .close = comedi_unmap,
60319 };
60320
60321diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
60322index e55a0db..577b776 100644
60323--- a/drivers/staging/dream/qdsp5/adsp_driver.c
60324+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
60325@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
60326 static dev_t adsp_devno;
60327 static struct class *adsp_class;
60328
60329-static struct file_operations adsp_fops = {
60330+static const struct file_operations adsp_fops = {
60331 .owner = THIS_MODULE,
60332 .open = adsp_open,
60333 .unlocked_ioctl = adsp_ioctl,
60334diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
60335index ad2390f..4116ee8 100644
60336--- a/drivers/staging/dream/qdsp5/audio_aac.c
60337+++ b/drivers/staging/dream/qdsp5/audio_aac.c
60338@@ -1022,7 +1022,7 @@ done:
60339 return rc;
60340 }
60341
60342-static struct file_operations audio_aac_fops = {
60343+static const struct file_operations audio_aac_fops = {
60344 .owner = THIS_MODULE,
60345 .open = audio_open,
60346 .release = audio_release,
60347diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
60348index cd818a5..870b37b 100644
60349--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
60350+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
60351@@ -833,7 +833,7 @@ done:
60352 return rc;
60353 }
60354
60355-static struct file_operations audio_amrnb_fops = {
60356+static const struct file_operations audio_amrnb_fops = {
60357 .owner = THIS_MODULE,
60358 .open = audamrnb_open,
60359 .release = audamrnb_release,
60360diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
60361index 4b43e18..cedafda 100644
60362--- a/drivers/staging/dream/qdsp5/audio_evrc.c
60363+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
60364@@ -805,7 +805,7 @@ dma_fail:
60365 return rc;
60366 }
60367
60368-static struct file_operations audio_evrc_fops = {
60369+static const struct file_operations audio_evrc_fops = {
60370 .owner = THIS_MODULE,
60371 .open = audevrc_open,
60372 .release = audevrc_release,
60373diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
60374index 3d950a2..9431118 100644
60375--- a/drivers/staging/dream/qdsp5/audio_in.c
60376+++ b/drivers/staging/dream/qdsp5/audio_in.c
60377@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
60378 return 0;
60379 }
60380
60381-static struct file_operations audio_fops = {
60382+static const struct file_operations audio_fops = {
60383 .owner = THIS_MODULE,
60384 .open = audio_in_open,
60385 .release = audio_in_release,
60386@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
60387 .unlocked_ioctl = audio_in_ioctl,
60388 };
60389
60390-static struct file_operations audpre_fops = {
60391+static const struct file_operations audpre_fops = {
60392 .owner = THIS_MODULE,
60393 .open = audpre_open,
60394 .unlocked_ioctl = audpre_ioctl,
60395diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
60396index b95574f..286c2f4 100644
60397--- a/drivers/staging/dream/qdsp5/audio_mp3.c
60398+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
60399@@ -941,7 +941,7 @@ done:
60400 return rc;
60401 }
60402
60403-static struct file_operations audio_mp3_fops = {
60404+static const struct file_operations audio_mp3_fops = {
60405 .owner = THIS_MODULE,
60406 .open = audio_open,
60407 .release = audio_release,
60408diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
60409index d1adcf6..f8f9833 100644
60410--- a/drivers/staging/dream/qdsp5/audio_out.c
60411+++ b/drivers/staging/dream/qdsp5/audio_out.c
60412@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
60413 return 0;
60414 }
60415
60416-static struct file_operations audio_fops = {
60417+static const struct file_operations audio_fops = {
60418 .owner = THIS_MODULE,
60419 .open = audio_open,
60420 .release = audio_release,
60421@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
60422 .unlocked_ioctl = audio_ioctl,
60423 };
60424
60425-static struct file_operations audpp_fops = {
60426+static const struct file_operations audpp_fops = {
60427 .owner = THIS_MODULE,
60428 .open = audpp_open,
60429 .unlocked_ioctl = audpp_ioctl,
60430diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
60431index f0f50e3..f6b9dbc 100644
60432--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
60433+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
60434@@ -816,7 +816,7 @@ err:
60435 return rc;
60436 }
60437
60438-static struct file_operations audio_qcelp_fops = {
60439+static const struct file_operations audio_qcelp_fops = {
60440 .owner = THIS_MODULE,
60441 .open = audqcelp_open,
60442 .release = audqcelp_release,
60443diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
60444index 037d7ff..5469ec3 100644
60445--- a/drivers/staging/dream/qdsp5/snd.c
60446+++ b/drivers/staging/dream/qdsp5/snd.c
60447@@ -242,7 +242,7 @@ err:
60448 return rc;
60449 }
60450
60451-static struct file_operations snd_fops = {
60452+static const struct file_operations snd_fops = {
60453 .owner = THIS_MODULE,
60454 .open = snd_open,
60455 .release = snd_release,
60456diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
60457index d4e7d88..0ea632a 100644
60458--- a/drivers/staging/dream/smd/smd_qmi.c
60459+++ b/drivers/staging/dream/smd/smd_qmi.c
60460@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
60461 return 0;
60462 }
60463
60464-static struct file_operations qmi_fops = {
60465+static const struct file_operations qmi_fops = {
60466 .owner = THIS_MODULE,
60467 .read = qmi_read,
60468 .write = qmi_write,
60469diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60470index cd3910b..ff053d3 100644
60471--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
60472+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60473@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
60474 return rc;
60475 }
60476
60477-static struct file_operations rpcrouter_server_fops = {
60478+static const struct file_operations rpcrouter_server_fops = {
60479 .owner = THIS_MODULE,
60480 .open = rpcrouter_open,
60481 .release = rpcrouter_release,
60482@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
60483 .unlocked_ioctl = rpcrouter_ioctl,
60484 };
60485
60486-static struct file_operations rpcrouter_router_fops = {
60487+static const struct file_operations rpcrouter_router_fops = {
60488 .owner = THIS_MODULE,
60489 .open = rpcrouter_open,
60490 .release = rpcrouter_release,
60491diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
60492index c24e4e0..07665be 100644
60493--- a/drivers/staging/dst/dcore.c
60494+++ b/drivers/staging/dst/dcore.c
60495@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
60496 return 0;
60497 }
60498
60499-static struct block_device_operations dst_blk_ops = {
60500+static const struct block_device_operations dst_blk_ops = {
60501 .open = dst_bdev_open,
60502 .release = dst_bdev_release,
60503 .owner = THIS_MODULE,
60504@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
60505 n->size = ctl->size;
60506
60507 atomic_set(&n->refcnt, 1);
60508- atomic_long_set(&n->gen, 0);
60509+ atomic_long_set_unchecked(&n->gen, 0);
60510 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
60511
60512 err = dst_node_sysfs_init(n);
60513diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
60514index 557d372..8d84422 100644
60515--- a/drivers/staging/dst/trans.c
60516+++ b/drivers/staging/dst/trans.c
60517@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
60518 t->error = 0;
60519 t->retries = 0;
60520 atomic_set(&t->refcnt, 1);
60521- t->gen = atomic_long_inc_return(&n->gen);
60522+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
60523
60524 t->enc = bio_data_dir(bio);
60525 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
60526diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
60527index 94f7752..d051514 100644
60528--- a/drivers/staging/et131x/et1310_tx.c
60529+++ b/drivers/staging/et131x/et1310_tx.c
60530@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
60531 struct net_device_stats *stats = &etdev->net_stats;
60532
60533 if (pMpTcb->Flags & fMP_DEST_BROAD)
60534- atomic_inc(&etdev->Stats.brdcstxmt);
60535+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
60536 else if (pMpTcb->Flags & fMP_DEST_MULTI)
60537- atomic_inc(&etdev->Stats.multixmt);
60538+ atomic_inc_unchecked(&etdev->Stats.multixmt);
60539 else
60540- atomic_inc(&etdev->Stats.unixmt);
60541+ atomic_inc_unchecked(&etdev->Stats.unixmt);
60542
60543 if (pMpTcb->Packet) {
60544 stats->tx_bytes += pMpTcb->Packet->len;
60545diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
60546index 1dfe06f..f469b4d 100644
60547--- a/drivers/staging/et131x/et131x_adapter.h
60548+++ b/drivers/staging/et131x/et131x_adapter.h
60549@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
60550 * operations
60551 */
60552 u32 unircv; /* # multicast packets received */
60553- atomic_t unixmt; /* # multicast packets for Tx */
60554+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
60555 u32 multircv; /* # multicast packets received */
60556- atomic_t multixmt; /* # multicast packets for Tx */
60557+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
60558 u32 brdcstrcv; /* # broadcast packets received */
60559- atomic_t brdcstxmt; /* # broadcast packets for Tx */
60560+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
60561 u32 norcvbuf; /* # Rx packets discarded */
60562 u32 noxmtbuf; /* # Tx packets discarded */
60563
60564diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
60565index 4bd353a..e28f455 100644
60566--- a/drivers/staging/go7007/go7007-v4l2.c
60567+++ b/drivers/staging/go7007/go7007-v4l2.c
60568@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60569 return 0;
60570 }
60571
60572-static struct vm_operations_struct go7007_vm_ops = {
60573+static const struct vm_operations_struct go7007_vm_ops = {
60574 .open = go7007_vm_open,
60575 .close = go7007_vm_close,
60576 .fault = go7007_vm_fault,
60577diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
60578index 366dc95..b974d87 100644
60579--- a/drivers/staging/hv/Channel.c
60580+++ b/drivers/staging/hv/Channel.c
60581@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
60582
60583 DPRINT_ENTER(VMBUS);
60584
60585- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
60586- atomic_inc(&gVmbusConnection.NextGpadlHandle);
60587+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
60588+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
60589
60590 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
60591 ASSERT(msgInfo != NULL);
60592diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
60593index b12237f..01ae28a 100644
60594--- a/drivers/staging/hv/Hv.c
60595+++ b/drivers/staging/hv/Hv.c
60596@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
60597 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
60598 u32 outputAddressHi = outputAddress >> 32;
60599 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
60600- volatile void *hypercallPage = gHvContext.HypercallPage;
60601+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
60602
60603 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
60604 Control, Input, Output);
60605diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
60606index d089bb1..2ebc158 100644
60607--- a/drivers/staging/hv/VmbusApi.h
60608+++ b/drivers/staging/hv/VmbusApi.h
60609@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
60610 u32 *GpadlHandle);
60611 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
60612 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
60613-};
60614+} __no_const;
60615
60616 /* Base driver object */
60617 struct hv_driver {
60618diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
60619index 5a37cce..6ecc88c 100644
60620--- a/drivers/staging/hv/VmbusPrivate.h
60621+++ b/drivers/staging/hv/VmbusPrivate.h
60622@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
60623 struct VMBUS_CONNECTION {
60624 enum VMBUS_CONNECT_STATE ConnectState;
60625
60626- atomic_t NextGpadlHandle;
60627+ atomic_unchecked_t NextGpadlHandle;
60628
60629 /*
60630 * Represents channel interrupts. Each bit position represents a
60631diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
60632index 871a202..ca50ddf 100644
60633--- a/drivers/staging/hv/blkvsc_drv.c
60634+++ b/drivers/staging/hv/blkvsc_drv.c
60635@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
60636 /* The one and only one */
60637 static struct blkvsc_driver_context g_blkvsc_drv;
60638
60639-static struct block_device_operations block_ops = {
60640+static const struct block_device_operations block_ops = {
60641 .owner = THIS_MODULE,
60642 .open = blkvsc_open,
60643 .release = blkvsc_release,
60644diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
60645index 6acc49a..fbc8d46 100644
60646--- a/drivers/staging/hv/vmbus_drv.c
60647+++ b/drivers/staging/hv/vmbus_drv.c
60648@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60649 to_device_context(root_device_obj);
60650 struct device_context *child_device_ctx =
60651 to_device_context(child_device_obj);
60652- static atomic_t device_num = ATOMIC_INIT(0);
60653+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
60654
60655 DPRINT_ENTER(VMBUS_DRV);
60656
60657@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60658
60659 /* Set the device name. Otherwise, device_register() will fail. */
60660 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
60661- atomic_inc_return(&device_num));
60662+ atomic_inc_return_unchecked(&device_num));
60663
60664 /* The new device belongs to this bus */
60665 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
60666diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
60667index d926189..17b19fd 100644
60668--- a/drivers/staging/iio/ring_generic.h
60669+++ b/drivers/staging/iio/ring_generic.h
60670@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
60671
60672 int (*is_enabled)(struct iio_ring_buffer *ring);
60673 int (*enable)(struct iio_ring_buffer *ring);
60674-};
60675+} __no_const;
60676
60677 /**
60678 * struct iio_ring_buffer - general ring buffer structure
60679diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
60680index 1b237b7..88c624e 100644
60681--- a/drivers/staging/octeon/ethernet-rx.c
60682+++ b/drivers/staging/octeon/ethernet-rx.c
60683@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60684 /* Increment RX stats for virtual ports */
60685 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
60686 #ifdef CONFIG_64BIT
60687- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
60688- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
60689+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
60690+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
60691 #else
60692- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
60693- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
60694+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
60695+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
60696 #endif
60697 }
60698 netif_receive_skb(skb);
60699@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60700 dev->name);
60701 */
60702 #ifdef CONFIG_64BIT
60703- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
60704+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
60705 #else
60706- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
60707+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
60708 #endif
60709 dev_kfree_skb_irq(skb);
60710 }
60711diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
60712index 492c502..d9909f1 100644
60713--- a/drivers/staging/octeon/ethernet.c
60714+++ b/drivers/staging/octeon/ethernet.c
60715@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
60716 * since the RX tasklet also increments it.
60717 */
60718 #ifdef CONFIG_64BIT
60719- atomic64_add(rx_status.dropped_packets,
60720- (atomic64_t *)&priv->stats.rx_dropped);
60721+ atomic64_add_unchecked(rx_status.dropped_packets,
60722+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
60723 #else
60724- atomic_add(rx_status.dropped_packets,
60725- (atomic_t *)&priv->stats.rx_dropped);
60726+ atomic_add_unchecked(rx_status.dropped_packets,
60727+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
60728 #endif
60729 }
60730
60731diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
60732index a35bd5d..28fff45 100644
60733--- a/drivers/staging/otus/80211core/pub_zfi.h
60734+++ b/drivers/staging/otus/80211core/pub_zfi.h
60735@@ -531,7 +531,7 @@ struct zsCbFuncTbl
60736 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
60737
60738 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
60739-};
60740+} __no_const;
60741
60742 extern void zfZeroMemory(u8_t* va, u16_t length);
60743 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
60744diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
60745index c39a25f..696f5aa 100644
60746--- a/drivers/staging/panel/panel.c
60747+++ b/drivers/staging/panel/panel.c
60748@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
60749 return 0;
60750 }
60751
60752-static struct file_operations lcd_fops = {
60753+static const struct file_operations lcd_fops = {
60754 .write = lcd_write,
60755 .open = lcd_open,
60756 .release = lcd_release,
60757@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
60758 return 0;
60759 }
60760
60761-static struct file_operations keypad_fops = {
60762+static const struct file_operations keypad_fops = {
60763 .read = keypad_read, /* read */
60764 .open = keypad_open, /* open */
60765 .release = keypad_release, /* close */
60766diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
60767index 270ebcb..37e46af 100644
60768--- a/drivers/staging/phison/phison.c
60769+++ b/drivers/staging/phison/phison.c
60770@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
60771 ATA_BMDMA_SHT(DRV_NAME),
60772 };
60773
60774-static struct ata_port_operations phison_ops = {
60775+static const struct ata_port_operations phison_ops = {
60776 .inherits = &ata_bmdma_port_ops,
60777 .prereset = phison_pre_reset,
60778 };
60779diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
60780index 2eb8e3d..57616a7 100644
60781--- a/drivers/staging/poch/poch.c
60782+++ b/drivers/staging/poch/poch.c
60783@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
60784 return 0;
60785 }
60786
60787-static struct file_operations poch_fops = {
60788+static const struct file_operations poch_fops = {
60789 .owner = THIS_MODULE,
60790 .open = poch_open,
60791 .release = poch_release,
60792diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
60793index c94de31..19402bc 100644
60794--- a/drivers/staging/pohmelfs/inode.c
60795+++ b/drivers/staging/pohmelfs/inode.c
60796@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60797 mutex_init(&psb->mcache_lock);
60798 psb->mcache_root = RB_ROOT;
60799 psb->mcache_timeout = msecs_to_jiffies(5000);
60800- atomic_long_set(&psb->mcache_gen, 0);
60801+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
60802
60803 psb->trans_max_pages = 100;
60804
60805@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60806 INIT_LIST_HEAD(&psb->crypto_ready_list);
60807 INIT_LIST_HEAD(&psb->crypto_active_list);
60808
60809- atomic_set(&psb->trans_gen, 1);
60810+ atomic_set_unchecked(&psb->trans_gen, 1);
60811 atomic_long_set(&psb->total_inodes, 0);
60812
60813 mutex_init(&psb->state_lock);
60814diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
60815index e22665c..a2a9390 100644
60816--- a/drivers/staging/pohmelfs/mcache.c
60817+++ b/drivers/staging/pohmelfs/mcache.c
60818@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
60819 m->data = data;
60820 m->start = start;
60821 m->size = size;
60822- m->gen = atomic_long_inc_return(&psb->mcache_gen);
60823+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
60824
60825 mutex_lock(&psb->mcache_lock);
60826 err = pohmelfs_mcache_insert(psb, m);
60827diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
60828index 623a07d..4035c19 100644
60829--- a/drivers/staging/pohmelfs/netfs.h
60830+++ b/drivers/staging/pohmelfs/netfs.h
60831@@ -570,14 +570,14 @@ struct pohmelfs_config;
60832 struct pohmelfs_sb {
60833 struct rb_root mcache_root;
60834 struct mutex mcache_lock;
60835- atomic_long_t mcache_gen;
60836+ atomic_long_unchecked_t mcache_gen;
60837 unsigned long mcache_timeout;
60838
60839 unsigned int idx;
60840
60841 unsigned int trans_retries;
60842
60843- atomic_t trans_gen;
60844+ atomic_unchecked_t trans_gen;
60845
60846 unsigned int crypto_attached_size;
60847 unsigned int crypto_align_size;
60848diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
60849index 36a2535..0591bf4 100644
60850--- a/drivers/staging/pohmelfs/trans.c
60851+++ b/drivers/staging/pohmelfs/trans.c
60852@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
60853 int err;
60854 struct netfs_cmd *cmd = t->iovec.iov_base;
60855
60856- t->gen = atomic_inc_return(&psb->trans_gen);
60857+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
60858
60859 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
60860 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
60861diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
60862index f890a16..509ece8 100644
60863--- a/drivers/staging/sep/sep_driver.c
60864+++ b/drivers/staging/sep/sep_driver.c
60865@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
60866 static dev_t sep_devno;
60867
60868 /* the files operations structure of the driver */
60869-static struct file_operations sep_file_operations = {
60870+static const struct file_operations sep_file_operations = {
60871 .owner = THIS_MODULE,
60872 .ioctl = sep_ioctl,
60873 .poll = sep_poll,
60874diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
60875index 5e16bc3..7655b10 100644
60876--- a/drivers/staging/usbip/usbip_common.h
60877+++ b/drivers/staging/usbip/usbip_common.h
60878@@ -374,7 +374,7 @@ struct usbip_device {
60879 void (*shutdown)(struct usbip_device *);
60880 void (*reset)(struct usbip_device *);
60881 void (*unusable)(struct usbip_device *);
60882- } eh_ops;
60883+ } __no_const eh_ops;
60884 };
60885
60886
60887diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
60888index 57f7946..d9df23d 100644
60889--- a/drivers/staging/usbip/vhci.h
60890+++ b/drivers/staging/usbip/vhci.h
60891@@ -92,7 +92,7 @@ struct vhci_hcd {
60892 unsigned resuming:1;
60893 unsigned long re_timeout;
60894
60895- atomic_t seqnum;
60896+ atomic_unchecked_t seqnum;
60897
60898 /*
60899 * NOTE:
60900diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
60901index 20cd7db..c2693ff 100644
60902--- a/drivers/staging/usbip/vhci_hcd.c
60903+++ b/drivers/staging/usbip/vhci_hcd.c
60904@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
60905 return;
60906 }
60907
60908- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
60909+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60910 if (priv->seqnum == 0xffff)
60911 usbip_uinfo("seqnum max\n");
60912
60913@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
60914 return -ENOMEM;
60915 }
60916
60917- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
60918+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60919 if (unlink->seqnum == 0xffff)
60920 usbip_uinfo("seqnum max\n");
60921
60922@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
60923 vdev->rhport = rhport;
60924 }
60925
60926- atomic_set(&vhci->seqnum, 0);
60927+ atomic_set_unchecked(&vhci->seqnum, 0);
60928 spin_lock_init(&vhci->lock);
60929
60930
60931diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
60932index 7fd76fe..673695a 100644
60933--- a/drivers/staging/usbip/vhci_rx.c
60934+++ b/drivers/staging/usbip/vhci_rx.c
60935@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
60936 usbip_uerr("cannot find a urb of seqnum %u\n",
60937 pdu->base.seqnum);
60938 usbip_uinfo("max seqnum %d\n",
60939- atomic_read(&the_controller->seqnum));
60940+ atomic_read_unchecked(&the_controller->seqnum));
60941 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
60942 return;
60943 }
60944diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
60945index 7891288..8e31300 100644
60946--- a/drivers/staging/vme/devices/vme_user.c
60947+++ b/drivers/staging/vme/devices/vme_user.c
60948@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
60949 static int __init vme_user_probe(struct device *, int, int);
60950 static int __exit vme_user_remove(struct device *, int, int);
60951
60952-static struct file_operations vme_user_fops = {
60953+static const struct file_operations vme_user_fops = {
60954 .open = vme_user_open,
60955 .release = vme_user_release,
60956 .read = vme_user_read,
60957diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
60958index 58abf44..00c1fc8 100644
60959--- a/drivers/staging/vt6655/hostap.c
60960+++ b/drivers/staging/vt6655/hostap.c
60961@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60962 PSDevice apdev_priv;
60963 struct net_device *dev = pDevice->dev;
60964 int ret;
60965- const struct net_device_ops apdev_netdev_ops = {
60966+ net_device_ops_no_const apdev_netdev_ops = {
60967 .ndo_start_xmit = pDevice->tx_80211,
60968 };
60969
60970diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
60971index 0c8267a..db1f363 100644
60972--- a/drivers/staging/vt6656/hostap.c
60973+++ b/drivers/staging/vt6656/hostap.c
60974@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60975 PSDevice apdev_priv;
60976 struct net_device *dev = pDevice->dev;
60977 int ret;
60978- const struct net_device_ops apdev_netdev_ops = {
60979+ net_device_ops_no_const apdev_netdev_ops = {
60980 .ndo_start_xmit = pDevice->tx_80211,
60981 };
60982
60983diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
60984index 925678b..da7f5ed 100644
60985--- a/drivers/staging/wlan-ng/hfa384x_usb.c
60986+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
60987@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
60988
60989 struct usbctlx_completor {
60990 int (*complete) (struct usbctlx_completor *);
60991-};
60992+} __no_const;
60993 typedef struct usbctlx_completor usbctlx_completor_t;
60994
60995 static int
60996diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
60997index 40de151..924f268 100644
60998--- a/drivers/telephony/ixj.c
60999+++ b/drivers/telephony/ixj.c
61000@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
61001 bool mContinue;
61002 char *pIn, *pOut;
61003
61004+ pax_track_stack();
61005+
61006 if (!SCI_Prepare(j))
61007 return 0;
61008
61009diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
61010index e941367..b631f5a 100644
61011--- a/drivers/uio/uio.c
61012+++ b/drivers/uio/uio.c
61013@@ -23,6 +23,7 @@
61014 #include <linux/string.h>
61015 #include <linux/kobject.h>
61016 #include <linux/uio_driver.h>
61017+#include <asm/local.h>
61018
61019 #define UIO_MAX_DEVICES 255
61020
61021@@ -30,10 +31,10 @@ struct uio_device {
61022 struct module *owner;
61023 struct device *dev;
61024 int minor;
61025- atomic_t event;
61026+ atomic_unchecked_t event;
61027 struct fasync_struct *async_queue;
61028 wait_queue_head_t wait;
61029- int vma_count;
61030+ local_t vma_count;
61031 struct uio_info *info;
61032 struct kobject *map_dir;
61033 struct kobject *portio_dir;
61034@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
61035 return entry->show(mem, buf);
61036 }
61037
61038-static struct sysfs_ops map_sysfs_ops = {
61039+static const struct sysfs_ops map_sysfs_ops = {
61040 .show = map_type_show,
61041 };
61042
61043@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
61044 return entry->show(port, buf);
61045 }
61046
61047-static struct sysfs_ops portio_sysfs_ops = {
61048+static const struct sysfs_ops portio_sysfs_ops = {
61049 .show = portio_type_show,
61050 };
61051
61052@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
61053 struct uio_device *idev = dev_get_drvdata(dev);
61054 if (idev)
61055 return sprintf(buf, "%u\n",
61056- (unsigned int)atomic_read(&idev->event));
61057+ (unsigned int)atomic_read_unchecked(&idev->event));
61058 else
61059 return -ENODEV;
61060 }
61061@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
61062 {
61063 struct uio_device *idev = info->uio_dev;
61064
61065- atomic_inc(&idev->event);
61066+ atomic_inc_unchecked(&idev->event);
61067 wake_up_interruptible(&idev->wait);
61068 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
61069 }
61070@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
61071 }
61072
61073 listener->dev = idev;
61074- listener->event_count = atomic_read(&idev->event);
61075+ listener->event_count = atomic_read_unchecked(&idev->event);
61076 filep->private_data = listener;
61077
61078 if (idev->info->open) {
61079@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
61080 return -EIO;
61081
61082 poll_wait(filep, &idev->wait, wait);
61083- if (listener->event_count != atomic_read(&idev->event))
61084+ if (listener->event_count != atomic_read_unchecked(&idev->event))
61085 return POLLIN | POLLRDNORM;
61086 return 0;
61087 }
61088@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
61089 do {
61090 set_current_state(TASK_INTERRUPTIBLE);
61091
61092- event_count = atomic_read(&idev->event);
61093+ event_count = atomic_read_unchecked(&idev->event);
61094 if (event_count != listener->event_count) {
61095 if (copy_to_user(buf, &event_count, count))
61096 retval = -EFAULT;
61097@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
61098 static void uio_vma_open(struct vm_area_struct *vma)
61099 {
61100 struct uio_device *idev = vma->vm_private_data;
61101- idev->vma_count++;
61102+ local_inc(&idev->vma_count);
61103 }
61104
61105 static void uio_vma_close(struct vm_area_struct *vma)
61106 {
61107 struct uio_device *idev = vma->vm_private_data;
61108- idev->vma_count--;
61109+ local_dec(&idev->vma_count);
61110 }
61111
61112 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
61113@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
61114 idev->owner = owner;
61115 idev->info = info;
61116 init_waitqueue_head(&idev->wait);
61117- atomic_set(&idev->event, 0);
61118+ atomic_set_unchecked(&idev->event, 0);
61119
61120 ret = uio_get_minor(idev);
61121 if (ret)
61122diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
61123index fbea856..06efea6 100644
61124--- a/drivers/usb/atm/usbatm.c
61125+++ b/drivers/usb/atm/usbatm.c
61126@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61127 if (printk_ratelimit())
61128 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
61129 __func__, vpi, vci);
61130- atomic_inc(&vcc->stats->rx_err);
61131+ atomic_inc_unchecked(&vcc->stats->rx_err);
61132 return;
61133 }
61134
61135@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61136 if (length > ATM_MAX_AAL5_PDU) {
61137 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
61138 __func__, length, vcc);
61139- atomic_inc(&vcc->stats->rx_err);
61140+ atomic_inc_unchecked(&vcc->stats->rx_err);
61141 goto out;
61142 }
61143
61144@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61145 if (sarb->len < pdu_length) {
61146 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
61147 __func__, pdu_length, sarb->len, vcc);
61148- atomic_inc(&vcc->stats->rx_err);
61149+ atomic_inc_unchecked(&vcc->stats->rx_err);
61150 goto out;
61151 }
61152
61153 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
61154 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
61155 __func__, vcc);
61156- atomic_inc(&vcc->stats->rx_err);
61157+ atomic_inc_unchecked(&vcc->stats->rx_err);
61158 goto out;
61159 }
61160
61161@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61162 if (printk_ratelimit())
61163 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
61164 __func__, length);
61165- atomic_inc(&vcc->stats->rx_drop);
61166+ atomic_inc_unchecked(&vcc->stats->rx_drop);
61167 goto out;
61168 }
61169
61170@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61171
61172 vcc->push(vcc, skb);
61173
61174- atomic_inc(&vcc->stats->rx);
61175+ atomic_inc_unchecked(&vcc->stats->rx);
61176 out:
61177 skb_trim(sarb, 0);
61178 }
61179@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
61180 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
61181
61182 usbatm_pop(vcc, skb);
61183- atomic_inc(&vcc->stats->tx);
61184+ atomic_inc_unchecked(&vcc->stats->tx);
61185
61186 skb = skb_dequeue(&instance->sndqueue);
61187 }
61188@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
61189 if (!left--)
61190 return sprintf(page,
61191 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
61192- atomic_read(&atm_dev->stats.aal5.tx),
61193- atomic_read(&atm_dev->stats.aal5.tx_err),
61194- atomic_read(&atm_dev->stats.aal5.rx),
61195- atomic_read(&atm_dev->stats.aal5.rx_err),
61196- atomic_read(&atm_dev->stats.aal5.rx_drop));
61197+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
61198+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
61199+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
61200+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
61201+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
61202
61203 if (!left--) {
61204 if (instance->disconnected)
61205diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
61206index 24e6205..fe5a5d4 100644
61207--- a/drivers/usb/core/hcd.c
61208+++ b/drivers/usb/core/hcd.c
61209@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
61210
61211 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61212
61213-struct usb_mon_operations *mon_ops;
61214+const struct usb_mon_operations *mon_ops;
61215
61216 /*
61217 * The registration is unlocked.
61218@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
61219 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
61220 */
61221
61222-int usb_mon_register (struct usb_mon_operations *ops)
61223+int usb_mon_register (const struct usb_mon_operations *ops)
61224 {
61225
61226 if (mon_ops)
61227diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
61228index bcbe104..9cfd1c6 100644
61229--- a/drivers/usb/core/hcd.h
61230+++ b/drivers/usb/core/hcd.h
61231@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
61232 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61233
61234 struct usb_mon_operations {
61235- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
61236- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61237- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61238+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
61239+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61240+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61241 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
61242 };
61243
61244-extern struct usb_mon_operations *mon_ops;
61245+extern const struct usb_mon_operations *mon_ops;
61246
61247 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
61248 {
61249@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
61250 (*mon_ops->urb_complete)(bus, urb, status);
61251 }
61252
61253-int usb_mon_register(struct usb_mon_operations *ops);
61254+int usb_mon_register(const struct usb_mon_operations *ops);
61255 void usb_mon_deregister(void);
61256
61257 #else
61258diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
61259index 62ff5e7..530b74e 100644
61260--- a/drivers/usb/misc/appledisplay.c
61261+++ b/drivers/usb/misc/appledisplay.c
61262@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
61263 return pdata->msgdata[1];
61264 }
61265
61266-static struct backlight_ops appledisplay_bl_data = {
61267+static const struct backlight_ops appledisplay_bl_data = {
61268 .get_brightness = appledisplay_bl_get_brightness,
61269 .update_status = appledisplay_bl_update_status,
61270 };
61271diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
61272index e0c2db3..bd8cb66 100644
61273--- a/drivers/usb/mon/mon_main.c
61274+++ b/drivers/usb/mon/mon_main.c
61275@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
61276 /*
61277 * Ops
61278 */
61279-static struct usb_mon_operations mon_ops_0 = {
61280+static const struct usb_mon_operations mon_ops_0 = {
61281 .urb_submit = mon_submit,
61282 .urb_submit_error = mon_submit_error,
61283 .urb_complete = mon_complete,
61284diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
61285index d6bea3e..60b250e 100644
61286--- a/drivers/usb/wusbcore/wa-hc.h
61287+++ b/drivers/usb/wusbcore/wa-hc.h
61288@@ -192,7 +192,7 @@ struct wahc {
61289 struct list_head xfer_delayed_list;
61290 spinlock_t xfer_list_lock;
61291 struct work_struct xfer_work;
61292- atomic_t xfer_id_count;
61293+ atomic_unchecked_t xfer_id_count;
61294 };
61295
61296
61297@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
61298 INIT_LIST_HEAD(&wa->xfer_delayed_list);
61299 spin_lock_init(&wa->xfer_list_lock);
61300 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
61301- atomic_set(&wa->xfer_id_count, 1);
61302+ atomic_set_unchecked(&wa->xfer_id_count, 1);
61303 }
61304
61305 /**
61306diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
61307index 613a5fc..3174865 100644
61308--- a/drivers/usb/wusbcore/wa-xfer.c
61309+++ b/drivers/usb/wusbcore/wa-xfer.c
61310@@ -293,7 +293,7 @@ out:
61311 */
61312 static void wa_xfer_id_init(struct wa_xfer *xfer)
61313 {
61314- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
61315+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
61316 }
61317
61318 /*
61319diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
61320index aa42fce..f8a828c 100644
61321--- a/drivers/uwb/wlp/messages.c
61322+++ b/drivers/uwb/wlp/messages.c
61323@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
61324 size_t len = skb->len;
61325 size_t used;
61326 ssize_t result;
61327- struct wlp_nonce enonce, rnonce;
61328+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
61329 enum wlp_assc_error assc_err;
61330 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
61331 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
61332diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
61333index 0370399..6627c94 100644
61334--- a/drivers/uwb/wlp/sysfs.c
61335+++ b/drivers/uwb/wlp/sysfs.c
61336@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
61337 return ret;
61338 }
61339
61340-static
61341-struct sysfs_ops wss_sysfs_ops = {
61342+static const struct sysfs_ops wss_sysfs_ops = {
61343 .show = wlp_wss_attr_show,
61344 .store = wlp_wss_attr_store,
61345 };
61346diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
61347index 8c5e432..5ee90ea 100644
61348--- a/drivers/video/atmel_lcdfb.c
61349+++ b/drivers/video/atmel_lcdfb.c
61350@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
61351 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
61352 }
61353
61354-static struct backlight_ops atmel_lcdc_bl_ops = {
61355+static const struct backlight_ops atmel_lcdc_bl_ops = {
61356 .update_status = atmel_bl_update_status,
61357 .get_brightness = atmel_bl_get_brightness,
61358 };
61359diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
61360index e4e4d43..66bcbcc 100644
61361--- a/drivers/video/aty/aty128fb.c
61362+++ b/drivers/video/aty/aty128fb.c
61363@@ -149,7 +149,7 @@ enum {
61364 };
61365
61366 /* Must match above enum */
61367-static const char *r128_family[] __devinitdata = {
61368+static const char *r128_family[] __devinitconst = {
61369 "AGP",
61370 "PCI",
61371 "PRO AGP",
61372@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
61373 return bd->props.brightness;
61374 }
61375
61376-static struct backlight_ops aty128_bl_data = {
61377+static const struct backlight_ops aty128_bl_data = {
61378 .get_brightness = aty128_bl_get_brightness,
61379 .update_status = aty128_bl_update_status,
61380 };
61381diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
61382index 913b4a4..9295a38 100644
61383--- a/drivers/video/aty/atyfb_base.c
61384+++ b/drivers/video/aty/atyfb_base.c
61385@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
61386 return bd->props.brightness;
61387 }
61388
61389-static struct backlight_ops aty_bl_data = {
61390+static const struct backlight_ops aty_bl_data = {
61391 .get_brightness = aty_bl_get_brightness,
61392 .update_status = aty_bl_update_status,
61393 };
61394diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
61395index 1a056ad..221bd6a 100644
61396--- a/drivers/video/aty/radeon_backlight.c
61397+++ b/drivers/video/aty/radeon_backlight.c
61398@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
61399 return bd->props.brightness;
61400 }
61401
61402-static struct backlight_ops radeon_bl_data = {
61403+static const struct backlight_ops radeon_bl_data = {
61404 .get_brightness = radeon_bl_get_brightness,
61405 .update_status = radeon_bl_update_status,
61406 };
61407diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
61408index ad05da5..3cb2cb9 100644
61409--- a/drivers/video/backlight/adp5520_bl.c
61410+++ b/drivers/video/backlight/adp5520_bl.c
61411@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
61412 return error ? data->current_brightness : reg_val;
61413 }
61414
61415-static struct backlight_ops adp5520_bl_ops = {
61416+static const struct backlight_ops adp5520_bl_ops = {
61417 .update_status = adp5520_bl_update_status,
61418 .get_brightness = adp5520_bl_get_brightness,
61419 };
61420diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
61421index 2c3bdfc..d769b0b 100644
61422--- a/drivers/video/backlight/adx_bl.c
61423+++ b/drivers/video/backlight/adx_bl.c
61424@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
61425 return 1;
61426 }
61427
61428-static struct backlight_ops adx_backlight_ops = {
61429+static const struct backlight_ops adx_backlight_ops = {
61430 .options = 0,
61431 .update_status = adx_backlight_update_status,
61432 .get_brightness = adx_backlight_get_brightness,
61433diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
61434index 505c082..6b6b3cc 100644
61435--- a/drivers/video/backlight/atmel-pwm-bl.c
61436+++ b/drivers/video/backlight/atmel-pwm-bl.c
61437@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
61438 return pwm_channel_enable(&pwmbl->pwmc);
61439 }
61440
61441-static struct backlight_ops atmel_pwm_bl_ops = {
61442+static const struct backlight_ops atmel_pwm_bl_ops = {
61443 .get_brightness = atmel_pwm_bl_get_intensity,
61444 .update_status = atmel_pwm_bl_set_intensity,
61445 };
61446diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
61447index 5e20e6e..89025e6 100644
61448--- a/drivers/video/backlight/backlight.c
61449+++ b/drivers/video/backlight/backlight.c
61450@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
61451 * ERR_PTR() or a pointer to the newly allocated device.
61452 */
61453 struct backlight_device *backlight_device_register(const char *name,
61454- struct device *parent, void *devdata, struct backlight_ops *ops)
61455+ struct device *parent, void *devdata, const struct backlight_ops *ops)
61456 {
61457 struct backlight_device *new_bd;
61458 int rc;
61459diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
61460index 9677494..b4bcf80 100644
61461--- a/drivers/video/backlight/corgi_lcd.c
61462+++ b/drivers/video/backlight/corgi_lcd.c
61463@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
61464 }
61465 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
61466
61467-static struct backlight_ops corgi_bl_ops = {
61468+static const struct backlight_ops corgi_bl_ops = {
61469 .get_brightness = corgi_bl_get_intensity,
61470 .update_status = corgi_bl_update_status,
61471 };
61472diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
61473index b9fe62b..2914bf1 100644
61474--- a/drivers/video/backlight/cr_bllcd.c
61475+++ b/drivers/video/backlight/cr_bllcd.c
61476@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
61477 return intensity;
61478 }
61479
61480-static struct backlight_ops cr_backlight_ops = {
61481+static const struct backlight_ops cr_backlight_ops = {
61482 .get_brightness = cr_backlight_get_intensity,
61483 .update_status = cr_backlight_set_intensity,
61484 };
61485diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
61486index 701a108..feacfd5 100644
61487--- a/drivers/video/backlight/da903x_bl.c
61488+++ b/drivers/video/backlight/da903x_bl.c
61489@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
61490 return data->current_brightness;
61491 }
61492
61493-static struct backlight_ops da903x_backlight_ops = {
61494+static const struct backlight_ops da903x_backlight_ops = {
61495 .update_status = da903x_backlight_update_status,
61496 .get_brightness = da903x_backlight_get_brightness,
61497 };
61498diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
61499index 6d27f62..e6d348e 100644
61500--- a/drivers/video/backlight/generic_bl.c
61501+++ b/drivers/video/backlight/generic_bl.c
61502@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
61503 }
61504 EXPORT_SYMBOL(corgibl_limit_intensity);
61505
61506-static struct backlight_ops genericbl_ops = {
61507+static const struct backlight_ops genericbl_ops = {
61508 .options = BL_CORE_SUSPENDRESUME,
61509 .get_brightness = genericbl_get_intensity,
61510 .update_status = genericbl_send_intensity,
61511diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
61512index 7fb4eef..f7cc528 100644
61513--- a/drivers/video/backlight/hp680_bl.c
61514+++ b/drivers/video/backlight/hp680_bl.c
61515@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
61516 return current_intensity;
61517 }
61518
61519-static struct backlight_ops hp680bl_ops = {
61520+static const struct backlight_ops hp680bl_ops = {
61521 .get_brightness = hp680bl_get_intensity,
61522 .update_status = hp680bl_set_intensity,
61523 };
61524diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
61525index 7aed256..db9071f 100644
61526--- a/drivers/video/backlight/jornada720_bl.c
61527+++ b/drivers/video/backlight/jornada720_bl.c
61528@@ -93,7 +93,7 @@ out:
61529 return ret;
61530 }
61531
61532-static struct backlight_ops jornada_bl_ops = {
61533+static const struct backlight_ops jornada_bl_ops = {
61534 .get_brightness = jornada_bl_get_brightness,
61535 .update_status = jornada_bl_update_status,
61536 .options = BL_CORE_SUSPENDRESUME,
61537diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
61538index a38fda1..939e7b8 100644
61539--- a/drivers/video/backlight/kb3886_bl.c
61540+++ b/drivers/video/backlight/kb3886_bl.c
61541@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
61542 return kb3886bl_intensity;
61543 }
61544
61545-static struct backlight_ops kb3886bl_ops = {
61546+static const struct backlight_ops kb3886bl_ops = {
61547 .get_brightness = kb3886bl_get_intensity,
61548 .update_status = kb3886bl_send_intensity,
61549 };
61550diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
61551index 6b488b8..00a9591 100644
61552--- a/drivers/video/backlight/locomolcd.c
61553+++ b/drivers/video/backlight/locomolcd.c
61554@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
61555 return current_intensity;
61556 }
61557
61558-static struct backlight_ops locomobl_data = {
61559+static const struct backlight_ops locomobl_data = {
61560 .get_brightness = locomolcd_get_intensity,
61561 .update_status = locomolcd_set_intensity,
61562 };
61563diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
61564index 99bdfa8..3dac448 100644
61565--- a/drivers/video/backlight/mbp_nvidia_bl.c
61566+++ b/drivers/video/backlight/mbp_nvidia_bl.c
61567@@ -33,7 +33,7 @@ struct dmi_match_data {
61568 unsigned long iostart;
61569 unsigned long iolen;
61570 /* Backlight operations structure. */
61571- struct backlight_ops backlight_ops;
61572+ const struct backlight_ops backlight_ops;
61573 };
61574
61575 /* Module parameters. */
61576diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
61577index cbad67e..3cf900e 100644
61578--- a/drivers/video/backlight/omap1_bl.c
61579+++ b/drivers/video/backlight/omap1_bl.c
61580@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
61581 return bl->current_intensity;
61582 }
61583
61584-static struct backlight_ops omapbl_ops = {
61585+static const struct backlight_ops omapbl_ops = {
61586 .get_brightness = omapbl_get_intensity,
61587 .update_status = omapbl_update_status,
61588 };
61589diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
61590index 9edaf24..075786e 100644
61591--- a/drivers/video/backlight/progear_bl.c
61592+++ b/drivers/video/backlight/progear_bl.c
61593@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
61594 return intensity - HW_LEVEL_MIN;
61595 }
61596
61597-static struct backlight_ops progearbl_ops = {
61598+static const struct backlight_ops progearbl_ops = {
61599 .get_brightness = progearbl_get_intensity,
61600 .update_status = progearbl_set_intensity,
61601 };
61602diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
61603index 8871662..df9e0b3 100644
61604--- a/drivers/video/backlight/pwm_bl.c
61605+++ b/drivers/video/backlight/pwm_bl.c
61606@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
61607 return bl->props.brightness;
61608 }
61609
61610-static struct backlight_ops pwm_backlight_ops = {
61611+static const struct backlight_ops pwm_backlight_ops = {
61612 .update_status = pwm_backlight_update_status,
61613 .get_brightness = pwm_backlight_get_brightness,
61614 };
61615diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
61616index 43edbad..e14ce4d 100644
61617--- a/drivers/video/backlight/tosa_bl.c
61618+++ b/drivers/video/backlight/tosa_bl.c
61619@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
61620 return props->brightness;
61621 }
61622
61623-static struct backlight_ops bl_ops = {
61624+static const struct backlight_ops bl_ops = {
61625 .get_brightness = tosa_bl_get_brightness,
61626 .update_status = tosa_bl_update_status,
61627 };
61628diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
61629index 467bdb7..e32add3 100644
61630--- a/drivers/video/backlight/wm831x_bl.c
61631+++ b/drivers/video/backlight/wm831x_bl.c
61632@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
61633 return data->current_brightness;
61634 }
61635
61636-static struct backlight_ops wm831x_backlight_ops = {
61637+static const struct backlight_ops wm831x_backlight_ops = {
61638 .options = BL_CORE_SUSPENDRESUME,
61639 .update_status = wm831x_backlight_update_status,
61640 .get_brightness = wm831x_backlight_get_brightness,
61641diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
61642index e49ae5e..db4e6f7 100644
61643--- a/drivers/video/bf54x-lq043fb.c
61644+++ b/drivers/video/bf54x-lq043fb.c
61645@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61646 return 0;
61647 }
61648
61649-static struct backlight_ops bfin_lq043fb_bl_ops = {
61650+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61651 .get_brightness = bl_get_brightness,
61652 };
61653
61654diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
61655index 2c72a7c..d523e52 100644
61656--- a/drivers/video/bfin-t350mcqb-fb.c
61657+++ b/drivers/video/bfin-t350mcqb-fb.c
61658@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61659 return 0;
61660 }
61661
61662-static struct backlight_ops bfin_lq043fb_bl_ops = {
61663+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61664 .get_brightness = bl_get_brightness,
61665 };
61666
61667diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
61668index f53b9f1..958bf4e 100644
61669--- a/drivers/video/fbcmap.c
61670+++ b/drivers/video/fbcmap.c
61671@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
61672 rc = -ENODEV;
61673 goto out;
61674 }
61675- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
61676- !info->fbops->fb_setcmap)) {
61677+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
61678 rc = -EINVAL;
61679 goto out1;
61680 }
61681diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
61682index 99bbd28..ad3829e 100644
61683--- a/drivers/video/fbmem.c
61684+++ b/drivers/video/fbmem.c
61685@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61686 image->dx += image->width + 8;
61687 }
61688 } else if (rotate == FB_ROTATE_UD) {
61689- for (x = 0; x < num && image->dx >= 0; x++) {
61690+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
61691 info->fbops->fb_imageblit(info, image);
61692 image->dx -= image->width + 8;
61693 }
61694@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61695 image->dy += image->height + 8;
61696 }
61697 } else if (rotate == FB_ROTATE_CCW) {
61698- for (x = 0; x < num && image->dy >= 0; x++) {
61699+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
61700 info->fbops->fb_imageblit(info, image);
61701 image->dy -= image->height + 8;
61702 }
61703@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
61704 int flags = info->flags;
61705 int ret = 0;
61706
61707+ pax_track_stack();
61708+
61709 if (var->activate & FB_ACTIVATE_INV_MODE) {
61710 struct fb_videomode mode1, mode2;
61711
61712@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61713 void __user *argp = (void __user *)arg;
61714 long ret = 0;
61715
61716+ pax_track_stack();
61717+
61718 switch (cmd) {
61719 case FBIOGET_VSCREENINFO:
61720 if (!lock_fb_info(info))
61721@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61722 return -EFAULT;
61723 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
61724 return -EINVAL;
61725- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
61726+ if (con2fb.framebuffer >= FB_MAX)
61727 return -EINVAL;
61728 if (!registered_fb[con2fb.framebuffer])
61729 request_module("fb%d", con2fb.framebuffer);
61730diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
61731index f20eff8..3e4f622 100644
61732--- a/drivers/video/geode/gx1fb_core.c
61733+++ b/drivers/video/geode/gx1fb_core.c
61734@@ -30,7 +30,7 @@ static int crt_option = 1;
61735 static char panel_option[32] = "";
61736
61737 /* Modes relevant to the GX1 (taken from modedb.c) */
61738-static const struct fb_videomode __initdata gx1_modedb[] = {
61739+static const struct fb_videomode __initconst gx1_modedb[] = {
61740 /* 640x480-60 VESA */
61741 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
61742 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
61743diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
61744index 896e53d..4d87d0b 100644
61745--- a/drivers/video/gxt4500.c
61746+++ b/drivers/video/gxt4500.c
61747@@ -156,7 +156,7 @@ struct gxt4500_par {
61748 static char *mode_option;
61749
61750 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
61751-static const struct fb_videomode defaultmode __devinitdata = {
61752+static const struct fb_videomode defaultmode __devinitconst = {
61753 .refresh = 60,
61754 .xres = 1280,
61755 .yres = 1024,
61756@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
61757 return 0;
61758 }
61759
61760-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
61761+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
61762 .id = "IBM GXT4500P",
61763 .type = FB_TYPE_PACKED_PIXELS,
61764 .visual = FB_VISUAL_PSEUDOCOLOR,
61765diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
61766index f5bedee..28c6028 100644
61767--- a/drivers/video/i810/i810_accel.c
61768+++ b/drivers/video/i810/i810_accel.c
61769@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
61770 }
61771 }
61772 printk("ringbuffer lockup!!!\n");
61773+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
61774 i810_report_error(mmio);
61775 par->dev_flags |= LOCKUP;
61776 info->pixmap.scan_align = 1;
61777diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
61778index 5743ea2..457f82c 100644
61779--- a/drivers/video/i810/i810_main.c
61780+++ b/drivers/video/i810/i810_main.c
61781@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
61782 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
61783
61784 /* PCI */
61785-static const char *i810_pci_list[] __devinitdata = {
61786+static const char *i810_pci_list[] __devinitconst = {
61787 "Intel(R) 810 Framebuffer Device" ,
61788 "Intel(R) 810-DC100 Framebuffer Device" ,
61789 "Intel(R) 810E Framebuffer Device" ,
61790diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
61791index 3c14e43..eafa544 100644
61792--- a/drivers/video/logo/logo_linux_clut224.ppm
61793+++ b/drivers/video/logo/logo_linux_clut224.ppm
61794@@ -1,1604 +1,1123 @@
61795 P3
61796-# Standard 224-color Linux logo
61797 80 80
61798 255
61799- 0 0 0 0 0 0 0 0 0 0 0 0
61800- 0 0 0 0 0 0 0 0 0 0 0 0
61801- 0 0 0 0 0 0 0 0 0 0 0 0
61802- 0 0 0 0 0 0 0 0 0 0 0 0
61803- 0 0 0 0 0 0 0 0 0 0 0 0
61804- 0 0 0 0 0 0 0 0 0 0 0 0
61805- 0 0 0 0 0 0 0 0 0 0 0 0
61806- 0 0 0 0 0 0 0 0 0 0 0 0
61807- 0 0 0 0 0 0 0 0 0 0 0 0
61808- 6 6 6 6 6 6 10 10 10 10 10 10
61809- 10 10 10 6 6 6 6 6 6 6 6 6
61810- 0 0 0 0 0 0 0 0 0 0 0 0
61811- 0 0 0 0 0 0 0 0 0 0 0 0
61812- 0 0 0 0 0 0 0 0 0 0 0 0
61813- 0 0 0 0 0 0 0 0 0 0 0 0
61814- 0 0 0 0 0 0 0 0 0 0 0 0
61815- 0 0 0 0 0 0 0 0 0 0 0 0
61816- 0 0 0 0 0 0 0 0 0 0 0 0
61817- 0 0 0 0 0 0 0 0 0 0 0 0
61818- 0 0 0 0 0 0 0 0 0 0 0 0
61819- 0 0 0 0 0 0 0 0 0 0 0 0
61820- 0 0 0 0 0 0 0 0 0 0 0 0
61821- 0 0 0 0 0 0 0 0 0 0 0 0
61822- 0 0 0 0 0 0 0 0 0 0 0 0
61823- 0 0 0 0 0 0 0 0 0 0 0 0
61824- 0 0 0 0 0 0 0 0 0 0 0 0
61825- 0 0 0 0 0 0 0 0 0 0 0 0
61826- 0 0 0 0 0 0 0 0 0 0 0 0
61827- 0 0 0 6 6 6 10 10 10 14 14 14
61828- 22 22 22 26 26 26 30 30 30 34 34 34
61829- 30 30 30 30 30 30 26 26 26 18 18 18
61830- 14 14 14 10 10 10 6 6 6 0 0 0
61831- 0 0 0 0 0 0 0 0 0 0 0 0
61832- 0 0 0 0 0 0 0 0 0 0 0 0
61833- 0 0 0 0 0 0 0 0 0 0 0 0
61834- 0 0 0 0 0 0 0 0 0 0 0 0
61835- 0 0 0 0 0 0 0 0 0 0 0 0
61836- 0 0 0 0 0 0 0 0 0 0 0 0
61837- 0 0 0 0 0 0 0 0 0 0 0 0
61838- 0 0 0 0 0 0 0 0 0 0 0 0
61839- 0 0 0 0 0 0 0 0 0 0 0 0
61840- 0 0 0 0 0 1 0 0 1 0 0 0
61841- 0 0 0 0 0 0 0 0 0 0 0 0
61842- 0 0 0 0 0 0 0 0 0 0 0 0
61843- 0 0 0 0 0 0 0 0 0 0 0 0
61844- 0 0 0 0 0 0 0 0 0 0 0 0
61845- 0 0 0 0 0 0 0 0 0 0 0 0
61846- 0 0 0 0 0 0 0 0 0 0 0 0
61847- 6 6 6 14 14 14 26 26 26 42 42 42
61848- 54 54 54 66 66 66 78 78 78 78 78 78
61849- 78 78 78 74 74 74 66 66 66 54 54 54
61850- 42 42 42 26 26 26 18 18 18 10 10 10
61851- 6 6 6 0 0 0 0 0 0 0 0 0
61852- 0 0 0 0 0 0 0 0 0 0 0 0
61853- 0 0 0 0 0 0 0 0 0 0 0 0
61854- 0 0 0 0 0 0 0 0 0 0 0 0
61855- 0 0 0 0 0 0 0 0 0 0 0 0
61856- 0 0 0 0 0 0 0 0 0 0 0 0
61857- 0 0 0 0 0 0 0 0 0 0 0 0
61858- 0 0 0 0 0 0 0 0 0 0 0 0
61859- 0 0 0 0 0 0 0 0 0 0 0 0
61860- 0 0 1 0 0 0 0 0 0 0 0 0
61861- 0 0 0 0 0 0 0 0 0 0 0 0
61862- 0 0 0 0 0 0 0 0 0 0 0 0
61863- 0 0 0 0 0 0 0 0 0 0 0 0
61864- 0 0 0 0 0 0 0 0 0 0 0 0
61865- 0 0 0 0 0 0 0 0 0 0 0 0
61866- 0 0 0 0 0 0 0 0 0 10 10 10
61867- 22 22 22 42 42 42 66 66 66 86 86 86
61868- 66 66 66 38 38 38 38 38 38 22 22 22
61869- 26 26 26 34 34 34 54 54 54 66 66 66
61870- 86 86 86 70 70 70 46 46 46 26 26 26
61871- 14 14 14 6 6 6 0 0 0 0 0 0
61872- 0 0 0 0 0 0 0 0 0 0 0 0
61873- 0 0 0 0 0 0 0 0 0 0 0 0
61874- 0 0 0 0 0 0 0 0 0 0 0 0
61875- 0 0 0 0 0 0 0 0 0 0 0 0
61876- 0 0 0 0 0 0 0 0 0 0 0 0
61877- 0 0 0 0 0 0 0 0 0 0 0 0
61878- 0 0 0 0 0 0 0 0 0 0 0 0
61879- 0 0 0 0 0 0 0 0 0 0 0 0
61880- 0 0 1 0 0 1 0 0 1 0 0 0
61881- 0 0 0 0 0 0 0 0 0 0 0 0
61882- 0 0 0 0 0 0 0 0 0 0 0 0
61883- 0 0 0 0 0 0 0 0 0 0 0 0
61884- 0 0 0 0 0 0 0 0 0 0 0 0
61885- 0 0 0 0 0 0 0 0 0 0 0 0
61886- 0 0 0 0 0 0 10 10 10 26 26 26
61887- 50 50 50 82 82 82 58 58 58 6 6 6
61888- 2 2 6 2 2 6 2 2 6 2 2 6
61889- 2 2 6 2 2 6 2 2 6 2 2 6
61890- 6 6 6 54 54 54 86 86 86 66 66 66
61891- 38 38 38 18 18 18 6 6 6 0 0 0
61892- 0 0 0 0 0 0 0 0 0 0 0 0
61893- 0 0 0 0 0 0 0 0 0 0 0 0
61894- 0 0 0 0 0 0 0 0 0 0 0 0
61895- 0 0 0 0 0 0 0 0 0 0 0 0
61896- 0 0 0 0 0 0 0 0 0 0 0 0
61897- 0 0 0 0 0 0 0 0 0 0 0 0
61898- 0 0 0 0 0 0 0 0 0 0 0 0
61899- 0 0 0 0 0 0 0 0 0 0 0 0
61900- 0 0 0 0 0 0 0 0 0 0 0 0
61901- 0 0 0 0 0 0 0 0 0 0 0 0
61902- 0 0 0 0 0 0 0 0 0 0 0 0
61903- 0 0 0 0 0 0 0 0 0 0 0 0
61904- 0 0 0 0 0 0 0 0 0 0 0 0
61905- 0 0 0 0 0 0 0 0 0 0 0 0
61906- 0 0 0 6 6 6 22 22 22 50 50 50
61907- 78 78 78 34 34 34 2 2 6 2 2 6
61908- 2 2 6 2 2 6 2 2 6 2 2 6
61909- 2 2 6 2 2 6 2 2 6 2 2 6
61910- 2 2 6 2 2 6 6 6 6 70 70 70
61911- 78 78 78 46 46 46 22 22 22 6 6 6
61912- 0 0 0 0 0 0 0 0 0 0 0 0
61913- 0 0 0 0 0 0 0 0 0 0 0 0
61914- 0 0 0 0 0 0 0 0 0 0 0 0
61915- 0 0 0 0 0 0 0 0 0 0 0 0
61916- 0 0 0 0 0 0 0 0 0 0 0 0
61917- 0 0 0 0 0 0 0 0 0 0 0 0
61918- 0 0 0 0 0 0 0 0 0 0 0 0
61919- 0 0 0 0 0 0 0 0 0 0 0 0
61920- 0 0 1 0 0 1 0 0 1 0 0 0
61921- 0 0 0 0 0 0 0 0 0 0 0 0
61922- 0 0 0 0 0 0 0 0 0 0 0 0
61923- 0 0 0 0 0 0 0 0 0 0 0 0
61924- 0 0 0 0 0 0 0 0 0 0 0 0
61925- 0 0 0 0 0 0 0 0 0 0 0 0
61926- 6 6 6 18 18 18 42 42 42 82 82 82
61927- 26 26 26 2 2 6 2 2 6 2 2 6
61928- 2 2 6 2 2 6 2 2 6 2 2 6
61929- 2 2 6 2 2 6 2 2 6 14 14 14
61930- 46 46 46 34 34 34 6 6 6 2 2 6
61931- 42 42 42 78 78 78 42 42 42 18 18 18
61932- 6 6 6 0 0 0 0 0 0 0 0 0
61933- 0 0 0 0 0 0 0 0 0 0 0 0
61934- 0 0 0 0 0 0 0 0 0 0 0 0
61935- 0 0 0 0 0 0 0 0 0 0 0 0
61936- 0 0 0 0 0 0 0 0 0 0 0 0
61937- 0 0 0 0 0 0 0 0 0 0 0 0
61938- 0 0 0 0 0 0 0 0 0 0 0 0
61939- 0 0 0 0 0 0 0 0 0 0 0 0
61940- 0 0 1 0 0 0 0 0 1 0 0 0
61941- 0 0 0 0 0 0 0 0 0 0 0 0
61942- 0 0 0 0 0 0 0 0 0 0 0 0
61943- 0 0 0 0 0 0 0 0 0 0 0 0
61944- 0 0 0 0 0 0 0 0 0 0 0 0
61945- 0 0 0 0 0 0 0 0 0 0 0 0
61946- 10 10 10 30 30 30 66 66 66 58 58 58
61947- 2 2 6 2 2 6 2 2 6 2 2 6
61948- 2 2 6 2 2 6 2 2 6 2 2 6
61949- 2 2 6 2 2 6 2 2 6 26 26 26
61950- 86 86 86 101 101 101 46 46 46 10 10 10
61951- 2 2 6 58 58 58 70 70 70 34 34 34
61952- 10 10 10 0 0 0 0 0 0 0 0 0
61953- 0 0 0 0 0 0 0 0 0 0 0 0
61954- 0 0 0 0 0 0 0 0 0 0 0 0
61955- 0 0 0 0 0 0 0 0 0 0 0 0
61956- 0 0 0 0 0 0 0 0 0 0 0 0
61957- 0 0 0 0 0 0 0 0 0 0 0 0
61958- 0 0 0 0 0 0 0 0 0 0 0 0
61959- 0 0 0 0 0 0 0 0 0 0 0 0
61960- 0 0 1 0 0 1 0 0 1 0 0 0
61961- 0 0 0 0 0 0 0 0 0 0 0 0
61962- 0 0 0 0 0 0 0 0 0 0 0 0
61963- 0 0 0 0 0 0 0 0 0 0 0 0
61964- 0 0 0 0 0 0 0 0 0 0 0 0
61965- 0 0 0 0 0 0 0 0 0 0 0 0
61966- 14 14 14 42 42 42 86 86 86 10 10 10
61967- 2 2 6 2 2 6 2 2 6 2 2 6
61968- 2 2 6 2 2 6 2 2 6 2 2 6
61969- 2 2 6 2 2 6 2 2 6 30 30 30
61970- 94 94 94 94 94 94 58 58 58 26 26 26
61971- 2 2 6 6 6 6 78 78 78 54 54 54
61972- 22 22 22 6 6 6 0 0 0 0 0 0
61973- 0 0 0 0 0 0 0 0 0 0 0 0
61974- 0 0 0 0 0 0 0 0 0 0 0 0
61975- 0 0 0 0 0 0 0 0 0 0 0 0
61976- 0 0 0 0 0 0 0 0 0 0 0 0
61977- 0 0 0 0 0 0 0 0 0 0 0 0
61978- 0 0 0 0 0 0 0 0 0 0 0 0
61979- 0 0 0 0 0 0 0 0 0 0 0 0
61980- 0 0 0 0 0 0 0 0 0 0 0 0
61981- 0 0 0 0 0 0 0 0 0 0 0 0
61982- 0 0 0 0 0 0 0 0 0 0 0 0
61983- 0 0 0 0 0 0 0 0 0 0 0 0
61984- 0 0 0 0 0 0 0 0 0 0 0 0
61985- 0 0 0 0 0 0 0 0 0 6 6 6
61986- 22 22 22 62 62 62 62 62 62 2 2 6
61987- 2 2 6 2 2 6 2 2 6 2 2 6
61988- 2 2 6 2 2 6 2 2 6 2 2 6
61989- 2 2 6 2 2 6 2 2 6 26 26 26
61990- 54 54 54 38 38 38 18 18 18 10 10 10
61991- 2 2 6 2 2 6 34 34 34 82 82 82
61992- 38 38 38 14 14 14 0 0 0 0 0 0
61993- 0 0 0 0 0 0 0 0 0 0 0 0
61994- 0 0 0 0 0 0 0 0 0 0 0 0
61995- 0 0 0 0 0 0 0 0 0 0 0 0
61996- 0 0 0 0 0 0 0 0 0 0 0 0
61997- 0 0 0 0 0 0 0 0 0 0 0 0
61998- 0 0 0 0 0 0 0 0 0 0 0 0
61999- 0 0 0 0 0 0 0 0 0 0 0 0
62000- 0 0 0 0 0 1 0 0 1 0 0 0
62001- 0 0 0 0 0 0 0 0 0 0 0 0
62002- 0 0 0 0 0 0 0 0 0 0 0 0
62003- 0 0 0 0 0 0 0 0 0 0 0 0
62004- 0 0 0 0 0 0 0 0 0 0 0 0
62005- 0 0 0 0 0 0 0 0 0 6 6 6
62006- 30 30 30 78 78 78 30 30 30 2 2 6
62007- 2 2 6 2 2 6 2 2 6 2 2 6
62008- 2 2 6 2 2 6 2 2 6 2 2 6
62009- 2 2 6 2 2 6 2 2 6 10 10 10
62010- 10 10 10 2 2 6 2 2 6 2 2 6
62011- 2 2 6 2 2 6 2 2 6 78 78 78
62012- 50 50 50 18 18 18 6 6 6 0 0 0
62013- 0 0 0 0 0 0 0 0 0 0 0 0
62014- 0 0 0 0 0 0 0 0 0 0 0 0
62015- 0 0 0 0 0 0 0 0 0 0 0 0
62016- 0 0 0 0 0 0 0 0 0 0 0 0
62017- 0 0 0 0 0 0 0 0 0 0 0 0
62018- 0 0 0 0 0 0 0 0 0 0 0 0
62019- 0 0 0 0 0 0 0 0 0 0 0 0
62020- 0 0 1 0 0 0 0 0 0 0 0 0
62021- 0 0 0 0 0 0 0 0 0 0 0 0
62022- 0 0 0 0 0 0 0 0 0 0 0 0
62023- 0 0 0 0 0 0 0 0 0 0 0 0
62024- 0 0 0 0 0 0 0 0 0 0 0 0
62025- 0 0 0 0 0 0 0 0 0 10 10 10
62026- 38 38 38 86 86 86 14 14 14 2 2 6
62027- 2 2 6 2 2 6 2 2 6 2 2 6
62028- 2 2 6 2 2 6 2 2 6 2 2 6
62029- 2 2 6 2 2 6 2 2 6 2 2 6
62030- 2 2 6 2 2 6 2 2 6 2 2 6
62031- 2 2 6 2 2 6 2 2 6 54 54 54
62032- 66 66 66 26 26 26 6 6 6 0 0 0
62033- 0 0 0 0 0 0 0 0 0 0 0 0
62034- 0 0 0 0 0 0 0 0 0 0 0 0
62035- 0 0 0 0 0 0 0 0 0 0 0 0
62036- 0 0 0 0 0 0 0 0 0 0 0 0
62037- 0 0 0 0 0 0 0 0 0 0 0 0
62038- 0 0 0 0 0 0 0 0 0 0 0 0
62039- 0 0 0 0 0 0 0 0 0 0 0 0
62040- 0 0 0 0 0 1 0 0 1 0 0 0
62041- 0 0 0 0 0 0 0 0 0 0 0 0
62042- 0 0 0 0 0 0 0 0 0 0 0 0
62043- 0 0 0 0 0 0 0 0 0 0 0 0
62044- 0 0 0 0 0 0 0 0 0 0 0 0
62045- 0 0 0 0 0 0 0 0 0 14 14 14
62046- 42 42 42 82 82 82 2 2 6 2 2 6
62047- 2 2 6 6 6 6 10 10 10 2 2 6
62048- 2 2 6 2 2 6 2 2 6 2 2 6
62049- 2 2 6 2 2 6 2 2 6 6 6 6
62050- 14 14 14 10 10 10 2 2 6 2 2 6
62051- 2 2 6 2 2 6 2 2 6 18 18 18
62052- 82 82 82 34 34 34 10 10 10 0 0 0
62053- 0 0 0 0 0 0 0 0 0 0 0 0
62054- 0 0 0 0 0 0 0 0 0 0 0 0
62055- 0 0 0 0 0 0 0 0 0 0 0 0
62056- 0 0 0 0 0 0 0 0 0 0 0 0
62057- 0 0 0 0 0 0 0 0 0 0 0 0
62058- 0 0 0 0 0 0 0 0 0 0 0 0
62059- 0 0 0 0 0 0 0 0 0 0 0 0
62060- 0 0 1 0 0 0 0 0 0 0 0 0
62061- 0 0 0 0 0 0 0 0 0 0 0 0
62062- 0 0 0 0 0 0 0 0 0 0 0 0
62063- 0 0 0 0 0 0 0 0 0 0 0 0
62064- 0 0 0 0 0 0 0 0 0 0 0 0
62065- 0 0 0 0 0 0 0 0 0 14 14 14
62066- 46 46 46 86 86 86 2 2 6 2 2 6
62067- 6 6 6 6 6 6 22 22 22 34 34 34
62068- 6 6 6 2 2 6 2 2 6 2 2 6
62069- 2 2 6 2 2 6 18 18 18 34 34 34
62070- 10 10 10 50 50 50 22 22 22 2 2 6
62071- 2 2 6 2 2 6 2 2 6 10 10 10
62072- 86 86 86 42 42 42 14 14 14 0 0 0
62073- 0 0 0 0 0 0 0 0 0 0 0 0
62074- 0 0 0 0 0 0 0 0 0 0 0 0
62075- 0 0 0 0 0 0 0 0 0 0 0 0
62076- 0 0 0 0 0 0 0 0 0 0 0 0
62077- 0 0 0 0 0 0 0 0 0 0 0 0
62078- 0 0 0 0 0 0 0 0 0 0 0 0
62079- 0 0 0 0 0 0 0 0 0 0 0 0
62080- 0 0 1 0 0 1 0 0 1 0 0 0
62081- 0 0 0 0 0 0 0 0 0 0 0 0
62082- 0 0 0 0 0 0 0 0 0 0 0 0
62083- 0 0 0 0 0 0 0 0 0 0 0 0
62084- 0 0 0 0 0 0 0 0 0 0 0 0
62085- 0 0 0 0 0 0 0 0 0 14 14 14
62086- 46 46 46 86 86 86 2 2 6 2 2 6
62087- 38 38 38 116 116 116 94 94 94 22 22 22
62088- 22 22 22 2 2 6 2 2 6 2 2 6
62089- 14 14 14 86 86 86 138 138 138 162 162 162
62090-154 154 154 38 38 38 26 26 26 6 6 6
62091- 2 2 6 2 2 6 2 2 6 2 2 6
62092- 86 86 86 46 46 46 14 14 14 0 0 0
62093- 0 0 0 0 0 0 0 0 0 0 0 0
62094- 0 0 0 0 0 0 0 0 0 0 0 0
62095- 0 0 0 0 0 0 0 0 0 0 0 0
62096- 0 0 0 0 0 0 0 0 0 0 0 0
62097- 0 0 0 0 0 0 0 0 0 0 0 0
62098- 0 0 0 0 0 0 0 0 0 0 0 0
62099- 0 0 0 0 0 0 0 0 0 0 0 0
62100- 0 0 0 0 0 0 0 0 0 0 0 0
62101- 0 0 0 0 0 0 0 0 0 0 0 0
62102- 0 0 0 0 0 0 0 0 0 0 0 0
62103- 0 0 0 0 0 0 0 0 0 0 0 0
62104- 0 0 0 0 0 0 0 0 0 0 0 0
62105- 0 0 0 0 0 0 0 0 0 14 14 14
62106- 46 46 46 86 86 86 2 2 6 14 14 14
62107-134 134 134 198 198 198 195 195 195 116 116 116
62108- 10 10 10 2 2 6 2 2 6 6 6 6
62109-101 98 89 187 187 187 210 210 210 218 218 218
62110-214 214 214 134 134 134 14 14 14 6 6 6
62111- 2 2 6 2 2 6 2 2 6 2 2 6
62112- 86 86 86 50 50 50 18 18 18 6 6 6
62113- 0 0 0 0 0 0 0 0 0 0 0 0
62114- 0 0 0 0 0 0 0 0 0 0 0 0
62115- 0 0 0 0 0 0 0 0 0 0 0 0
62116- 0 0 0 0 0 0 0 0 0 0 0 0
62117- 0 0 0 0 0 0 0 0 0 0 0 0
62118- 0 0 0 0 0 0 0 0 0 0 0 0
62119- 0 0 0 0 0 0 0 0 1 0 0 0
62120- 0 0 1 0 0 1 0 0 1 0 0 0
62121- 0 0 0 0 0 0 0 0 0 0 0 0
62122- 0 0 0 0 0 0 0 0 0 0 0 0
62123- 0 0 0 0 0 0 0 0 0 0 0 0
62124- 0 0 0 0 0 0 0 0 0 0 0 0
62125- 0 0 0 0 0 0 0 0 0 14 14 14
62126- 46 46 46 86 86 86 2 2 6 54 54 54
62127-218 218 218 195 195 195 226 226 226 246 246 246
62128- 58 58 58 2 2 6 2 2 6 30 30 30
62129-210 210 210 253 253 253 174 174 174 123 123 123
62130-221 221 221 234 234 234 74 74 74 2 2 6
62131- 2 2 6 2 2 6 2 2 6 2 2 6
62132- 70 70 70 58 58 58 22 22 22 6 6 6
62133- 0 0 0 0 0 0 0 0 0 0 0 0
62134- 0 0 0 0 0 0 0 0 0 0 0 0
62135- 0 0 0 0 0 0 0 0 0 0 0 0
62136- 0 0 0 0 0 0 0 0 0 0 0 0
62137- 0 0 0 0 0 0 0 0 0 0 0 0
62138- 0 0 0 0 0 0 0 0 0 0 0 0
62139- 0 0 0 0 0 0 0 0 0 0 0 0
62140- 0 0 0 0 0 0 0 0 0 0 0 0
62141- 0 0 0 0 0 0 0 0 0 0 0 0
62142- 0 0 0 0 0 0 0 0 0 0 0 0
62143- 0 0 0 0 0 0 0 0 0 0 0 0
62144- 0 0 0 0 0 0 0 0 0 0 0 0
62145- 0 0 0 0 0 0 0 0 0 14 14 14
62146- 46 46 46 82 82 82 2 2 6 106 106 106
62147-170 170 170 26 26 26 86 86 86 226 226 226
62148-123 123 123 10 10 10 14 14 14 46 46 46
62149-231 231 231 190 190 190 6 6 6 70 70 70
62150- 90 90 90 238 238 238 158 158 158 2 2 6
62151- 2 2 6 2 2 6 2 2 6 2 2 6
62152- 70 70 70 58 58 58 22 22 22 6 6 6
62153- 0 0 0 0 0 0 0 0 0 0 0 0
62154- 0 0 0 0 0 0 0 0 0 0 0 0
62155- 0 0 0 0 0 0 0 0 0 0 0 0
62156- 0 0 0 0 0 0 0 0 0 0 0 0
62157- 0 0 0 0 0 0 0 0 0 0 0 0
62158- 0 0 0 0 0 0 0 0 0 0 0 0
62159- 0 0 0 0 0 0 0 0 1 0 0 0
62160- 0 0 1 0 0 1 0 0 1 0 0 0
62161- 0 0 0 0 0 0 0 0 0 0 0 0
62162- 0 0 0 0 0 0 0 0 0 0 0 0
62163- 0 0 0 0 0 0 0 0 0 0 0 0
62164- 0 0 0 0 0 0 0 0 0 0 0 0
62165- 0 0 0 0 0 0 0 0 0 14 14 14
62166- 42 42 42 86 86 86 6 6 6 116 116 116
62167-106 106 106 6 6 6 70 70 70 149 149 149
62168-128 128 128 18 18 18 38 38 38 54 54 54
62169-221 221 221 106 106 106 2 2 6 14 14 14
62170- 46 46 46 190 190 190 198 198 198 2 2 6
62171- 2 2 6 2 2 6 2 2 6 2 2 6
62172- 74 74 74 62 62 62 22 22 22 6 6 6
62173- 0 0 0 0 0 0 0 0 0 0 0 0
62174- 0 0 0 0 0 0 0 0 0 0 0 0
62175- 0 0 0 0 0 0 0 0 0 0 0 0
62176- 0 0 0 0 0 0 0 0 0 0 0 0
62177- 0 0 0 0 0 0 0 0 0 0 0 0
62178- 0 0 0 0 0 0 0 0 0 0 0 0
62179- 0 0 0 0 0 0 0 0 1 0 0 0
62180- 0 0 1 0 0 0 0 0 1 0 0 0
62181- 0 0 0 0 0 0 0 0 0 0 0 0
62182- 0 0 0 0 0 0 0 0 0 0 0 0
62183- 0 0 0 0 0 0 0 0 0 0 0 0
62184- 0 0 0 0 0 0 0 0 0 0 0 0
62185- 0 0 0 0 0 0 0 0 0 14 14 14
62186- 42 42 42 94 94 94 14 14 14 101 101 101
62187-128 128 128 2 2 6 18 18 18 116 116 116
62188-118 98 46 121 92 8 121 92 8 98 78 10
62189-162 162 162 106 106 106 2 2 6 2 2 6
62190- 2 2 6 195 195 195 195 195 195 6 6 6
62191- 2 2 6 2 2 6 2 2 6 2 2 6
62192- 74 74 74 62 62 62 22 22 22 6 6 6
62193- 0 0 0 0 0 0 0 0 0 0 0 0
62194- 0 0 0 0 0 0 0 0 0 0 0 0
62195- 0 0 0 0 0 0 0 0 0 0 0 0
62196- 0 0 0 0 0 0 0 0 0 0 0 0
62197- 0 0 0 0 0 0 0 0 0 0 0 0
62198- 0 0 0 0 0 0 0 0 0 0 0 0
62199- 0 0 0 0 0 0 0 0 1 0 0 1
62200- 0 0 1 0 0 0 0 0 1 0 0 0
62201- 0 0 0 0 0 0 0 0 0 0 0 0
62202- 0 0 0 0 0 0 0 0 0 0 0 0
62203- 0 0 0 0 0 0 0 0 0 0 0 0
62204- 0 0 0 0 0 0 0 0 0 0 0 0
62205- 0 0 0 0 0 0 0 0 0 10 10 10
62206- 38 38 38 90 90 90 14 14 14 58 58 58
62207-210 210 210 26 26 26 54 38 6 154 114 10
62208-226 170 11 236 186 11 225 175 15 184 144 12
62209-215 174 15 175 146 61 37 26 9 2 2 6
62210- 70 70 70 246 246 246 138 138 138 2 2 6
62211- 2 2 6 2 2 6 2 2 6 2 2 6
62212- 70 70 70 66 66 66 26 26 26 6 6 6
62213- 0 0 0 0 0 0 0 0 0 0 0 0
62214- 0 0 0 0 0 0 0 0 0 0 0 0
62215- 0 0 0 0 0 0 0 0 0 0 0 0
62216- 0 0 0 0 0 0 0 0 0 0 0 0
62217- 0 0 0 0 0 0 0 0 0 0 0 0
62218- 0 0 0 0 0 0 0 0 0 0 0 0
62219- 0 0 0 0 0 0 0 0 0 0 0 0
62220- 0 0 0 0 0 0 0 0 0 0 0 0
62221- 0 0 0 0 0 0 0 0 0 0 0 0
62222- 0 0 0 0 0 0 0 0 0 0 0 0
62223- 0 0 0 0 0 0 0 0 0 0 0 0
62224- 0 0 0 0 0 0 0 0 0 0 0 0
62225- 0 0 0 0 0 0 0 0 0 10 10 10
62226- 38 38 38 86 86 86 14 14 14 10 10 10
62227-195 195 195 188 164 115 192 133 9 225 175 15
62228-239 182 13 234 190 10 232 195 16 232 200 30
62229-245 207 45 241 208 19 232 195 16 184 144 12
62230-218 194 134 211 206 186 42 42 42 2 2 6
62231- 2 2 6 2 2 6 2 2 6 2 2 6
62232- 50 50 50 74 74 74 30 30 30 6 6 6
62233- 0 0 0 0 0 0 0 0 0 0 0 0
62234- 0 0 0 0 0 0 0 0 0 0 0 0
62235- 0 0 0 0 0 0 0 0 0 0 0 0
62236- 0 0 0 0 0 0 0 0 0 0 0 0
62237- 0 0 0 0 0 0 0 0 0 0 0 0
62238- 0 0 0 0 0 0 0 0 0 0 0 0
62239- 0 0 0 0 0 0 0 0 0 0 0 0
62240- 0 0 0 0 0 0 0 0 0 0 0 0
62241- 0 0 0 0 0 0 0 0 0 0 0 0
62242- 0 0 0 0 0 0 0 0 0 0 0 0
62243- 0 0 0 0 0 0 0 0 0 0 0 0
62244- 0 0 0 0 0 0 0 0 0 0 0 0
62245- 0 0 0 0 0 0 0 0 0 10 10 10
62246- 34 34 34 86 86 86 14 14 14 2 2 6
62247-121 87 25 192 133 9 219 162 10 239 182 13
62248-236 186 11 232 195 16 241 208 19 244 214 54
62249-246 218 60 246 218 38 246 215 20 241 208 19
62250-241 208 19 226 184 13 121 87 25 2 2 6
62251- 2 2 6 2 2 6 2 2 6 2 2 6
62252- 50 50 50 82 82 82 34 34 34 10 10 10
62253- 0 0 0 0 0 0 0 0 0 0 0 0
62254- 0 0 0 0 0 0 0 0 0 0 0 0
62255- 0 0 0 0 0 0 0 0 0 0 0 0
62256- 0 0 0 0 0 0 0 0 0 0 0 0
62257- 0 0 0 0 0 0 0 0 0 0 0 0
62258- 0 0 0 0 0 0 0 0 0 0 0 0
62259- 0 0 0 0 0 0 0 0 0 0 0 0
62260- 0 0 0 0 0 0 0 0 0 0 0 0
62261- 0 0 0 0 0 0 0 0 0 0 0 0
62262- 0 0 0 0 0 0 0 0 0 0 0 0
62263- 0 0 0 0 0 0 0 0 0 0 0 0
62264- 0 0 0 0 0 0 0 0 0 0 0 0
62265- 0 0 0 0 0 0 0 0 0 10 10 10
62266- 34 34 34 82 82 82 30 30 30 61 42 6
62267-180 123 7 206 145 10 230 174 11 239 182 13
62268-234 190 10 238 202 15 241 208 19 246 218 74
62269-246 218 38 246 215 20 246 215 20 246 215 20
62270-226 184 13 215 174 15 184 144 12 6 6 6
62271- 2 2 6 2 2 6 2 2 6 2 2 6
62272- 26 26 26 94 94 94 42 42 42 14 14 14
62273- 0 0 0 0 0 0 0 0 0 0 0 0
62274- 0 0 0 0 0 0 0 0 0 0 0 0
62275- 0 0 0 0 0 0 0 0 0 0 0 0
62276- 0 0 0 0 0 0 0 0 0 0 0 0
62277- 0 0 0 0 0 0 0 0 0 0 0 0
62278- 0 0 0 0 0 0 0 0 0 0 0 0
62279- 0 0 0 0 0 0 0 0 0 0 0 0
62280- 0 0 0 0 0 0 0 0 0 0 0 0
62281- 0 0 0 0 0 0 0 0 0 0 0 0
62282- 0 0 0 0 0 0 0 0 0 0 0 0
62283- 0 0 0 0 0 0 0 0 0 0 0 0
62284- 0 0 0 0 0 0 0 0 0 0 0 0
62285- 0 0 0 0 0 0 0 0 0 10 10 10
62286- 30 30 30 78 78 78 50 50 50 104 69 6
62287-192 133 9 216 158 10 236 178 12 236 186 11
62288-232 195 16 241 208 19 244 214 54 245 215 43
62289-246 215 20 246 215 20 241 208 19 198 155 10
62290-200 144 11 216 158 10 156 118 10 2 2 6
62291- 2 2 6 2 2 6 2 2 6 2 2 6
62292- 6 6 6 90 90 90 54 54 54 18 18 18
62293- 6 6 6 0 0 0 0 0 0 0 0 0
62294- 0 0 0 0 0 0 0 0 0 0 0 0
62295- 0 0 0 0 0 0 0 0 0 0 0 0
62296- 0 0 0 0 0 0 0 0 0 0 0 0
62297- 0 0 0 0 0 0 0 0 0 0 0 0
62298- 0 0 0 0 0 0 0 0 0 0 0 0
62299- 0 0 0 0 0 0 0 0 0 0 0 0
62300- 0 0 0 0 0 0 0 0 0 0 0 0
62301- 0 0 0 0 0 0 0 0 0 0 0 0
62302- 0 0 0 0 0 0 0 0 0 0 0 0
62303- 0 0 0 0 0 0 0 0 0 0 0 0
62304- 0 0 0 0 0 0 0 0 0 0 0 0
62305- 0 0 0 0 0 0 0 0 0 10 10 10
62306- 30 30 30 78 78 78 46 46 46 22 22 22
62307-137 92 6 210 162 10 239 182 13 238 190 10
62308-238 202 15 241 208 19 246 215 20 246 215 20
62309-241 208 19 203 166 17 185 133 11 210 150 10
62310-216 158 10 210 150 10 102 78 10 2 2 6
62311- 6 6 6 54 54 54 14 14 14 2 2 6
62312- 2 2 6 62 62 62 74 74 74 30 30 30
62313- 10 10 10 0 0 0 0 0 0 0 0 0
62314- 0 0 0 0 0 0 0 0 0 0 0 0
62315- 0 0 0 0 0 0 0 0 0 0 0 0
62316- 0 0 0 0 0 0 0 0 0 0 0 0
62317- 0 0 0 0 0 0 0 0 0 0 0 0
62318- 0 0 0 0 0 0 0 0 0 0 0 0
62319- 0 0 0 0 0 0 0 0 0 0 0 0
62320- 0 0 0 0 0 0 0 0 0 0 0 0
62321- 0 0 0 0 0 0 0 0 0 0 0 0
62322- 0 0 0 0 0 0 0 0 0 0 0 0
62323- 0 0 0 0 0 0 0 0 0 0 0 0
62324- 0 0 0 0 0 0 0 0 0 0 0 0
62325- 0 0 0 0 0 0 0 0 0 10 10 10
62326- 34 34 34 78 78 78 50 50 50 6 6 6
62327- 94 70 30 139 102 15 190 146 13 226 184 13
62328-232 200 30 232 195 16 215 174 15 190 146 13
62329-168 122 10 192 133 9 210 150 10 213 154 11
62330-202 150 34 182 157 106 101 98 89 2 2 6
62331- 2 2 6 78 78 78 116 116 116 58 58 58
62332- 2 2 6 22 22 22 90 90 90 46 46 46
62333- 18 18 18 6 6 6 0 0 0 0 0 0
62334- 0 0 0 0 0 0 0 0 0 0 0 0
62335- 0 0 0 0 0 0 0 0 0 0 0 0
62336- 0 0 0 0 0 0 0 0 0 0 0 0
62337- 0 0 0 0 0 0 0 0 0 0 0 0
62338- 0 0 0 0 0 0 0 0 0 0 0 0
62339- 0 0 0 0 0 0 0 0 0 0 0 0
62340- 0 0 0 0 0 0 0 0 0 0 0 0
62341- 0 0 0 0 0 0 0 0 0 0 0 0
62342- 0 0 0 0 0 0 0 0 0 0 0 0
62343- 0 0 0 0 0 0 0 0 0 0 0 0
62344- 0 0 0 0 0 0 0 0 0 0 0 0
62345- 0 0 0 0 0 0 0 0 0 10 10 10
62346- 38 38 38 86 86 86 50 50 50 6 6 6
62347-128 128 128 174 154 114 156 107 11 168 122 10
62348-198 155 10 184 144 12 197 138 11 200 144 11
62349-206 145 10 206 145 10 197 138 11 188 164 115
62350-195 195 195 198 198 198 174 174 174 14 14 14
62351- 2 2 6 22 22 22 116 116 116 116 116 116
62352- 22 22 22 2 2 6 74 74 74 70 70 70
62353- 30 30 30 10 10 10 0 0 0 0 0 0
62354- 0 0 0 0 0 0 0 0 0 0 0 0
62355- 0 0 0 0 0 0 0 0 0 0 0 0
62356- 0 0 0 0 0 0 0 0 0 0 0 0
62357- 0 0 0 0 0 0 0 0 0 0 0 0
62358- 0 0 0 0 0 0 0 0 0 0 0 0
62359- 0 0 0 0 0 0 0 0 0 0 0 0
62360- 0 0 0 0 0 0 0 0 0 0 0 0
62361- 0 0 0 0 0 0 0 0 0 0 0 0
62362- 0 0 0 0 0 0 0 0 0 0 0 0
62363- 0 0 0 0 0 0 0 0 0 0 0 0
62364- 0 0 0 0 0 0 0 0 0 0 0 0
62365- 0 0 0 0 0 0 6 6 6 18 18 18
62366- 50 50 50 101 101 101 26 26 26 10 10 10
62367-138 138 138 190 190 190 174 154 114 156 107 11
62368-197 138 11 200 144 11 197 138 11 192 133 9
62369-180 123 7 190 142 34 190 178 144 187 187 187
62370-202 202 202 221 221 221 214 214 214 66 66 66
62371- 2 2 6 2 2 6 50 50 50 62 62 62
62372- 6 6 6 2 2 6 10 10 10 90 90 90
62373- 50 50 50 18 18 18 6 6 6 0 0 0
62374- 0 0 0 0 0 0 0 0 0 0 0 0
62375- 0 0 0 0 0 0 0 0 0 0 0 0
62376- 0 0 0 0 0 0 0 0 0 0 0 0
62377- 0 0 0 0 0 0 0 0 0 0 0 0
62378- 0 0 0 0 0 0 0 0 0 0 0 0
62379- 0 0 0 0 0 0 0 0 0 0 0 0
62380- 0 0 0 0 0 0 0 0 0 0 0 0
62381- 0 0 0 0 0 0 0 0 0 0 0 0
62382- 0 0 0 0 0 0 0 0 0 0 0 0
62383- 0 0 0 0 0 0 0 0 0 0 0 0
62384- 0 0 0 0 0 0 0 0 0 0 0 0
62385- 0 0 0 0 0 0 10 10 10 34 34 34
62386- 74 74 74 74 74 74 2 2 6 6 6 6
62387-144 144 144 198 198 198 190 190 190 178 166 146
62388-154 121 60 156 107 11 156 107 11 168 124 44
62389-174 154 114 187 187 187 190 190 190 210 210 210
62390-246 246 246 253 253 253 253 253 253 182 182 182
62391- 6 6 6 2 2 6 2 2 6 2 2 6
62392- 2 2 6 2 2 6 2 2 6 62 62 62
62393- 74 74 74 34 34 34 14 14 14 0 0 0
62394- 0 0 0 0 0 0 0 0 0 0 0 0
62395- 0 0 0 0 0 0 0 0 0 0 0 0
62396- 0 0 0 0 0 0 0 0 0 0 0 0
62397- 0 0 0 0 0 0 0 0 0 0 0 0
62398- 0 0 0 0 0 0 0 0 0 0 0 0
62399- 0 0 0 0 0 0 0 0 0 0 0 0
62400- 0 0 0 0 0 0 0 0 0 0 0 0
62401- 0 0 0 0 0 0 0 0 0 0 0 0
62402- 0 0 0 0 0 0 0 0 0 0 0 0
62403- 0 0 0 0 0 0 0 0 0 0 0 0
62404- 0 0 0 0 0 0 0 0 0 0 0 0
62405- 0 0 0 10 10 10 22 22 22 54 54 54
62406- 94 94 94 18 18 18 2 2 6 46 46 46
62407-234 234 234 221 221 221 190 190 190 190 190 190
62408-190 190 190 187 187 187 187 187 187 190 190 190
62409-190 190 190 195 195 195 214 214 214 242 242 242
62410-253 253 253 253 253 253 253 253 253 253 253 253
62411- 82 82 82 2 2 6 2 2 6 2 2 6
62412- 2 2 6 2 2 6 2 2 6 14 14 14
62413- 86 86 86 54 54 54 22 22 22 6 6 6
62414- 0 0 0 0 0 0 0 0 0 0 0 0
62415- 0 0 0 0 0 0 0 0 0 0 0 0
62416- 0 0 0 0 0 0 0 0 0 0 0 0
62417- 0 0 0 0 0 0 0 0 0 0 0 0
62418- 0 0 0 0 0 0 0 0 0 0 0 0
62419- 0 0 0 0 0 0 0 0 0 0 0 0
62420- 0 0 0 0 0 0 0 0 0 0 0 0
62421- 0 0 0 0 0 0 0 0 0 0 0 0
62422- 0 0 0 0 0 0 0 0 0 0 0 0
62423- 0 0 0 0 0 0 0 0 0 0 0 0
62424- 0 0 0 0 0 0 0 0 0 0 0 0
62425- 6 6 6 18 18 18 46 46 46 90 90 90
62426- 46 46 46 18 18 18 6 6 6 182 182 182
62427-253 253 253 246 246 246 206 206 206 190 190 190
62428-190 190 190 190 190 190 190 190 190 190 190 190
62429-206 206 206 231 231 231 250 250 250 253 253 253
62430-253 253 253 253 253 253 253 253 253 253 253 253
62431-202 202 202 14 14 14 2 2 6 2 2 6
62432- 2 2 6 2 2 6 2 2 6 2 2 6
62433- 42 42 42 86 86 86 42 42 42 18 18 18
62434- 6 6 6 0 0 0 0 0 0 0 0 0
62435- 0 0 0 0 0 0 0 0 0 0 0 0
62436- 0 0 0 0 0 0 0 0 0 0 0 0
62437- 0 0 0 0 0 0 0 0 0 0 0 0
62438- 0 0 0 0 0 0 0 0 0 0 0 0
62439- 0 0 0 0 0 0 0 0 0 0 0 0
62440- 0 0 0 0 0 0 0 0 0 0 0 0
62441- 0 0 0 0 0 0 0 0 0 0 0 0
62442- 0 0 0 0 0 0 0 0 0 0 0 0
62443- 0 0 0 0 0 0 0 0 0 0 0 0
62444- 0 0 0 0 0 0 0 0 0 6 6 6
62445- 14 14 14 38 38 38 74 74 74 66 66 66
62446- 2 2 6 6 6 6 90 90 90 250 250 250
62447-253 253 253 253 253 253 238 238 238 198 198 198
62448-190 190 190 190 190 190 195 195 195 221 221 221
62449-246 246 246 253 253 253 253 253 253 253 253 253
62450-253 253 253 253 253 253 253 253 253 253 253 253
62451-253 253 253 82 82 82 2 2 6 2 2 6
62452- 2 2 6 2 2 6 2 2 6 2 2 6
62453- 2 2 6 78 78 78 70 70 70 34 34 34
62454- 14 14 14 6 6 6 0 0 0 0 0 0
62455- 0 0 0 0 0 0 0 0 0 0 0 0
62456- 0 0 0 0 0 0 0 0 0 0 0 0
62457- 0 0 0 0 0 0 0 0 0 0 0 0
62458- 0 0 0 0 0 0 0 0 0 0 0 0
62459- 0 0 0 0 0 0 0 0 0 0 0 0
62460- 0 0 0 0 0 0 0 0 0 0 0 0
62461- 0 0 0 0 0 0 0 0 0 0 0 0
62462- 0 0 0 0 0 0 0 0 0 0 0 0
62463- 0 0 0 0 0 0 0 0 0 0 0 0
62464- 0 0 0 0 0 0 0 0 0 14 14 14
62465- 34 34 34 66 66 66 78 78 78 6 6 6
62466- 2 2 6 18 18 18 218 218 218 253 253 253
62467-253 253 253 253 253 253 253 253 253 246 246 246
62468-226 226 226 231 231 231 246 246 246 253 253 253
62469-253 253 253 253 253 253 253 253 253 253 253 253
62470-253 253 253 253 253 253 253 253 253 253 253 253
62471-253 253 253 178 178 178 2 2 6 2 2 6
62472- 2 2 6 2 2 6 2 2 6 2 2 6
62473- 2 2 6 18 18 18 90 90 90 62 62 62
62474- 30 30 30 10 10 10 0 0 0 0 0 0
62475- 0 0 0 0 0 0 0 0 0 0 0 0
62476- 0 0 0 0 0 0 0 0 0 0 0 0
62477- 0 0 0 0 0 0 0 0 0 0 0 0
62478- 0 0 0 0 0 0 0 0 0 0 0 0
62479- 0 0 0 0 0 0 0 0 0 0 0 0
62480- 0 0 0 0 0 0 0 0 0 0 0 0
62481- 0 0 0 0 0 0 0 0 0 0 0 0
62482- 0 0 0 0 0 0 0 0 0 0 0 0
62483- 0 0 0 0 0 0 0 0 0 0 0 0
62484- 0 0 0 0 0 0 10 10 10 26 26 26
62485- 58 58 58 90 90 90 18 18 18 2 2 6
62486- 2 2 6 110 110 110 253 253 253 253 253 253
62487-253 253 253 253 253 253 253 253 253 253 253 253
62488-250 250 250 253 253 253 253 253 253 253 253 253
62489-253 253 253 253 253 253 253 253 253 253 253 253
62490-253 253 253 253 253 253 253 253 253 253 253 253
62491-253 253 253 231 231 231 18 18 18 2 2 6
62492- 2 2 6 2 2 6 2 2 6 2 2 6
62493- 2 2 6 2 2 6 18 18 18 94 94 94
62494- 54 54 54 26 26 26 10 10 10 0 0 0
62495- 0 0 0 0 0 0 0 0 0 0 0 0
62496- 0 0 0 0 0 0 0 0 0 0 0 0
62497- 0 0 0 0 0 0 0 0 0 0 0 0
62498- 0 0 0 0 0 0 0 0 0 0 0 0
62499- 0 0 0 0 0 0 0 0 0 0 0 0
62500- 0 0 0 0 0 0 0 0 0 0 0 0
62501- 0 0 0 0 0 0 0 0 0 0 0 0
62502- 0 0 0 0 0 0 0 0 0 0 0 0
62503- 0 0 0 0 0 0 0 0 0 0 0 0
62504- 0 0 0 6 6 6 22 22 22 50 50 50
62505- 90 90 90 26 26 26 2 2 6 2 2 6
62506- 14 14 14 195 195 195 250 250 250 253 253 253
62507-253 253 253 253 253 253 253 253 253 253 253 253
62508-253 253 253 253 253 253 253 253 253 253 253 253
62509-253 253 253 253 253 253 253 253 253 253 253 253
62510-253 253 253 253 253 253 253 253 253 253 253 253
62511-250 250 250 242 242 242 54 54 54 2 2 6
62512- 2 2 6 2 2 6 2 2 6 2 2 6
62513- 2 2 6 2 2 6 2 2 6 38 38 38
62514- 86 86 86 50 50 50 22 22 22 6 6 6
62515- 0 0 0 0 0 0 0 0 0 0 0 0
62516- 0 0 0 0 0 0 0 0 0 0 0 0
62517- 0 0 0 0 0 0 0 0 0 0 0 0
62518- 0 0 0 0 0 0 0 0 0 0 0 0
62519- 0 0 0 0 0 0 0 0 0 0 0 0
62520- 0 0 0 0 0 0 0 0 0 0 0 0
62521- 0 0 0 0 0 0 0 0 0 0 0 0
62522- 0 0 0 0 0 0 0 0 0 0 0 0
62523- 0 0 0 0 0 0 0 0 0 0 0 0
62524- 6 6 6 14 14 14 38 38 38 82 82 82
62525- 34 34 34 2 2 6 2 2 6 2 2 6
62526- 42 42 42 195 195 195 246 246 246 253 253 253
62527-253 253 253 253 253 253 253 253 253 250 250 250
62528-242 242 242 242 242 242 250 250 250 253 253 253
62529-253 253 253 253 253 253 253 253 253 253 253 253
62530-253 253 253 250 250 250 246 246 246 238 238 238
62531-226 226 226 231 231 231 101 101 101 6 6 6
62532- 2 2 6 2 2 6 2 2 6 2 2 6
62533- 2 2 6 2 2 6 2 2 6 2 2 6
62534- 38 38 38 82 82 82 42 42 42 14 14 14
62535- 6 6 6 0 0 0 0 0 0 0 0 0
62536- 0 0 0 0 0 0 0 0 0 0 0 0
62537- 0 0 0 0 0 0 0 0 0 0 0 0
62538- 0 0 0 0 0 0 0 0 0 0 0 0
62539- 0 0 0 0 0 0 0 0 0 0 0 0
62540- 0 0 0 0 0 0 0 0 0 0 0 0
62541- 0 0 0 0 0 0 0 0 0 0 0 0
62542- 0 0 0 0 0 0 0 0 0 0 0 0
62543- 0 0 0 0 0 0 0 0 0 0 0 0
62544- 10 10 10 26 26 26 62 62 62 66 66 66
62545- 2 2 6 2 2 6 2 2 6 6 6 6
62546- 70 70 70 170 170 170 206 206 206 234 234 234
62547-246 246 246 250 250 250 250 250 250 238 238 238
62548-226 226 226 231 231 231 238 238 238 250 250 250
62549-250 250 250 250 250 250 246 246 246 231 231 231
62550-214 214 214 206 206 206 202 202 202 202 202 202
62551-198 198 198 202 202 202 182 182 182 18 18 18
62552- 2 2 6 2 2 6 2 2 6 2 2 6
62553- 2 2 6 2 2 6 2 2 6 2 2 6
62554- 2 2 6 62 62 62 66 66 66 30 30 30
62555- 10 10 10 0 0 0 0 0 0 0 0 0
62556- 0 0 0 0 0 0 0 0 0 0 0 0
62557- 0 0 0 0 0 0 0 0 0 0 0 0
62558- 0 0 0 0 0 0 0 0 0 0 0 0
62559- 0 0 0 0 0 0 0 0 0 0 0 0
62560- 0 0 0 0 0 0 0 0 0 0 0 0
62561- 0 0 0 0 0 0 0 0 0 0 0 0
62562- 0 0 0 0 0 0 0 0 0 0 0 0
62563- 0 0 0 0 0 0 0 0 0 0 0 0
62564- 14 14 14 42 42 42 82 82 82 18 18 18
62565- 2 2 6 2 2 6 2 2 6 10 10 10
62566- 94 94 94 182 182 182 218 218 218 242 242 242
62567-250 250 250 253 253 253 253 253 253 250 250 250
62568-234 234 234 253 253 253 253 253 253 253 253 253
62569-253 253 253 253 253 253 253 253 253 246 246 246
62570-238 238 238 226 226 226 210 210 210 202 202 202
62571-195 195 195 195 195 195 210 210 210 158 158 158
62572- 6 6 6 14 14 14 50 50 50 14 14 14
62573- 2 2 6 2 2 6 2 2 6 2 2 6
62574- 2 2 6 6 6 6 86 86 86 46 46 46
62575- 18 18 18 6 6 6 0 0 0 0 0 0
62576- 0 0 0 0 0 0 0 0 0 0 0 0
62577- 0 0 0 0 0 0 0 0 0 0 0 0
62578- 0 0 0 0 0 0 0 0 0 0 0 0
62579- 0 0 0 0 0 0 0 0 0 0 0 0
62580- 0 0 0 0 0 0 0 0 0 0 0 0
62581- 0 0 0 0 0 0 0 0 0 0 0 0
62582- 0 0 0 0 0 0 0 0 0 0 0 0
62583- 0 0 0 0 0 0 0 0 0 6 6 6
62584- 22 22 22 54 54 54 70 70 70 2 2 6
62585- 2 2 6 10 10 10 2 2 6 22 22 22
62586-166 166 166 231 231 231 250 250 250 253 253 253
62587-253 253 253 253 253 253 253 253 253 250 250 250
62588-242 242 242 253 253 253 253 253 253 253 253 253
62589-253 253 253 253 253 253 253 253 253 253 253 253
62590-253 253 253 253 253 253 253 253 253 246 246 246
62591-231 231 231 206 206 206 198 198 198 226 226 226
62592- 94 94 94 2 2 6 6 6 6 38 38 38
62593- 30 30 30 2 2 6 2 2 6 2 2 6
62594- 2 2 6 2 2 6 62 62 62 66 66 66
62595- 26 26 26 10 10 10 0 0 0 0 0 0
62596- 0 0 0 0 0 0 0 0 0 0 0 0
62597- 0 0 0 0 0 0 0 0 0 0 0 0
62598- 0 0 0 0 0 0 0 0 0 0 0 0
62599- 0 0 0 0 0 0 0 0 0 0 0 0
62600- 0 0 0 0 0 0 0 0 0 0 0 0
62601- 0 0 0 0 0 0 0 0 0 0 0 0
62602- 0 0 0 0 0 0 0 0 0 0 0 0
62603- 0 0 0 0 0 0 0 0 0 10 10 10
62604- 30 30 30 74 74 74 50 50 50 2 2 6
62605- 26 26 26 26 26 26 2 2 6 106 106 106
62606-238 238 238 253 253 253 253 253 253 253 253 253
62607-253 253 253 253 253 253 253 253 253 253 253 253
62608-253 253 253 253 253 253 253 253 253 253 253 253
62609-253 253 253 253 253 253 253 253 253 253 253 253
62610-253 253 253 253 253 253 253 253 253 253 253 253
62611-253 253 253 246 246 246 218 218 218 202 202 202
62612-210 210 210 14 14 14 2 2 6 2 2 6
62613- 30 30 30 22 22 22 2 2 6 2 2 6
62614- 2 2 6 2 2 6 18 18 18 86 86 86
62615- 42 42 42 14 14 14 0 0 0 0 0 0
62616- 0 0 0 0 0 0 0 0 0 0 0 0
62617- 0 0 0 0 0 0 0 0 0 0 0 0
62618- 0 0 0 0 0 0 0 0 0 0 0 0
62619- 0 0 0 0 0 0 0 0 0 0 0 0
62620- 0 0 0 0 0 0 0 0 0 0 0 0
62621- 0 0 0 0 0 0 0 0 0 0 0 0
62622- 0 0 0 0 0 0 0 0 0 0 0 0
62623- 0 0 0 0 0 0 0 0 0 14 14 14
62624- 42 42 42 90 90 90 22 22 22 2 2 6
62625- 42 42 42 2 2 6 18 18 18 218 218 218
62626-253 253 253 253 253 253 253 253 253 253 253 253
62627-253 253 253 253 253 253 253 253 253 253 253 253
62628-253 253 253 253 253 253 253 253 253 253 253 253
62629-253 253 253 253 253 253 253 253 253 253 253 253
62630-253 253 253 253 253 253 253 253 253 253 253 253
62631-253 253 253 253 253 253 250 250 250 221 221 221
62632-218 218 218 101 101 101 2 2 6 14 14 14
62633- 18 18 18 38 38 38 10 10 10 2 2 6
62634- 2 2 6 2 2 6 2 2 6 78 78 78
62635- 58 58 58 22 22 22 6 6 6 0 0 0
62636- 0 0 0 0 0 0 0 0 0 0 0 0
62637- 0 0 0 0 0 0 0 0 0 0 0 0
62638- 0 0 0 0 0 0 0 0 0 0 0 0
62639- 0 0 0 0 0 0 0 0 0 0 0 0
62640- 0 0 0 0 0 0 0 0 0 0 0 0
62641- 0 0 0 0 0 0 0 0 0 0 0 0
62642- 0 0 0 0 0 0 0 0 0 0 0 0
62643- 0 0 0 0 0 0 6 6 6 18 18 18
62644- 54 54 54 82 82 82 2 2 6 26 26 26
62645- 22 22 22 2 2 6 123 123 123 253 253 253
62646-253 253 253 253 253 253 253 253 253 253 253 253
62647-253 253 253 253 253 253 253 253 253 253 253 253
62648-253 253 253 253 253 253 253 253 253 253 253 253
62649-253 253 253 253 253 253 253 253 253 253 253 253
62650-253 253 253 253 253 253 253 253 253 253 253 253
62651-253 253 253 253 253 253 253 253 253 250 250 250
62652-238 238 238 198 198 198 6 6 6 38 38 38
62653- 58 58 58 26 26 26 38 38 38 2 2 6
62654- 2 2 6 2 2 6 2 2 6 46 46 46
62655- 78 78 78 30 30 30 10 10 10 0 0 0
62656- 0 0 0 0 0 0 0 0 0 0 0 0
62657- 0 0 0 0 0 0 0 0 0 0 0 0
62658- 0 0 0 0 0 0 0 0 0 0 0 0
62659- 0 0 0 0 0 0 0 0 0 0 0 0
62660- 0 0 0 0 0 0 0 0 0 0 0 0
62661- 0 0 0 0 0 0 0 0 0 0 0 0
62662- 0 0 0 0 0 0 0 0 0 0 0 0
62663- 0 0 0 0 0 0 10 10 10 30 30 30
62664- 74 74 74 58 58 58 2 2 6 42 42 42
62665- 2 2 6 22 22 22 231 231 231 253 253 253
62666-253 253 253 253 253 253 253 253 253 253 253 253
62667-253 253 253 253 253 253 253 253 253 250 250 250
62668-253 253 253 253 253 253 253 253 253 253 253 253
62669-253 253 253 253 253 253 253 253 253 253 253 253
62670-253 253 253 253 253 253 253 253 253 253 253 253
62671-253 253 253 253 253 253 253 253 253 253 253 253
62672-253 253 253 246 246 246 46 46 46 38 38 38
62673- 42 42 42 14 14 14 38 38 38 14 14 14
62674- 2 2 6 2 2 6 2 2 6 6 6 6
62675- 86 86 86 46 46 46 14 14 14 0 0 0
62676- 0 0 0 0 0 0 0 0 0 0 0 0
62677- 0 0 0 0 0 0 0 0 0 0 0 0
62678- 0 0 0 0 0 0 0 0 0 0 0 0
62679- 0 0 0 0 0 0 0 0 0 0 0 0
62680- 0 0 0 0 0 0 0 0 0 0 0 0
62681- 0 0 0 0 0 0 0 0 0 0 0 0
62682- 0 0 0 0 0 0 0 0 0 0 0 0
62683- 0 0 0 6 6 6 14 14 14 42 42 42
62684- 90 90 90 18 18 18 18 18 18 26 26 26
62685- 2 2 6 116 116 116 253 253 253 253 253 253
62686-253 253 253 253 253 253 253 253 253 253 253 253
62687-253 253 253 253 253 253 250 250 250 238 238 238
62688-253 253 253 253 253 253 253 253 253 253 253 253
62689-253 253 253 253 253 253 253 253 253 253 253 253
62690-253 253 253 253 253 253 253 253 253 253 253 253
62691-253 253 253 253 253 253 253 253 253 253 253 253
62692-253 253 253 253 253 253 94 94 94 6 6 6
62693- 2 2 6 2 2 6 10 10 10 34 34 34
62694- 2 2 6 2 2 6 2 2 6 2 2 6
62695- 74 74 74 58 58 58 22 22 22 6 6 6
62696- 0 0 0 0 0 0 0 0 0 0 0 0
62697- 0 0 0 0 0 0 0 0 0 0 0 0
62698- 0 0 0 0 0 0 0 0 0 0 0 0
62699- 0 0 0 0 0 0 0 0 0 0 0 0
62700- 0 0 0 0 0 0 0 0 0 0 0 0
62701- 0 0 0 0 0 0 0 0 0 0 0 0
62702- 0 0 0 0 0 0 0 0 0 0 0 0
62703- 0 0 0 10 10 10 26 26 26 66 66 66
62704- 82 82 82 2 2 6 38 38 38 6 6 6
62705- 14 14 14 210 210 210 253 253 253 253 253 253
62706-253 253 253 253 253 253 253 253 253 253 253 253
62707-253 253 253 253 253 253 246 246 246 242 242 242
62708-253 253 253 253 253 253 253 253 253 253 253 253
62709-253 253 253 253 253 253 253 253 253 253 253 253
62710-253 253 253 253 253 253 253 253 253 253 253 253
62711-253 253 253 253 253 253 253 253 253 253 253 253
62712-253 253 253 253 253 253 144 144 144 2 2 6
62713- 2 2 6 2 2 6 2 2 6 46 46 46
62714- 2 2 6 2 2 6 2 2 6 2 2 6
62715- 42 42 42 74 74 74 30 30 30 10 10 10
62716- 0 0 0 0 0 0 0 0 0 0 0 0
62717- 0 0 0 0 0 0 0 0 0 0 0 0
62718- 0 0 0 0 0 0 0 0 0 0 0 0
62719- 0 0 0 0 0 0 0 0 0 0 0 0
62720- 0 0 0 0 0 0 0 0 0 0 0 0
62721- 0 0 0 0 0 0 0 0 0 0 0 0
62722- 0 0 0 0 0 0 0 0 0 0 0 0
62723- 6 6 6 14 14 14 42 42 42 90 90 90
62724- 26 26 26 6 6 6 42 42 42 2 2 6
62725- 74 74 74 250 250 250 253 253 253 253 253 253
62726-253 253 253 253 253 253 253 253 253 253 253 253
62727-253 253 253 253 253 253 242 242 242 242 242 242
62728-253 253 253 253 253 253 253 253 253 253 253 253
62729-253 253 253 253 253 253 253 253 253 253 253 253
62730-253 253 253 253 253 253 253 253 253 253 253 253
62731-253 253 253 253 253 253 253 253 253 253 253 253
62732-253 253 253 253 253 253 182 182 182 2 2 6
62733- 2 2 6 2 2 6 2 2 6 46 46 46
62734- 2 2 6 2 2 6 2 2 6 2 2 6
62735- 10 10 10 86 86 86 38 38 38 10 10 10
62736- 0 0 0 0 0 0 0 0 0 0 0 0
62737- 0 0 0 0 0 0 0 0 0 0 0 0
62738- 0 0 0 0 0 0 0 0 0 0 0 0
62739- 0 0 0 0 0 0 0 0 0 0 0 0
62740- 0 0 0 0 0 0 0 0 0 0 0 0
62741- 0 0 0 0 0 0 0 0 0 0 0 0
62742- 0 0 0 0 0 0 0 0 0 0 0 0
62743- 10 10 10 26 26 26 66 66 66 82 82 82
62744- 2 2 6 22 22 22 18 18 18 2 2 6
62745-149 149 149 253 253 253 253 253 253 253 253 253
62746-253 253 253 253 253 253 253 253 253 253 253 253
62747-253 253 253 253 253 253 234 234 234 242 242 242
62748-253 253 253 253 253 253 253 253 253 253 253 253
62749-253 253 253 253 253 253 253 253 253 253 253 253
62750-253 253 253 253 253 253 253 253 253 253 253 253
62751-253 253 253 253 253 253 253 253 253 253 253 253
62752-253 253 253 253 253 253 206 206 206 2 2 6
62753- 2 2 6 2 2 6 2 2 6 38 38 38
62754- 2 2 6 2 2 6 2 2 6 2 2 6
62755- 6 6 6 86 86 86 46 46 46 14 14 14
62756- 0 0 0 0 0 0 0 0 0 0 0 0
62757- 0 0 0 0 0 0 0 0 0 0 0 0
62758- 0 0 0 0 0 0 0 0 0 0 0 0
62759- 0 0 0 0 0 0 0 0 0 0 0 0
62760- 0 0 0 0 0 0 0 0 0 0 0 0
62761- 0 0 0 0 0 0 0 0 0 0 0 0
62762- 0 0 0 0 0 0 0 0 0 6 6 6
62763- 18 18 18 46 46 46 86 86 86 18 18 18
62764- 2 2 6 34 34 34 10 10 10 6 6 6
62765-210 210 210 253 253 253 253 253 253 253 253 253
62766-253 253 253 253 253 253 253 253 253 253 253 253
62767-253 253 253 253 253 253 234 234 234 242 242 242
62768-253 253 253 253 253 253 253 253 253 253 253 253
62769-253 253 253 253 253 253 253 253 253 253 253 253
62770-253 253 253 253 253 253 253 253 253 253 253 253
62771-253 253 253 253 253 253 253 253 253 253 253 253
62772-253 253 253 253 253 253 221 221 221 6 6 6
62773- 2 2 6 2 2 6 6 6 6 30 30 30
62774- 2 2 6 2 2 6 2 2 6 2 2 6
62775- 2 2 6 82 82 82 54 54 54 18 18 18
62776- 6 6 6 0 0 0 0 0 0 0 0 0
62777- 0 0 0 0 0 0 0 0 0 0 0 0
62778- 0 0 0 0 0 0 0 0 0 0 0 0
62779- 0 0 0 0 0 0 0 0 0 0 0 0
62780- 0 0 0 0 0 0 0 0 0 0 0 0
62781- 0 0 0 0 0 0 0 0 0 0 0 0
62782- 0 0 0 0 0 0 0 0 0 10 10 10
62783- 26 26 26 66 66 66 62 62 62 2 2 6
62784- 2 2 6 38 38 38 10 10 10 26 26 26
62785-238 238 238 253 253 253 253 253 253 253 253 253
62786-253 253 253 253 253 253 253 253 253 253 253 253
62787-253 253 253 253 253 253 231 231 231 238 238 238
62788-253 253 253 253 253 253 253 253 253 253 253 253
62789-253 253 253 253 253 253 253 253 253 253 253 253
62790-253 253 253 253 253 253 253 253 253 253 253 253
62791-253 253 253 253 253 253 253 253 253 253 253 253
62792-253 253 253 253 253 253 231 231 231 6 6 6
62793- 2 2 6 2 2 6 10 10 10 30 30 30
62794- 2 2 6 2 2 6 2 2 6 2 2 6
62795- 2 2 6 66 66 66 58 58 58 22 22 22
62796- 6 6 6 0 0 0 0 0 0 0 0 0
62797- 0 0 0 0 0 0 0 0 0 0 0 0
62798- 0 0 0 0 0 0 0 0 0 0 0 0
62799- 0 0 0 0 0 0 0 0 0 0 0 0
62800- 0 0 0 0 0 0 0 0 0 0 0 0
62801- 0 0 0 0 0 0 0 0 0 0 0 0
62802- 0 0 0 0 0 0 0 0 0 10 10 10
62803- 38 38 38 78 78 78 6 6 6 2 2 6
62804- 2 2 6 46 46 46 14 14 14 42 42 42
62805-246 246 246 253 253 253 253 253 253 253 253 253
62806-253 253 253 253 253 253 253 253 253 253 253 253
62807-253 253 253 253 253 253 231 231 231 242 242 242
62808-253 253 253 253 253 253 253 253 253 253 253 253
62809-253 253 253 253 253 253 253 253 253 253 253 253
62810-253 253 253 253 253 253 253 253 253 253 253 253
62811-253 253 253 253 253 253 253 253 253 253 253 253
62812-253 253 253 253 253 253 234 234 234 10 10 10
62813- 2 2 6 2 2 6 22 22 22 14 14 14
62814- 2 2 6 2 2 6 2 2 6 2 2 6
62815- 2 2 6 66 66 66 62 62 62 22 22 22
62816- 6 6 6 0 0 0 0 0 0 0 0 0
62817- 0 0 0 0 0 0 0 0 0 0 0 0
62818- 0 0 0 0 0 0 0 0 0 0 0 0
62819- 0 0 0 0 0 0 0 0 0 0 0 0
62820- 0 0 0 0 0 0 0 0 0 0 0 0
62821- 0 0 0 0 0 0 0 0 0 0 0 0
62822- 0 0 0 0 0 0 6 6 6 18 18 18
62823- 50 50 50 74 74 74 2 2 6 2 2 6
62824- 14 14 14 70 70 70 34 34 34 62 62 62
62825-250 250 250 253 253 253 253 253 253 253 253 253
62826-253 253 253 253 253 253 253 253 253 253 253 253
62827-253 253 253 253 253 253 231 231 231 246 246 246
62828-253 253 253 253 253 253 253 253 253 253 253 253
62829-253 253 253 253 253 253 253 253 253 253 253 253
62830-253 253 253 253 253 253 253 253 253 253 253 253
62831-253 253 253 253 253 253 253 253 253 253 253 253
62832-253 253 253 253 253 253 234 234 234 14 14 14
62833- 2 2 6 2 2 6 30 30 30 2 2 6
62834- 2 2 6 2 2 6 2 2 6 2 2 6
62835- 2 2 6 66 66 66 62 62 62 22 22 22
62836- 6 6 6 0 0 0 0 0 0 0 0 0
62837- 0 0 0 0 0 0 0 0 0 0 0 0
62838- 0 0 0 0 0 0 0 0 0 0 0 0
62839- 0 0 0 0 0 0 0 0 0 0 0 0
62840- 0 0 0 0 0 0 0 0 0 0 0 0
62841- 0 0 0 0 0 0 0 0 0 0 0 0
62842- 0 0 0 0 0 0 6 6 6 18 18 18
62843- 54 54 54 62 62 62 2 2 6 2 2 6
62844- 2 2 6 30 30 30 46 46 46 70 70 70
62845-250 250 250 253 253 253 253 253 253 253 253 253
62846-253 253 253 253 253 253 253 253 253 253 253 253
62847-253 253 253 253 253 253 231 231 231 246 246 246
62848-253 253 253 253 253 253 253 253 253 253 253 253
62849-253 253 253 253 253 253 253 253 253 253 253 253
62850-253 253 253 253 253 253 253 253 253 253 253 253
62851-253 253 253 253 253 253 253 253 253 253 253 253
62852-253 253 253 253 253 253 226 226 226 10 10 10
62853- 2 2 6 6 6 6 30 30 30 2 2 6
62854- 2 2 6 2 2 6 2 2 6 2 2 6
62855- 2 2 6 66 66 66 58 58 58 22 22 22
62856- 6 6 6 0 0 0 0 0 0 0 0 0
62857- 0 0 0 0 0 0 0 0 0 0 0 0
62858- 0 0 0 0 0 0 0 0 0 0 0 0
62859- 0 0 0 0 0 0 0 0 0 0 0 0
62860- 0 0 0 0 0 0 0 0 0 0 0 0
62861- 0 0 0 0 0 0 0 0 0 0 0 0
62862- 0 0 0 0 0 0 6 6 6 22 22 22
62863- 58 58 58 62 62 62 2 2 6 2 2 6
62864- 2 2 6 2 2 6 30 30 30 78 78 78
62865-250 250 250 253 253 253 253 253 253 253 253 253
62866-253 253 253 253 253 253 253 253 253 253 253 253
62867-253 253 253 253 253 253 231 231 231 246 246 246
62868-253 253 253 253 253 253 253 253 253 253 253 253
62869-253 253 253 253 253 253 253 253 253 253 253 253
62870-253 253 253 253 253 253 253 253 253 253 253 253
62871-253 253 253 253 253 253 253 253 253 253 253 253
62872-253 253 253 253 253 253 206 206 206 2 2 6
62873- 22 22 22 34 34 34 18 14 6 22 22 22
62874- 26 26 26 18 18 18 6 6 6 2 2 6
62875- 2 2 6 82 82 82 54 54 54 18 18 18
62876- 6 6 6 0 0 0 0 0 0 0 0 0
62877- 0 0 0 0 0 0 0 0 0 0 0 0
62878- 0 0 0 0 0 0 0 0 0 0 0 0
62879- 0 0 0 0 0 0 0 0 0 0 0 0
62880- 0 0 0 0 0 0 0 0 0 0 0 0
62881- 0 0 0 0 0 0 0 0 0 0 0 0
62882- 0 0 0 0 0 0 6 6 6 26 26 26
62883- 62 62 62 106 106 106 74 54 14 185 133 11
62884-210 162 10 121 92 8 6 6 6 62 62 62
62885-238 238 238 253 253 253 253 253 253 253 253 253
62886-253 253 253 253 253 253 253 253 253 253 253 253
62887-253 253 253 253 253 253 231 231 231 246 246 246
62888-253 253 253 253 253 253 253 253 253 253 253 253
62889-253 253 253 253 253 253 253 253 253 253 253 253
62890-253 253 253 253 253 253 253 253 253 253 253 253
62891-253 253 253 253 253 253 253 253 253 253 253 253
62892-253 253 253 253 253 253 158 158 158 18 18 18
62893- 14 14 14 2 2 6 2 2 6 2 2 6
62894- 6 6 6 18 18 18 66 66 66 38 38 38
62895- 6 6 6 94 94 94 50 50 50 18 18 18
62896- 6 6 6 0 0 0 0 0 0 0 0 0
62897- 0 0 0 0 0 0 0 0 0 0 0 0
62898- 0 0 0 0 0 0 0 0 0 0 0 0
62899- 0 0 0 0 0 0 0 0 0 0 0 0
62900- 0 0 0 0 0 0 0 0 0 0 0 0
62901- 0 0 0 0 0 0 0 0 0 6 6 6
62902- 10 10 10 10 10 10 18 18 18 38 38 38
62903- 78 78 78 142 134 106 216 158 10 242 186 14
62904-246 190 14 246 190 14 156 118 10 10 10 10
62905- 90 90 90 238 238 238 253 253 253 253 253 253
62906-253 253 253 253 253 253 253 253 253 253 253 253
62907-253 253 253 253 253 253 231 231 231 250 250 250
62908-253 253 253 253 253 253 253 253 253 253 253 253
62909-253 253 253 253 253 253 253 253 253 253 253 253
62910-253 253 253 253 253 253 253 253 253 253 253 253
62911-253 253 253 253 253 253 253 253 253 246 230 190
62912-238 204 91 238 204 91 181 142 44 37 26 9
62913- 2 2 6 2 2 6 2 2 6 2 2 6
62914- 2 2 6 2 2 6 38 38 38 46 46 46
62915- 26 26 26 106 106 106 54 54 54 18 18 18
62916- 6 6 6 0 0 0 0 0 0 0 0 0
62917- 0 0 0 0 0 0 0 0 0 0 0 0
62918- 0 0 0 0 0 0 0 0 0 0 0 0
62919- 0 0 0 0 0 0 0 0 0 0 0 0
62920- 0 0 0 0 0 0 0 0 0 0 0 0
62921- 0 0 0 6 6 6 14 14 14 22 22 22
62922- 30 30 30 38 38 38 50 50 50 70 70 70
62923-106 106 106 190 142 34 226 170 11 242 186 14
62924-246 190 14 246 190 14 246 190 14 154 114 10
62925- 6 6 6 74 74 74 226 226 226 253 253 253
62926-253 253 253 253 253 253 253 253 253 253 253 253
62927-253 253 253 253 253 253 231 231 231 250 250 250
62928-253 253 253 253 253 253 253 253 253 253 253 253
62929-253 253 253 253 253 253 253 253 253 253 253 253
62930-253 253 253 253 253 253 253 253 253 253 253 253
62931-253 253 253 253 253 253 253 253 253 228 184 62
62932-241 196 14 241 208 19 232 195 16 38 30 10
62933- 2 2 6 2 2 6 2 2 6 2 2 6
62934- 2 2 6 6 6 6 30 30 30 26 26 26
62935-203 166 17 154 142 90 66 66 66 26 26 26
62936- 6 6 6 0 0 0 0 0 0 0 0 0
62937- 0 0 0 0 0 0 0 0 0 0 0 0
62938- 0 0 0 0 0 0 0 0 0 0 0 0
62939- 0 0 0 0 0 0 0 0 0 0 0 0
62940- 0 0 0 0 0 0 0 0 0 0 0 0
62941- 6 6 6 18 18 18 38 38 38 58 58 58
62942- 78 78 78 86 86 86 101 101 101 123 123 123
62943-175 146 61 210 150 10 234 174 13 246 186 14
62944-246 190 14 246 190 14 246 190 14 238 190 10
62945-102 78 10 2 2 6 46 46 46 198 198 198
62946-253 253 253 253 253 253 253 253 253 253 253 253
62947-253 253 253 253 253 253 234 234 234 242 242 242
62948-253 253 253 253 253 253 253 253 253 253 253 253
62949-253 253 253 253 253 253 253 253 253 253 253 253
62950-253 253 253 253 253 253 253 253 253 253 253 253
62951-253 253 253 253 253 253 253 253 253 224 178 62
62952-242 186 14 241 196 14 210 166 10 22 18 6
62953- 2 2 6 2 2 6 2 2 6 2 2 6
62954- 2 2 6 2 2 6 6 6 6 121 92 8
62955-238 202 15 232 195 16 82 82 82 34 34 34
62956- 10 10 10 0 0 0 0 0 0 0 0 0
62957- 0 0 0 0 0 0 0 0 0 0 0 0
62958- 0 0 0 0 0 0 0 0 0 0 0 0
62959- 0 0 0 0 0 0 0 0 0 0 0 0
62960- 0 0 0 0 0 0 0 0 0 0 0 0
62961- 14 14 14 38 38 38 70 70 70 154 122 46
62962-190 142 34 200 144 11 197 138 11 197 138 11
62963-213 154 11 226 170 11 242 186 14 246 190 14
62964-246 190 14 246 190 14 246 190 14 246 190 14
62965-225 175 15 46 32 6 2 2 6 22 22 22
62966-158 158 158 250 250 250 253 253 253 253 253 253
62967-253 253 253 253 253 253 253 253 253 253 253 253
62968-253 253 253 253 253 253 253 253 253 253 253 253
62969-253 253 253 253 253 253 253 253 253 253 253 253
62970-253 253 253 253 253 253 253 253 253 253 253 253
62971-253 253 253 250 250 250 242 242 242 224 178 62
62972-239 182 13 236 186 11 213 154 11 46 32 6
62973- 2 2 6 2 2 6 2 2 6 2 2 6
62974- 2 2 6 2 2 6 61 42 6 225 175 15
62975-238 190 10 236 186 11 112 100 78 42 42 42
62976- 14 14 14 0 0 0 0 0 0 0 0 0
62977- 0 0 0 0 0 0 0 0 0 0 0 0
62978- 0 0 0 0 0 0 0 0 0 0 0 0
62979- 0 0 0 0 0 0 0 0 0 0 0 0
62980- 0 0 0 0 0 0 0 0 0 6 6 6
62981- 22 22 22 54 54 54 154 122 46 213 154 11
62982-226 170 11 230 174 11 226 170 11 226 170 11
62983-236 178 12 242 186 14 246 190 14 246 190 14
62984-246 190 14 246 190 14 246 190 14 246 190 14
62985-241 196 14 184 144 12 10 10 10 2 2 6
62986- 6 6 6 116 116 116 242 242 242 253 253 253
62987-253 253 253 253 253 253 253 253 253 253 253 253
62988-253 253 253 253 253 253 253 253 253 253 253 253
62989-253 253 253 253 253 253 253 253 253 253 253 253
62990-253 253 253 253 253 253 253 253 253 253 253 253
62991-253 253 253 231 231 231 198 198 198 214 170 54
62992-236 178 12 236 178 12 210 150 10 137 92 6
62993- 18 14 6 2 2 6 2 2 6 2 2 6
62994- 6 6 6 70 47 6 200 144 11 236 178 12
62995-239 182 13 239 182 13 124 112 88 58 58 58
62996- 22 22 22 6 6 6 0 0 0 0 0 0
62997- 0 0 0 0 0 0 0 0 0 0 0 0
62998- 0 0 0 0 0 0 0 0 0 0 0 0
62999- 0 0 0 0 0 0 0 0 0 0 0 0
63000- 0 0 0 0 0 0 0 0 0 10 10 10
63001- 30 30 30 70 70 70 180 133 36 226 170 11
63002-239 182 13 242 186 14 242 186 14 246 186 14
63003-246 190 14 246 190 14 246 190 14 246 190 14
63004-246 190 14 246 190 14 246 190 14 246 190 14
63005-246 190 14 232 195 16 98 70 6 2 2 6
63006- 2 2 6 2 2 6 66 66 66 221 221 221
63007-253 253 253 253 253 253 253 253 253 253 253 253
63008-253 253 253 253 253 253 253 253 253 253 253 253
63009-253 253 253 253 253 253 253 253 253 253 253 253
63010-253 253 253 253 253 253 253 253 253 253 253 253
63011-253 253 253 206 206 206 198 198 198 214 166 58
63012-230 174 11 230 174 11 216 158 10 192 133 9
63013-163 110 8 116 81 8 102 78 10 116 81 8
63014-167 114 7 197 138 11 226 170 11 239 182 13
63015-242 186 14 242 186 14 162 146 94 78 78 78
63016- 34 34 34 14 14 14 6 6 6 0 0 0
63017- 0 0 0 0 0 0 0 0 0 0 0 0
63018- 0 0 0 0 0 0 0 0 0 0 0 0
63019- 0 0 0 0 0 0 0 0 0 0 0 0
63020- 0 0 0 0 0 0 0 0 0 6 6 6
63021- 30 30 30 78 78 78 190 142 34 226 170 11
63022-239 182 13 246 190 14 246 190 14 246 190 14
63023-246 190 14 246 190 14 246 190 14 246 190 14
63024-246 190 14 246 190 14 246 190 14 246 190 14
63025-246 190 14 241 196 14 203 166 17 22 18 6
63026- 2 2 6 2 2 6 2 2 6 38 38 38
63027-218 218 218 253 253 253 253 253 253 253 253 253
63028-253 253 253 253 253 253 253 253 253 253 253 253
63029-253 253 253 253 253 253 253 253 253 253 253 253
63030-253 253 253 253 253 253 253 253 253 253 253 253
63031-250 250 250 206 206 206 198 198 198 202 162 69
63032-226 170 11 236 178 12 224 166 10 210 150 10
63033-200 144 11 197 138 11 192 133 9 197 138 11
63034-210 150 10 226 170 11 242 186 14 246 190 14
63035-246 190 14 246 186 14 225 175 15 124 112 88
63036- 62 62 62 30 30 30 14 14 14 6 6 6
63037- 0 0 0 0 0 0 0 0 0 0 0 0
63038- 0 0 0 0 0 0 0 0 0 0 0 0
63039- 0 0 0 0 0 0 0 0 0 0 0 0
63040- 0 0 0 0 0 0 0 0 0 10 10 10
63041- 30 30 30 78 78 78 174 135 50 224 166 10
63042-239 182 13 246 190 14 246 190 14 246 190 14
63043-246 190 14 246 190 14 246 190 14 246 190 14
63044-246 190 14 246 190 14 246 190 14 246 190 14
63045-246 190 14 246 190 14 241 196 14 139 102 15
63046- 2 2 6 2 2 6 2 2 6 2 2 6
63047- 78 78 78 250 250 250 253 253 253 253 253 253
63048-253 253 253 253 253 253 253 253 253 253 253 253
63049-253 253 253 253 253 253 253 253 253 253 253 253
63050-253 253 253 253 253 253 253 253 253 253 253 253
63051-250 250 250 214 214 214 198 198 198 190 150 46
63052-219 162 10 236 178 12 234 174 13 224 166 10
63053-216 158 10 213 154 11 213 154 11 216 158 10
63054-226 170 11 239 182 13 246 190 14 246 190 14
63055-246 190 14 246 190 14 242 186 14 206 162 42
63056-101 101 101 58 58 58 30 30 30 14 14 14
63057- 6 6 6 0 0 0 0 0 0 0 0 0
63058- 0 0 0 0 0 0 0 0 0 0 0 0
63059- 0 0 0 0 0 0 0 0 0 0 0 0
63060- 0 0 0 0 0 0 0 0 0 10 10 10
63061- 30 30 30 74 74 74 174 135 50 216 158 10
63062-236 178 12 246 190 14 246 190 14 246 190 14
63063-246 190 14 246 190 14 246 190 14 246 190 14
63064-246 190 14 246 190 14 246 190 14 246 190 14
63065-246 190 14 246 190 14 241 196 14 226 184 13
63066- 61 42 6 2 2 6 2 2 6 2 2 6
63067- 22 22 22 238 238 238 253 253 253 253 253 253
63068-253 253 253 253 253 253 253 253 253 253 253 253
63069-253 253 253 253 253 253 253 253 253 253 253 253
63070-253 253 253 253 253 253 253 253 253 253 253 253
63071-253 253 253 226 226 226 187 187 187 180 133 36
63072-216 158 10 236 178 12 239 182 13 236 178 12
63073-230 174 11 226 170 11 226 170 11 230 174 11
63074-236 178 12 242 186 14 246 190 14 246 190 14
63075-246 190 14 246 190 14 246 186 14 239 182 13
63076-206 162 42 106 106 106 66 66 66 34 34 34
63077- 14 14 14 6 6 6 0 0 0 0 0 0
63078- 0 0 0 0 0 0 0 0 0 0 0 0
63079- 0 0 0 0 0 0 0 0 0 0 0 0
63080- 0 0 0 0 0 0 0 0 0 6 6 6
63081- 26 26 26 70 70 70 163 133 67 213 154 11
63082-236 178 12 246 190 14 246 190 14 246 190 14
63083-246 190 14 246 190 14 246 190 14 246 190 14
63084-246 190 14 246 190 14 246 190 14 246 190 14
63085-246 190 14 246 190 14 246 190 14 241 196 14
63086-190 146 13 18 14 6 2 2 6 2 2 6
63087- 46 46 46 246 246 246 253 253 253 253 253 253
63088-253 253 253 253 253 253 253 253 253 253 253 253
63089-253 253 253 253 253 253 253 253 253 253 253 253
63090-253 253 253 253 253 253 253 253 253 253 253 253
63091-253 253 253 221 221 221 86 86 86 156 107 11
63092-216 158 10 236 178 12 242 186 14 246 186 14
63093-242 186 14 239 182 13 239 182 13 242 186 14
63094-242 186 14 246 186 14 246 190 14 246 190 14
63095-246 190 14 246 190 14 246 190 14 246 190 14
63096-242 186 14 225 175 15 142 122 72 66 66 66
63097- 30 30 30 10 10 10 0 0 0 0 0 0
63098- 0 0 0 0 0 0 0 0 0 0 0 0
63099- 0 0 0 0 0 0 0 0 0 0 0 0
63100- 0 0 0 0 0 0 0 0 0 6 6 6
63101- 26 26 26 70 70 70 163 133 67 210 150 10
63102-236 178 12 246 190 14 246 190 14 246 190 14
63103-246 190 14 246 190 14 246 190 14 246 190 14
63104-246 190 14 246 190 14 246 190 14 246 190 14
63105-246 190 14 246 190 14 246 190 14 246 190 14
63106-232 195 16 121 92 8 34 34 34 106 106 106
63107-221 221 221 253 253 253 253 253 253 253 253 253
63108-253 253 253 253 253 253 253 253 253 253 253 253
63109-253 253 253 253 253 253 253 253 253 253 253 253
63110-253 253 253 253 253 253 253 253 253 253 253 253
63111-242 242 242 82 82 82 18 14 6 163 110 8
63112-216 158 10 236 178 12 242 186 14 246 190 14
63113-246 190 14 246 190 14 246 190 14 246 190 14
63114-246 190 14 246 190 14 246 190 14 246 190 14
63115-246 190 14 246 190 14 246 190 14 246 190 14
63116-246 190 14 246 190 14 242 186 14 163 133 67
63117- 46 46 46 18 18 18 6 6 6 0 0 0
63118- 0 0 0 0 0 0 0 0 0 0 0 0
63119- 0 0 0 0 0 0 0 0 0 0 0 0
63120- 0 0 0 0 0 0 0 0 0 10 10 10
63121- 30 30 30 78 78 78 163 133 67 210 150 10
63122-236 178 12 246 186 14 246 190 14 246 190 14
63123-246 190 14 246 190 14 246 190 14 246 190 14
63124-246 190 14 246 190 14 246 190 14 246 190 14
63125-246 190 14 246 190 14 246 190 14 246 190 14
63126-241 196 14 215 174 15 190 178 144 253 253 253
63127-253 253 253 253 253 253 253 253 253 253 253 253
63128-253 253 253 253 253 253 253 253 253 253 253 253
63129-253 253 253 253 253 253 253 253 253 253 253 253
63130-253 253 253 253 253 253 253 253 253 218 218 218
63131- 58 58 58 2 2 6 22 18 6 167 114 7
63132-216 158 10 236 178 12 246 186 14 246 190 14
63133-246 190 14 246 190 14 246 190 14 246 190 14
63134-246 190 14 246 190 14 246 190 14 246 190 14
63135-246 190 14 246 190 14 246 190 14 246 190 14
63136-246 190 14 246 186 14 242 186 14 190 150 46
63137- 54 54 54 22 22 22 6 6 6 0 0 0
63138- 0 0 0 0 0 0 0 0 0 0 0 0
63139- 0 0 0 0 0 0 0 0 0 0 0 0
63140- 0 0 0 0 0 0 0 0 0 14 14 14
63141- 38 38 38 86 86 86 180 133 36 213 154 11
63142-236 178 12 246 186 14 246 190 14 246 190 14
63143-246 190 14 246 190 14 246 190 14 246 190 14
63144-246 190 14 246 190 14 246 190 14 246 190 14
63145-246 190 14 246 190 14 246 190 14 246 190 14
63146-246 190 14 232 195 16 190 146 13 214 214 214
63147-253 253 253 253 253 253 253 253 253 253 253 253
63148-253 253 253 253 253 253 253 253 253 253 253 253
63149-253 253 253 253 253 253 253 253 253 253 253 253
63150-253 253 253 250 250 250 170 170 170 26 26 26
63151- 2 2 6 2 2 6 37 26 9 163 110 8
63152-219 162 10 239 182 13 246 186 14 246 190 14
63153-246 190 14 246 190 14 246 190 14 246 190 14
63154-246 190 14 246 190 14 246 190 14 246 190 14
63155-246 190 14 246 190 14 246 190 14 246 190 14
63156-246 186 14 236 178 12 224 166 10 142 122 72
63157- 46 46 46 18 18 18 6 6 6 0 0 0
63158- 0 0 0 0 0 0 0 0 0 0 0 0
63159- 0 0 0 0 0 0 0 0 0 0 0 0
63160- 0 0 0 0 0 0 6 6 6 18 18 18
63161- 50 50 50 109 106 95 192 133 9 224 166 10
63162-242 186 14 246 190 14 246 190 14 246 190 14
63163-246 190 14 246 190 14 246 190 14 246 190 14
63164-246 190 14 246 190 14 246 190 14 246 190 14
63165-246 190 14 246 190 14 246 190 14 246 190 14
63166-242 186 14 226 184 13 210 162 10 142 110 46
63167-226 226 226 253 253 253 253 253 253 253 253 253
63168-253 253 253 253 253 253 253 253 253 253 253 253
63169-253 253 253 253 253 253 253 253 253 253 253 253
63170-198 198 198 66 66 66 2 2 6 2 2 6
63171- 2 2 6 2 2 6 50 34 6 156 107 11
63172-219 162 10 239 182 13 246 186 14 246 190 14
63173-246 190 14 246 190 14 246 190 14 246 190 14
63174-246 190 14 246 190 14 246 190 14 246 190 14
63175-246 190 14 246 190 14 246 190 14 242 186 14
63176-234 174 13 213 154 11 154 122 46 66 66 66
63177- 30 30 30 10 10 10 0 0 0 0 0 0
63178- 0 0 0 0 0 0 0 0 0 0 0 0
63179- 0 0 0 0 0 0 0 0 0 0 0 0
63180- 0 0 0 0 0 0 6 6 6 22 22 22
63181- 58 58 58 154 121 60 206 145 10 234 174 13
63182-242 186 14 246 186 14 246 190 14 246 190 14
63183-246 190 14 246 190 14 246 190 14 246 190 14
63184-246 190 14 246 190 14 246 190 14 246 190 14
63185-246 190 14 246 190 14 246 190 14 246 190 14
63186-246 186 14 236 178 12 210 162 10 163 110 8
63187- 61 42 6 138 138 138 218 218 218 250 250 250
63188-253 253 253 253 253 253 253 253 253 250 250 250
63189-242 242 242 210 210 210 144 144 144 66 66 66
63190- 6 6 6 2 2 6 2 2 6 2 2 6
63191- 2 2 6 2 2 6 61 42 6 163 110 8
63192-216 158 10 236 178 12 246 190 14 246 190 14
63193-246 190 14 246 190 14 246 190 14 246 190 14
63194-246 190 14 246 190 14 246 190 14 246 190 14
63195-246 190 14 239 182 13 230 174 11 216 158 10
63196-190 142 34 124 112 88 70 70 70 38 38 38
63197- 18 18 18 6 6 6 0 0 0 0 0 0
63198- 0 0 0 0 0 0 0 0 0 0 0 0
63199- 0 0 0 0 0 0 0 0 0 0 0 0
63200- 0 0 0 0 0 0 6 6 6 22 22 22
63201- 62 62 62 168 124 44 206 145 10 224 166 10
63202-236 178 12 239 182 13 242 186 14 242 186 14
63203-246 186 14 246 190 14 246 190 14 246 190 14
63204-246 190 14 246 190 14 246 190 14 246 190 14
63205-246 190 14 246 190 14 246 190 14 246 190 14
63206-246 190 14 236 178 12 216 158 10 175 118 6
63207- 80 54 7 2 2 6 6 6 6 30 30 30
63208- 54 54 54 62 62 62 50 50 50 38 38 38
63209- 14 14 14 2 2 6 2 2 6 2 2 6
63210- 2 2 6 2 2 6 2 2 6 2 2 6
63211- 2 2 6 6 6 6 80 54 7 167 114 7
63212-213 154 11 236 178 12 246 190 14 246 190 14
63213-246 190 14 246 190 14 246 190 14 246 190 14
63214-246 190 14 242 186 14 239 182 13 239 182 13
63215-230 174 11 210 150 10 174 135 50 124 112 88
63216- 82 82 82 54 54 54 34 34 34 18 18 18
63217- 6 6 6 0 0 0 0 0 0 0 0 0
63218- 0 0 0 0 0 0 0 0 0 0 0 0
63219- 0 0 0 0 0 0 0 0 0 0 0 0
63220- 0 0 0 0 0 0 6 6 6 18 18 18
63221- 50 50 50 158 118 36 192 133 9 200 144 11
63222-216 158 10 219 162 10 224 166 10 226 170 11
63223-230 174 11 236 178 12 239 182 13 239 182 13
63224-242 186 14 246 186 14 246 190 14 246 190 14
63225-246 190 14 246 190 14 246 190 14 246 190 14
63226-246 186 14 230 174 11 210 150 10 163 110 8
63227-104 69 6 10 10 10 2 2 6 2 2 6
63228- 2 2 6 2 2 6 2 2 6 2 2 6
63229- 2 2 6 2 2 6 2 2 6 2 2 6
63230- 2 2 6 2 2 6 2 2 6 2 2 6
63231- 2 2 6 6 6 6 91 60 6 167 114 7
63232-206 145 10 230 174 11 242 186 14 246 190 14
63233-246 190 14 246 190 14 246 186 14 242 186 14
63234-239 182 13 230 174 11 224 166 10 213 154 11
63235-180 133 36 124 112 88 86 86 86 58 58 58
63236- 38 38 38 22 22 22 10 10 10 6 6 6
63237- 0 0 0 0 0 0 0 0 0 0 0 0
63238- 0 0 0 0 0 0 0 0 0 0 0 0
63239- 0 0 0 0 0 0 0 0 0 0 0 0
63240- 0 0 0 0 0 0 0 0 0 14 14 14
63241- 34 34 34 70 70 70 138 110 50 158 118 36
63242-167 114 7 180 123 7 192 133 9 197 138 11
63243-200 144 11 206 145 10 213 154 11 219 162 10
63244-224 166 10 230 174 11 239 182 13 242 186 14
63245-246 186 14 246 186 14 246 186 14 246 186 14
63246-239 182 13 216 158 10 185 133 11 152 99 6
63247-104 69 6 18 14 6 2 2 6 2 2 6
63248- 2 2 6 2 2 6 2 2 6 2 2 6
63249- 2 2 6 2 2 6 2 2 6 2 2 6
63250- 2 2 6 2 2 6 2 2 6 2 2 6
63251- 2 2 6 6 6 6 80 54 7 152 99 6
63252-192 133 9 219 162 10 236 178 12 239 182 13
63253-246 186 14 242 186 14 239 182 13 236 178 12
63254-224 166 10 206 145 10 192 133 9 154 121 60
63255- 94 94 94 62 62 62 42 42 42 22 22 22
63256- 14 14 14 6 6 6 0 0 0 0 0 0
63257- 0 0 0 0 0 0 0 0 0 0 0 0
63258- 0 0 0 0 0 0 0 0 0 0 0 0
63259- 0 0 0 0 0 0 0 0 0 0 0 0
63260- 0 0 0 0 0 0 0 0 0 6 6 6
63261- 18 18 18 34 34 34 58 58 58 78 78 78
63262-101 98 89 124 112 88 142 110 46 156 107 11
63263-163 110 8 167 114 7 175 118 6 180 123 7
63264-185 133 11 197 138 11 210 150 10 219 162 10
63265-226 170 11 236 178 12 236 178 12 234 174 13
63266-219 162 10 197 138 11 163 110 8 130 83 6
63267- 91 60 6 10 10 10 2 2 6 2 2 6
63268- 18 18 18 38 38 38 38 38 38 38 38 38
63269- 38 38 38 38 38 38 38 38 38 38 38 38
63270- 38 38 38 38 38 38 26 26 26 2 2 6
63271- 2 2 6 6 6 6 70 47 6 137 92 6
63272-175 118 6 200 144 11 219 162 10 230 174 11
63273-234 174 13 230 174 11 219 162 10 210 150 10
63274-192 133 9 163 110 8 124 112 88 82 82 82
63275- 50 50 50 30 30 30 14 14 14 6 6 6
63276- 0 0 0 0 0 0 0 0 0 0 0 0
63277- 0 0 0 0 0 0 0 0 0 0 0 0
63278- 0 0 0 0 0 0 0 0 0 0 0 0
63279- 0 0 0 0 0 0 0 0 0 0 0 0
63280- 0 0 0 0 0 0 0 0 0 0 0 0
63281- 6 6 6 14 14 14 22 22 22 34 34 34
63282- 42 42 42 58 58 58 74 74 74 86 86 86
63283-101 98 89 122 102 70 130 98 46 121 87 25
63284-137 92 6 152 99 6 163 110 8 180 123 7
63285-185 133 11 197 138 11 206 145 10 200 144 11
63286-180 123 7 156 107 11 130 83 6 104 69 6
63287- 50 34 6 54 54 54 110 110 110 101 98 89
63288- 86 86 86 82 82 82 78 78 78 78 78 78
63289- 78 78 78 78 78 78 78 78 78 78 78 78
63290- 78 78 78 82 82 82 86 86 86 94 94 94
63291-106 106 106 101 101 101 86 66 34 124 80 6
63292-156 107 11 180 123 7 192 133 9 200 144 11
63293-206 145 10 200 144 11 192 133 9 175 118 6
63294-139 102 15 109 106 95 70 70 70 42 42 42
63295- 22 22 22 10 10 10 0 0 0 0 0 0
63296- 0 0 0 0 0 0 0 0 0 0 0 0
63297- 0 0 0 0 0 0 0 0 0 0 0 0
63298- 0 0 0 0 0 0 0 0 0 0 0 0
63299- 0 0 0 0 0 0 0 0 0 0 0 0
63300- 0 0 0 0 0 0 0 0 0 0 0 0
63301- 0 0 0 0 0 0 6 6 6 10 10 10
63302- 14 14 14 22 22 22 30 30 30 38 38 38
63303- 50 50 50 62 62 62 74 74 74 90 90 90
63304-101 98 89 112 100 78 121 87 25 124 80 6
63305-137 92 6 152 99 6 152 99 6 152 99 6
63306-138 86 6 124 80 6 98 70 6 86 66 30
63307-101 98 89 82 82 82 58 58 58 46 46 46
63308- 38 38 38 34 34 34 34 34 34 34 34 34
63309- 34 34 34 34 34 34 34 34 34 34 34 34
63310- 34 34 34 34 34 34 38 38 38 42 42 42
63311- 54 54 54 82 82 82 94 86 76 91 60 6
63312-134 86 6 156 107 11 167 114 7 175 118 6
63313-175 118 6 167 114 7 152 99 6 121 87 25
63314-101 98 89 62 62 62 34 34 34 18 18 18
63315- 6 6 6 0 0 0 0 0 0 0 0 0
63316- 0 0 0 0 0 0 0 0 0 0 0 0
63317- 0 0 0 0 0 0 0 0 0 0 0 0
63318- 0 0 0 0 0 0 0 0 0 0 0 0
63319- 0 0 0 0 0 0 0 0 0 0 0 0
63320- 0 0 0 0 0 0 0 0 0 0 0 0
63321- 0 0 0 0 0 0 0 0 0 0 0 0
63322- 0 0 0 6 6 6 6 6 6 10 10 10
63323- 18 18 18 22 22 22 30 30 30 42 42 42
63324- 50 50 50 66 66 66 86 86 86 101 98 89
63325-106 86 58 98 70 6 104 69 6 104 69 6
63326-104 69 6 91 60 6 82 62 34 90 90 90
63327- 62 62 62 38 38 38 22 22 22 14 14 14
63328- 10 10 10 10 10 10 10 10 10 10 10 10
63329- 10 10 10 10 10 10 6 6 6 10 10 10
63330- 10 10 10 10 10 10 10 10 10 14 14 14
63331- 22 22 22 42 42 42 70 70 70 89 81 66
63332- 80 54 7 104 69 6 124 80 6 137 92 6
63333-134 86 6 116 81 8 100 82 52 86 86 86
63334- 58 58 58 30 30 30 14 14 14 6 6 6
63335- 0 0 0 0 0 0 0 0 0 0 0 0
63336- 0 0 0 0 0 0 0 0 0 0 0 0
63337- 0 0 0 0 0 0 0 0 0 0 0 0
63338- 0 0 0 0 0 0 0 0 0 0 0 0
63339- 0 0 0 0 0 0 0 0 0 0 0 0
63340- 0 0 0 0 0 0 0 0 0 0 0 0
63341- 0 0 0 0 0 0 0 0 0 0 0 0
63342- 0 0 0 0 0 0 0 0 0 0 0 0
63343- 0 0 0 6 6 6 10 10 10 14 14 14
63344- 18 18 18 26 26 26 38 38 38 54 54 54
63345- 70 70 70 86 86 86 94 86 76 89 81 66
63346- 89 81 66 86 86 86 74 74 74 50 50 50
63347- 30 30 30 14 14 14 6 6 6 0 0 0
63348- 0 0 0 0 0 0 0 0 0 0 0 0
63349- 0 0 0 0 0 0 0 0 0 0 0 0
63350- 0 0 0 0 0 0 0 0 0 0 0 0
63351- 6 6 6 18 18 18 34 34 34 58 58 58
63352- 82 82 82 89 81 66 89 81 66 89 81 66
63353- 94 86 66 94 86 76 74 74 74 50 50 50
63354- 26 26 26 14 14 14 6 6 6 0 0 0
63355- 0 0 0 0 0 0 0 0 0 0 0 0
63356- 0 0 0 0 0 0 0 0 0 0 0 0
63357- 0 0 0 0 0 0 0 0 0 0 0 0
63358- 0 0 0 0 0 0 0 0 0 0 0 0
63359- 0 0 0 0 0 0 0 0 0 0 0 0
63360- 0 0 0 0 0 0 0 0 0 0 0 0
63361- 0 0 0 0 0 0 0 0 0 0 0 0
63362- 0 0 0 0 0 0 0 0 0 0 0 0
63363- 0 0 0 0 0 0 0 0 0 0 0 0
63364- 6 6 6 6 6 6 14 14 14 18 18 18
63365- 30 30 30 38 38 38 46 46 46 54 54 54
63366- 50 50 50 42 42 42 30 30 30 18 18 18
63367- 10 10 10 0 0 0 0 0 0 0 0 0
63368- 0 0 0 0 0 0 0 0 0 0 0 0
63369- 0 0 0 0 0 0 0 0 0 0 0 0
63370- 0 0 0 0 0 0 0 0 0 0 0 0
63371- 0 0 0 6 6 6 14 14 14 26 26 26
63372- 38 38 38 50 50 50 58 58 58 58 58 58
63373- 54 54 54 42 42 42 30 30 30 18 18 18
63374- 10 10 10 0 0 0 0 0 0 0 0 0
63375- 0 0 0 0 0 0 0 0 0 0 0 0
63376- 0 0 0 0 0 0 0 0 0 0 0 0
63377- 0 0 0 0 0 0 0 0 0 0 0 0
63378- 0 0 0 0 0 0 0 0 0 0 0 0
63379- 0 0 0 0 0 0 0 0 0 0 0 0
63380- 0 0 0 0 0 0 0 0 0 0 0 0
63381- 0 0 0 0 0 0 0 0 0 0 0 0
63382- 0 0 0 0 0 0 0 0 0 0 0 0
63383- 0 0 0 0 0 0 0 0 0 0 0 0
63384- 0 0 0 0 0 0 0 0 0 6 6 6
63385- 6 6 6 10 10 10 14 14 14 18 18 18
63386- 18 18 18 14 14 14 10 10 10 6 6 6
63387- 0 0 0 0 0 0 0 0 0 0 0 0
63388- 0 0 0 0 0 0 0 0 0 0 0 0
63389- 0 0 0 0 0 0 0 0 0 0 0 0
63390- 0 0 0 0 0 0 0 0 0 0 0 0
63391- 0 0 0 0 0 0 0 0 0 6 6 6
63392- 14 14 14 18 18 18 22 22 22 22 22 22
63393- 18 18 18 14 14 14 10 10 10 6 6 6
63394- 0 0 0 0 0 0 0 0 0 0 0 0
63395- 0 0 0 0 0 0 0 0 0 0 0 0
63396- 0 0 0 0 0 0 0 0 0 0 0 0
63397- 0 0 0 0 0 0 0 0 0 0 0 0
63398- 0 0 0 0 0 0 0 0 0 0 0 0
63399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63412+4 4 4 4 4 4
63413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63426+4 4 4 4 4 4
63427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63440+4 4 4 4 4 4
63441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63454+4 4 4 4 4 4
63455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63468+4 4 4 4 4 4
63469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63482+4 4 4 4 4 4
63483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63487+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
63488+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
63489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63492+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
63493+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63494+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
63495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63496+4 4 4 4 4 4
63497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63501+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
63502+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
63503+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63506+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
63507+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
63508+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
63509+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63510+4 4 4 4 4 4
63511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63515+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
63516+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
63517+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63520+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
63521+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
63522+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
63523+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
63524+4 4 4 4 4 4
63525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63528+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
63529+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
63530+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
63531+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
63532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63533+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63534+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
63535+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
63536+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
63537+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
63538+4 4 4 4 4 4
63539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63542+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
63543+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
63544+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
63545+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
63546+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63547+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
63548+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
63549+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
63550+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
63551+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
63552+4 4 4 4 4 4
63553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63556+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
63557+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
63558+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
63559+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
63560+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
63561+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
63562+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
63563+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
63564+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
63565+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
63566+4 4 4 4 4 4
63567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63569+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
63570+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
63571+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
63572+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
63573+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
63574+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
63575+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
63576+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
63577+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
63578+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
63579+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
63580+4 4 4 4 4 4
63581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63583+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
63584+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
63585+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
63586+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
63587+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
63588+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
63589+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
63590+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
63591+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
63592+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
63593+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
63594+4 4 4 4 4 4
63595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63597+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
63598+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
63599+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
63600+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
63601+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
63602+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
63603+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
63604+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
63605+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
63606+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
63607+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63608+4 4 4 4 4 4
63609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63611+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
63612+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
63613+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
63614+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
63615+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
63616+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
63617+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
63618+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
63619+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
63620+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
63621+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
63622+4 4 4 4 4 4
63623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63624+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
63625+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
63626+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
63627+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
63628+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
63629+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
63630+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
63631+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
63632+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
63633+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
63634+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
63635+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
63636+4 4 4 4 4 4
63637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63638+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
63639+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
63640+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
63641+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
63642+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
63643+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
63644+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
63645+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
63646+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
63647+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
63648+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
63649+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
63650+0 0 0 4 4 4
63651+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63652+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
63653+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
63654+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
63655+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
63656+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
63657+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
63658+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
63659+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
63660+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
63661+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
63662+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
63663+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
63664+2 0 0 0 0 0
63665+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
63666+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
63667+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
63668+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
63669+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
63670+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
63671+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
63672+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
63673+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
63674+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
63675+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
63676+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
63677+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
63678+37 38 37 0 0 0
63679+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63680+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
63681+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
63682+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
63683+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
63684+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
63685+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
63686+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
63687+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
63688+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
63689+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
63690+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
63691+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
63692+85 115 134 4 0 0
63693+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
63694+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
63695+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
63696+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
63697+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
63698+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
63699+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
63700+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
63701+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
63702+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
63703+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
63704+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
63705+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
63706+60 73 81 4 0 0
63707+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
63708+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
63709+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
63710+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
63711+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
63712+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
63713+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
63714+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
63715+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
63716+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
63717+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
63718+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
63719+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
63720+16 19 21 4 0 0
63721+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
63722+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
63723+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
63724+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
63725+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
63726+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
63727+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
63728+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
63729+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
63730+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
63731+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
63732+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
63733+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
63734+4 0 0 4 3 3
63735+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
63736+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
63737+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
63738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
63739+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
63740+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
63741+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
63742+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
63743+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
63744+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
63745+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
63746+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
63747+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
63748+3 2 2 4 4 4
63749+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
63750+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
63751+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
63752+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63753+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
63754+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
63755+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
63756+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
63757+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
63758+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
63759+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
63760+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
63761+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
63762+4 4 4 4 4 4
63763+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
63764+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
63765+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
63766+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
63767+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
63768+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
63769+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
63770+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
63771+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
63772+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
63773+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
63774+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
63775+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
63776+4 4 4 4 4 4
63777+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
63778+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
63779+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
63780+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
63781+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
63782+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63783+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
63784+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
63785+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
63786+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
63787+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
63788+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
63789+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
63790+5 5 5 5 5 5
63791+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
63792+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
63793+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
63794+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
63795+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
63796+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63797+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
63798+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
63799+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
63800+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
63801+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
63802+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
63803+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
63804+5 5 5 4 4 4
63805+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
63806+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
63807+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
63808+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
63809+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63810+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
63811+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
63812+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
63813+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
63814+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
63815+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
63816+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
63817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63818+4 4 4 4 4 4
63819+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
63820+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
63821+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
63822+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
63823+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
63824+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63825+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63826+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
63827+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
63828+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
63829+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
63830+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
63831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63832+4 4 4 4 4 4
63833+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
63834+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
63835+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
63836+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
63837+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63838+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
63839+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
63840+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
63841+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
63842+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
63843+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
63844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63846+4 4 4 4 4 4
63847+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
63848+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
63849+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
63850+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
63851+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63852+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63853+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63854+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
63855+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
63856+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
63857+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
63858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63860+4 4 4 4 4 4
63861+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
63862+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
63863+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
63864+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
63865+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63866+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
63867+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63868+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
63869+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
63870+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
63871+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63874+4 4 4 4 4 4
63875+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
63876+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
63877+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
63878+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
63879+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63880+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
63881+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
63882+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
63883+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
63884+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
63885+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
63886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63888+4 4 4 4 4 4
63889+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
63890+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
63891+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
63892+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
63893+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63894+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
63895+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
63896+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
63897+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
63898+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
63899+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
63900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63902+4 4 4 4 4 4
63903+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
63904+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
63905+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
63906+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63907+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
63908+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
63909+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
63910+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
63911+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
63912+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
63913+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63916+4 4 4 4 4 4
63917+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
63918+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
63919+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
63920+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63921+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63922+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
63923+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
63924+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
63925+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
63926+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
63927+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63930+4 4 4 4 4 4
63931+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
63932+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
63933+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63934+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63935+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63936+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
63937+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
63938+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
63939+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
63940+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
63941+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63944+4 4 4 4 4 4
63945+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
63946+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
63947+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63948+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63949+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63950+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
63951+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
63952+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
63953+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63954+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63955+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63958+4 4 4 4 4 4
63959+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63960+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
63961+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63962+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
63963+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
63964+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
63965+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
63966+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
63967+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63968+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63969+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63972+4 4 4 4 4 4
63973+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63974+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
63975+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63976+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
63977+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63978+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
63979+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
63980+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
63981+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63982+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63983+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63986+4 4 4 4 4 4
63987+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
63988+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
63989+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63990+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
63991+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
63992+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
63993+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
63994+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
63995+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63996+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63997+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64000+4 4 4 4 4 4
64001+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
64002+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
64003+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64004+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
64005+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
64006+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
64007+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
64008+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
64009+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
64010+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64011+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64014+4 4 4 4 4 4
64015+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
64016+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
64017+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
64018+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
64019+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
64020+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
64021+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
64022+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
64023+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64024+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64025+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64028+4 4 4 4 4 4
64029+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
64030+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
64031+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64032+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
64033+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
64034+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
64035+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
64036+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
64037+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
64038+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64039+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64042+4 4 4 4 4 4
64043+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
64044+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
64045+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
64046+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
64047+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
64048+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
64049+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
64050+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
64051+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64052+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64053+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64056+4 4 4 4 4 4
64057+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64058+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
64059+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
64060+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
64061+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
64062+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
64063+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
64064+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
64065+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64066+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64067+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64070+4 4 4 4 4 4
64071+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64072+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
64073+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
64074+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
64075+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
64076+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
64077+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64078+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
64079+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64080+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64081+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64084+4 4 4 4 4 4
64085+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64086+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
64087+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
64088+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64089+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
64090+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
64091+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64092+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
64093+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64094+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64095+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64098+4 4 4 4 4 4
64099+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64100+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
64101+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
64102+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
64103+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
64104+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
64105+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
64106+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
64107+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
64108+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64109+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64112+4 4 4 4 4 4
64113+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64114+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
64115+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
64116+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
64117+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
64118+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
64119+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
64120+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
64121+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
64122+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64123+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64126+4 4 4 4 4 4
64127+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
64128+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
64129+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
64130+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
64131+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64132+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
64133+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
64134+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
64135+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
64136+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64137+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64140+4 4 4 4 4 4
64141+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64142+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
64143+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
64144+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
64145+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
64146+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
64147+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
64148+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
64149+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
64150+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64151+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64154+4 4 4 4 4 4
64155+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
64156+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
64157+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
64158+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
64159+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
64160+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
64161+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
64162+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
64163+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
64164+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
64165+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64168+4 4 4 4 4 4
64169+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
64170+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64171+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
64172+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
64173+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
64174+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
64175+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
64176+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
64177+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
64178+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
64179+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64182+4 4 4 4 4 4
64183+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
64184+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64185+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
64186+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
64187+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
64188+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
64189+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64190+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
64191+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
64192+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
64193+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64196+4 4 4 4 4 4
64197+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
64198+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
64199+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
64200+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
64201+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
64202+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
64203+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
64204+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
64205+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
64206+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
64207+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64210+4 4 4 4 4 4
64211+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
64212+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
64213+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64214+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
64215+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
64216+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
64217+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
64218+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
64219+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
64220+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
64221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64224+4 4 4 4 4 4
64225+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64226+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
64227+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
64228+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
64229+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
64230+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
64231+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
64232+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
64233+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
64234+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64238+4 4 4 4 4 4
64239+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
64240+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
64241+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
64242+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
64243+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
64244+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
64245+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
64246+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
64247+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
64248+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
64249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64252+4 4 4 4 4 4
64253+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
64254+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
64255+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
64256+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
64257+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
64258+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
64259+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
64260+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
64261+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
64262+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64266+4 4 4 4 4 4
64267+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
64268+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64269+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
64270+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64271+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
64272+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
64273+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
64274+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
64275+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
64276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64280+4 4 4 4 4 4
64281+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
64282+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
64283+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
64284+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
64285+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
64286+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
64287+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
64288+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
64289+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
64290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64294+4 4 4 4 4 4
64295+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64296+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
64297+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
64298+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
64299+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
64300+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
64301+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
64302+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
64303+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64308+4 4 4 4 4 4
64309+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
64310+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
64311+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64312+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
64313+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
64314+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
64315+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
64316+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
64317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64322+4 4 4 4 4 4
64323+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64324+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
64325+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
64326+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
64327+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
64328+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
64329+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
64330+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64336+4 4 4 4 4 4
64337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64338+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
64339+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64340+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
64341+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
64342+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
64343+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
64344+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
64345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64350+4 4 4 4 4 4
64351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64352+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
64353+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
64354+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
64355+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
64356+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
64357+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
64358+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
64359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64364+4 4 4 4 4 4
64365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64366+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
64367+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
64368+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64369+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
64370+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
64371+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
64372+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64378+4 4 4 4 4 4
64379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64381+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64382+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
64383+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
64384+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
64385+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
64386+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64392+4 4 4 4 4 4
64393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64396+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64397+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
64398+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
64399+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
64400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64406+4 4 4 4 4 4
64407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64410+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64411+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64412+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
64413+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
64414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64420+4 4 4 4 4 4
64421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64424+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64425+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64426+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64427+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
64428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64434+4 4 4 4 4 4
64435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64438+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
64439+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
64440+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
64441+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
64442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64448+4 4 4 4 4 4
64449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64453+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
64454+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64455+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64462+4 4 4 4 4 4
64463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64467+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
64468+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
64469+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
64470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64476+4 4 4 4 4 4
64477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64481+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
64482+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
64483+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64490+4 4 4 4 4 4
64491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64495+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
64496+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
64497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64504+4 4 4 4 4 4
64505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64509+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64510+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
64511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64518+4 4 4 4 4 4
64519diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
64520index 443e3c8..c443d6a 100644
64521--- a/drivers/video/nvidia/nv_backlight.c
64522+++ b/drivers/video/nvidia/nv_backlight.c
64523@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
64524 return bd->props.brightness;
64525 }
64526
64527-static struct backlight_ops nvidia_bl_ops = {
64528+static const struct backlight_ops nvidia_bl_ops = {
64529 .get_brightness = nvidia_bl_get_brightness,
64530 .update_status = nvidia_bl_update_status,
64531 };
64532diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
64533index d94c57f..912984c 100644
64534--- a/drivers/video/riva/fbdev.c
64535+++ b/drivers/video/riva/fbdev.c
64536@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
64537 return bd->props.brightness;
64538 }
64539
64540-static struct backlight_ops riva_bl_ops = {
64541+static const struct backlight_ops riva_bl_ops = {
64542 .get_brightness = riva_bl_get_brightness,
64543 .update_status = riva_bl_update_status,
64544 };
64545diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
64546index 54fbb29..2c108fc 100644
64547--- a/drivers/video/uvesafb.c
64548+++ b/drivers/video/uvesafb.c
64549@@ -18,6 +18,7 @@
64550 #include <linux/fb.h>
64551 #include <linux/io.h>
64552 #include <linux/mutex.h>
64553+#include <linux/moduleloader.h>
64554 #include <video/edid.h>
64555 #include <video/uvesafb.h>
64556 #ifdef CONFIG_X86
64557@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
64558 NULL,
64559 };
64560
64561- return call_usermodehelper(v86d_path, argv, envp, 1);
64562+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
64563 }
64564
64565 /*
64566@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
64567 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
64568 par->pmi_setpal = par->ypan = 0;
64569 } else {
64570+
64571+#ifdef CONFIG_PAX_KERNEXEC
64572+#ifdef CONFIG_MODULES
64573+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
64574+#endif
64575+ if (!par->pmi_code) {
64576+ par->pmi_setpal = par->ypan = 0;
64577+ return 0;
64578+ }
64579+#endif
64580+
64581 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
64582 + task->t.regs.edi);
64583+
64584+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64585+ pax_open_kernel();
64586+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
64587+ pax_close_kernel();
64588+
64589+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
64590+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
64591+#else
64592 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
64593 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
64594+#endif
64595+
64596 printk(KERN_INFO "uvesafb: protected mode interface info at "
64597 "%04x:%04x\n",
64598 (u16)task->t.regs.es, (u16)task->t.regs.edi);
64599@@ -1799,6 +1822,11 @@ out:
64600 if (par->vbe_modes)
64601 kfree(par->vbe_modes);
64602
64603+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64604+ if (par->pmi_code)
64605+ module_free_exec(NULL, par->pmi_code);
64606+#endif
64607+
64608 framebuffer_release(info);
64609 return err;
64610 }
64611@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
64612 kfree(par->vbe_state_orig);
64613 if (par->vbe_state_saved)
64614 kfree(par->vbe_state_saved);
64615+
64616+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64617+ if (par->pmi_code)
64618+ module_free_exec(NULL, par->pmi_code);
64619+#endif
64620+
64621 }
64622
64623 framebuffer_release(info);
64624diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
64625index bd37ee1..cb827e8 100644
64626--- a/drivers/video/vesafb.c
64627+++ b/drivers/video/vesafb.c
64628@@ -9,6 +9,7 @@
64629 */
64630
64631 #include <linux/module.h>
64632+#include <linux/moduleloader.h>
64633 #include <linux/kernel.h>
64634 #include <linux/errno.h>
64635 #include <linux/string.h>
64636@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
64637 static int vram_total __initdata; /* Set total amount of memory */
64638 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
64639 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
64640-static void (*pmi_start)(void) __read_mostly;
64641-static void (*pmi_pal) (void) __read_mostly;
64642+static void (*pmi_start)(void) __read_only;
64643+static void (*pmi_pal) (void) __read_only;
64644 static int depth __read_mostly;
64645 static int vga_compat __read_mostly;
64646 /* --------------------------------------------------------------------- */
64647@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
64648 unsigned int size_vmode;
64649 unsigned int size_remap;
64650 unsigned int size_total;
64651+ void *pmi_code = NULL;
64652
64653 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
64654 return -ENODEV;
64655@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
64656 size_remap = size_total;
64657 vesafb_fix.smem_len = size_remap;
64658
64659-#ifndef __i386__
64660- screen_info.vesapm_seg = 0;
64661-#endif
64662-
64663 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
64664 printk(KERN_WARNING
64665 "vesafb: cannot reserve video memory at 0x%lx\n",
64666@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
64667 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
64668 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
64669
64670+#ifdef __i386__
64671+
64672+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64673+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
64674+ if (!pmi_code)
64675+#elif !defined(CONFIG_PAX_KERNEXEC)
64676+ if (0)
64677+#endif
64678+
64679+#endif
64680+ screen_info.vesapm_seg = 0;
64681+
64682 if (screen_info.vesapm_seg) {
64683- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
64684- screen_info.vesapm_seg,screen_info.vesapm_off);
64685+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
64686+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
64687 }
64688
64689 if (screen_info.vesapm_seg < 0xc000)
64690@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
64691
64692 if (ypan || pmi_setpal) {
64693 unsigned short *pmi_base;
64694+
64695 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
64696- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
64697- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
64698+
64699+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64700+ pax_open_kernel();
64701+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
64702+#else
64703+ pmi_code = pmi_base;
64704+#endif
64705+
64706+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
64707+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
64708+
64709+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64710+ pmi_start = ktva_ktla(pmi_start);
64711+ pmi_pal = ktva_ktla(pmi_pal);
64712+ pax_close_kernel();
64713+#endif
64714+
64715 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
64716 if (pmi_base[3]) {
64717 printk(KERN_INFO "vesafb: pmi: ports = ");
64718@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
64719 info->node, info->fix.id);
64720 return 0;
64721 err:
64722+
64723+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64724+ module_free_exec(NULL, pmi_code);
64725+#endif
64726+
64727 if (info->screen_base)
64728 iounmap(info->screen_base);
64729 framebuffer_release(info);
64730diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
64731index 88a60e0..6783cc2 100644
64732--- a/drivers/xen/sys-hypervisor.c
64733+++ b/drivers/xen/sys-hypervisor.c
64734@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
64735 return 0;
64736 }
64737
64738-static struct sysfs_ops hyp_sysfs_ops = {
64739+static const struct sysfs_ops hyp_sysfs_ops = {
64740 .show = hyp_sysfs_show,
64741 .store = hyp_sysfs_store,
64742 };
64743diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
64744index 18f74ec..3227009 100644
64745--- a/fs/9p/vfs_inode.c
64746+++ b/fs/9p/vfs_inode.c
64747@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64748 static void
64749 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64750 {
64751- char *s = nd_get_link(nd);
64752+ const char *s = nd_get_link(nd);
64753
64754 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
64755 IS_ERR(s) ? "<error>" : s);
64756diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
64757index bb4cc5b..df5eaa0 100644
64758--- a/fs/Kconfig.binfmt
64759+++ b/fs/Kconfig.binfmt
64760@@ -86,7 +86,7 @@ config HAVE_AOUT
64761
64762 config BINFMT_AOUT
64763 tristate "Kernel support for a.out and ECOFF binaries"
64764- depends on HAVE_AOUT
64765+ depends on HAVE_AOUT && BROKEN
64766 ---help---
64767 A.out (Assembler.OUTput) is a set of formats for libraries and
64768 executables used in the earliest versions of UNIX. Linux used
64769diff --git a/fs/aio.c b/fs/aio.c
64770index 22a19ad..d484e5b 100644
64771--- a/fs/aio.c
64772+++ b/fs/aio.c
64773@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
64774 size += sizeof(struct io_event) * nr_events;
64775 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
64776
64777- if (nr_pages < 0)
64778+ if (nr_pages <= 0)
64779 return -EINVAL;
64780
64781 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
64782@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
64783 struct aio_timeout to;
64784 int retry = 0;
64785
64786+ pax_track_stack();
64787+
64788 /* needed to zero any padding within an entry (there shouldn't be
64789 * any, but C is fun!
64790 */
64791@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
64792 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
64793 {
64794 ssize_t ret;
64795+ struct iovec iovstack;
64796
64797 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
64798 kiocb->ki_nbytes, 1,
64799- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
64800+ &iovstack, &kiocb->ki_iovec);
64801 if (ret < 0)
64802 goto out;
64803
64804+ if (kiocb->ki_iovec == &iovstack) {
64805+ kiocb->ki_inline_vec = iovstack;
64806+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
64807+ }
64808 kiocb->ki_nr_segs = kiocb->ki_nbytes;
64809 kiocb->ki_cur_seg = 0;
64810 /* ki_nbytes/left now reflect bytes instead of segs */
64811diff --git a/fs/attr.c b/fs/attr.c
64812index 96d394b..33cf5b4 100644
64813--- a/fs/attr.c
64814+++ b/fs/attr.c
64815@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
64816 unsigned long limit;
64817
64818 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64819+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
64820 if (limit != RLIM_INFINITY && offset > limit)
64821 goto out_sig;
64822 if (offset > inode->i_sb->s_maxbytes)
64823diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
64824index b4ea829..e63ef18 100644
64825--- a/fs/autofs4/symlink.c
64826+++ b/fs/autofs4/symlink.c
64827@@ -15,7 +15,7 @@
64828 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
64829 {
64830 struct autofs_info *ino = autofs4_dentry_ino(dentry);
64831- nd_set_link(nd, (char *)ino->u.symlink);
64832+ nd_set_link(nd, ino->u.symlink);
64833 return NULL;
64834 }
64835
64836diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
64837index 136a0d6..a287331 100644
64838--- a/fs/autofs4/waitq.c
64839+++ b/fs/autofs4/waitq.c
64840@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
64841 {
64842 unsigned long sigpipe, flags;
64843 mm_segment_t fs;
64844- const char *data = (const char *)addr;
64845+ const char __user *data = (const char __force_user *)addr;
64846 ssize_t wr = 0;
64847
64848 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
64849diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
64850index 9158c07..3f06659 100644
64851--- a/fs/befs/linuxvfs.c
64852+++ b/fs/befs/linuxvfs.c
64853@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64854 {
64855 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
64856 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
64857- char *link = nd_get_link(nd);
64858+ const char *link = nd_get_link(nd);
64859 if (!IS_ERR(link))
64860 kfree(link);
64861 }
64862diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
64863index 0133b5a..3710d09 100644
64864--- a/fs/binfmt_aout.c
64865+++ b/fs/binfmt_aout.c
64866@@ -16,6 +16,7 @@
64867 #include <linux/string.h>
64868 #include <linux/fs.h>
64869 #include <linux/file.h>
64870+#include <linux/security.h>
64871 #include <linux/stat.h>
64872 #include <linux/fcntl.h>
64873 #include <linux/ptrace.h>
64874@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64875 #endif
64876 # define START_STACK(u) (u.start_stack)
64877
64878+ memset(&dump, 0, sizeof(dump));
64879+
64880 fs = get_fs();
64881 set_fs(KERNEL_DS);
64882 has_dumped = 1;
64883@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64884
64885 /* If the size of the dump file exceeds the rlimit, then see what would happen
64886 if we wrote the stack, but not the data area. */
64887+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
64888 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
64889 dump.u_dsize = 0;
64890
64891 /* Make sure we have enough room to write the stack and data areas. */
64892+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
64893 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
64894 dump.u_ssize = 0;
64895
64896@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64897 dump_size = dump.u_ssize << PAGE_SHIFT;
64898 DUMP_WRITE(dump_start,dump_size);
64899 }
64900-/* Finally dump the task struct. Not be used by gdb, but could be useful */
64901- set_fs(KERNEL_DS);
64902- DUMP_WRITE(current,sizeof(*current));
64903+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
64904 end_coredump:
64905 set_fs(fs);
64906 return has_dumped;
64907@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64908 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64909 if (rlim >= RLIM_INFINITY)
64910 rlim = ~0;
64911+
64912+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
64913 if (ex.a_data + ex.a_bss > rlim)
64914 return -ENOMEM;
64915
64916@@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64917 current->mm->free_area_cache = current->mm->mmap_base;
64918 current->mm->cached_hole_size = 0;
64919
64920+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64921+ if (retval < 0) {
64922+ /* Someone check-me: is this error path enough? */
64923+ send_sig(SIGKILL, current, 0);
64924+ return retval;
64925+ }
64926+
64927 install_exec_creds(bprm);
64928 current->flags &= ~PF_FORKNOEXEC;
64929
64930+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64931+ current->mm->pax_flags = 0UL;
64932+#endif
64933+
64934+#ifdef CONFIG_PAX_PAGEEXEC
64935+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
64936+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
64937+
64938+#ifdef CONFIG_PAX_EMUTRAMP
64939+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
64940+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
64941+#endif
64942+
64943+#ifdef CONFIG_PAX_MPROTECT
64944+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
64945+ current->mm->pax_flags |= MF_PAX_MPROTECT;
64946+#endif
64947+
64948+ }
64949+#endif
64950+
64951 if (N_MAGIC(ex) == OMAGIC) {
64952 unsigned long text_addr, map_size;
64953 loff_t pos;
64954@@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64955
64956 down_write(&current->mm->mmap_sem);
64957 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
64958- PROT_READ | PROT_WRITE | PROT_EXEC,
64959+ PROT_READ | PROT_WRITE,
64960 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
64961 fd_offset + ex.a_text);
64962 up_write(&current->mm->mmap_sem);
64963@@ -367,13 +400,6 @@ beyond_if:
64964 return retval;
64965 }
64966
64967- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64968- if (retval < 0) {
64969- /* Someone check-me: is this error path enough? */
64970- send_sig(SIGKILL, current, 0);
64971- return retval;
64972- }
64973-
64974 current->mm->start_stack =
64975 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
64976 #ifdef __alpha__
64977diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
64978index a64fde6..b6699eb 100644
64979--- a/fs/binfmt_elf.c
64980+++ b/fs/binfmt_elf.c
64981@@ -31,6 +31,7 @@
64982 #include <linux/random.h>
64983 #include <linux/elf.h>
64984 #include <linux/utsname.h>
64985+#include <linux/xattr.h>
64986 #include <asm/uaccess.h>
64987 #include <asm/param.h>
64988 #include <asm/page.h>
64989@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
64990 #define elf_core_dump NULL
64991 #endif
64992
64993+#ifdef CONFIG_PAX_MPROTECT
64994+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
64995+#endif
64996+
64997 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
64998 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
64999 #else
65000@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
65001 .load_binary = load_elf_binary,
65002 .load_shlib = load_elf_library,
65003 .core_dump = elf_core_dump,
65004+
65005+#ifdef CONFIG_PAX_MPROTECT
65006+ .handle_mprotect= elf_handle_mprotect,
65007+#endif
65008+
65009 .min_coredump = ELF_EXEC_PAGESIZE,
65010 .hasvdso = 1
65011 };
65012@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
65013
65014 static int set_brk(unsigned long start, unsigned long end)
65015 {
65016+ unsigned long e = end;
65017+
65018 start = ELF_PAGEALIGN(start);
65019 end = ELF_PAGEALIGN(end);
65020 if (end > start) {
65021@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
65022 if (BAD_ADDR(addr))
65023 return addr;
65024 }
65025- current->mm->start_brk = current->mm->brk = end;
65026+ current->mm->start_brk = current->mm->brk = e;
65027 return 0;
65028 }
65029
65030@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65031 elf_addr_t __user *u_rand_bytes;
65032 const char *k_platform = ELF_PLATFORM;
65033 const char *k_base_platform = ELF_BASE_PLATFORM;
65034- unsigned char k_rand_bytes[16];
65035+ u32 k_rand_bytes[4];
65036 int items;
65037 elf_addr_t *elf_info;
65038 int ei_index = 0;
65039 const struct cred *cred = current_cred();
65040 struct vm_area_struct *vma;
65041+ unsigned long saved_auxv[AT_VECTOR_SIZE];
65042+
65043+ pax_track_stack();
65044
65045 /*
65046 * In some cases (e.g. Hyper-Threading), we want to avoid L1
65047@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65048 * Generate 16 random bytes for userspace PRNG seeding.
65049 */
65050 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
65051- u_rand_bytes = (elf_addr_t __user *)
65052- STACK_ALLOC(p, sizeof(k_rand_bytes));
65053+ srandom32(k_rand_bytes[0] ^ random32());
65054+ srandom32(k_rand_bytes[1] ^ random32());
65055+ srandom32(k_rand_bytes[2] ^ random32());
65056+ srandom32(k_rand_bytes[3] ^ random32());
65057+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
65058+ u_rand_bytes = (elf_addr_t __user *) p;
65059 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
65060 return -EFAULT;
65061
65062@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
65063 return -EFAULT;
65064 current->mm->env_end = p;
65065
65066+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
65067+
65068 /* Put the elf_info on the stack in the right place. */
65069 sp = (elf_addr_t __user *)envp + 1;
65070- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
65071+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
65072 return -EFAULT;
65073 return 0;
65074 }
65075@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65076 {
65077 struct elf_phdr *elf_phdata;
65078 struct elf_phdr *eppnt;
65079- unsigned long load_addr = 0;
65080+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
65081 int load_addr_set = 0;
65082 unsigned long last_bss = 0, elf_bss = 0;
65083- unsigned long error = ~0UL;
65084+ unsigned long error = -EINVAL;
65085 unsigned long total_size;
65086 int retval, i, size;
65087
65088@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65089 goto out_close;
65090 }
65091
65092+#ifdef CONFIG_PAX_SEGMEXEC
65093+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
65094+ pax_task_size = SEGMEXEC_TASK_SIZE;
65095+#endif
65096+
65097 eppnt = elf_phdata;
65098 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
65099 if (eppnt->p_type == PT_LOAD) {
65100@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65101 k = load_addr + eppnt->p_vaddr;
65102 if (BAD_ADDR(k) ||
65103 eppnt->p_filesz > eppnt->p_memsz ||
65104- eppnt->p_memsz > TASK_SIZE ||
65105- TASK_SIZE - eppnt->p_memsz < k) {
65106+ eppnt->p_memsz > pax_task_size ||
65107+ pax_task_size - eppnt->p_memsz < k) {
65108 error = -ENOMEM;
65109 goto out_close;
65110 }
65111@@ -532,6 +558,351 @@ out:
65112 return error;
65113 }
65114
65115+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65116+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
65117+{
65118+ unsigned long pax_flags = 0UL;
65119+
65120+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65121+
65122+#ifdef CONFIG_PAX_PAGEEXEC
65123+ if (elf_phdata->p_flags & PF_PAGEEXEC)
65124+ pax_flags |= MF_PAX_PAGEEXEC;
65125+#endif
65126+
65127+#ifdef CONFIG_PAX_SEGMEXEC
65128+ if (elf_phdata->p_flags & PF_SEGMEXEC)
65129+ pax_flags |= MF_PAX_SEGMEXEC;
65130+#endif
65131+
65132+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65133+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65134+ if (nx_enabled)
65135+ pax_flags &= ~MF_PAX_SEGMEXEC;
65136+ else
65137+ pax_flags &= ~MF_PAX_PAGEEXEC;
65138+ }
65139+#endif
65140+
65141+#ifdef CONFIG_PAX_EMUTRAMP
65142+ if (elf_phdata->p_flags & PF_EMUTRAMP)
65143+ pax_flags |= MF_PAX_EMUTRAMP;
65144+#endif
65145+
65146+#ifdef CONFIG_PAX_MPROTECT
65147+ if (elf_phdata->p_flags & PF_MPROTECT)
65148+ pax_flags |= MF_PAX_MPROTECT;
65149+#endif
65150+
65151+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65152+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
65153+ pax_flags |= MF_PAX_RANDMMAP;
65154+#endif
65155+
65156+#endif
65157+
65158+ return pax_flags;
65159+}
65160+
65161+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
65162+{
65163+ unsigned long pax_flags = 0UL;
65164+
65165+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65166+
65167+#ifdef CONFIG_PAX_PAGEEXEC
65168+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
65169+ pax_flags |= MF_PAX_PAGEEXEC;
65170+#endif
65171+
65172+#ifdef CONFIG_PAX_SEGMEXEC
65173+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
65174+ pax_flags |= MF_PAX_SEGMEXEC;
65175+#endif
65176+
65177+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65178+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65179+ if (nx_enabled)
65180+ pax_flags &= ~MF_PAX_SEGMEXEC;
65181+ else
65182+ pax_flags &= ~MF_PAX_PAGEEXEC;
65183+ }
65184+#endif
65185+
65186+#ifdef CONFIG_PAX_EMUTRAMP
65187+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
65188+ pax_flags |= MF_PAX_EMUTRAMP;
65189+#endif
65190+
65191+#ifdef CONFIG_PAX_MPROTECT
65192+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
65193+ pax_flags |= MF_PAX_MPROTECT;
65194+#endif
65195+
65196+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65197+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
65198+ pax_flags |= MF_PAX_RANDMMAP;
65199+#endif
65200+
65201+#endif
65202+
65203+ return pax_flags;
65204+}
65205+
65206+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
65207+{
65208+ unsigned long pax_flags = 0UL;
65209+
65210+#ifdef CONFIG_PAX_EI_PAX
65211+
65212+#ifdef CONFIG_PAX_PAGEEXEC
65213+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
65214+ pax_flags |= MF_PAX_PAGEEXEC;
65215+#endif
65216+
65217+#ifdef CONFIG_PAX_SEGMEXEC
65218+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
65219+ pax_flags |= MF_PAX_SEGMEXEC;
65220+#endif
65221+
65222+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65223+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65224+ if (nx_enabled)
65225+ pax_flags &= ~MF_PAX_SEGMEXEC;
65226+ else
65227+ pax_flags &= ~MF_PAX_PAGEEXEC;
65228+ }
65229+#endif
65230+
65231+#ifdef CONFIG_PAX_EMUTRAMP
65232+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
65233+ pax_flags |= MF_PAX_EMUTRAMP;
65234+#endif
65235+
65236+#ifdef CONFIG_PAX_MPROTECT
65237+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
65238+ pax_flags |= MF_PAX_MPROTECT;
65239+#endif
65240+
65241+#ifdef CONFIG_PAX_ASLR
65242+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
65243+ pax_flags |= MF_PAX_RANDMMAP;
65244+#endif
65245+
65246+#else
65247+
65248+#ifdef CONFIG_PAX_PAGEEXEC
65249+ pax_flags |= MF_PAX_PAGEEXEC;
65250+#endif
65251+
65252+#ifdef CONFIG_PAX_MPROTECT
65253+ pax_flags |= MF_PAX_MPROTECT;
65254+#endif
65255+
65256+#ifdef CONFIG_PAX_RANDMMAP
65257+ pax_flags |= MF_PAX_RANDMMAP;
65258+#endif
65259+
65260+#ifdef CONFIG_PAX_SEGMEXEC
65261+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
65262+ pax_flags &= ~MF_PAX_PAGEEXEC;
65263+ pax_flags |= MF_PAX_SEGMEXEC;
65264+ }
65265+#endif
65266+
65267+#endif
65268+
65269+ return pax_flags;
65270+}
65271+
65272+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
65273+{
65274+
65275+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65276+ unsigned long i;
65277+
65278+ for (i = 0UL; i < elf_ex->e_phnum; i++)
65279+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
65280+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
65281+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
65282+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
65283+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
65284+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
65285+ return ~0UL;
65286+
65287+#ifdef CONFIG_PAX_SOFTMODE
65288+ if (pax_softmode)
65289+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
65290+ else
65291+#endif
65292+
65293+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
65294+ break;
65295+ }
65296+#endif
65297+
65298+ return ~0UL;
65299+}
65300+
65301+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65302+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
65303+{
65304+ unsigned long pax_flags = 0UL;
65305+
65306+#ifdef CONFIG_PAX_PAGEEXEC
65307+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
65308+ pax_flags |= MF_PAX_PAGEEXEC;
65309+#endif
65310+
65311+#ifdef CONFIG_PAX_SEGMEXEC
65312+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
65313+ pax_flags |= MF_PAX_SEGMEXEC;
65314+#endif
65315+
65316+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65317+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65318+ if ((__supported_pte_mask & _PAGE_NX))
65319+ pax_flags &= ~MF_PAX_SEGMEXEC;
65320+ else
65321+ pax_flags &= ~MF_PAX_PAGEEXEC;
65322+ }
65323+#endif
65324+
65325+#ifdef CONFIG_PAX_EMUTRAMP
65326+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
65327+ pax_flags |= MF_PAX_EMUTRAMP;
65328+#endif
65329+
65330+#ifdef CONFIG_PAX_MPROTECT
65331+ if (pax_flags_softmode & MF_PAX_MPROTECT)
65332+ pax_flags |= MF_PAX_MPROTECT;
65333+#endif
65334+
65335+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65336+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
65337+ pax_flags |= MF_PAX_RANDMMAP;
65338+#endif
65339+
65340+ return pax_flags;
65341+}
65342+
65343+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
65344+{
65345+ unsigned long pax_flags = 0UL;
65346+
65347+#ifdef CONFIG_PAX_PAGEEXEC
65348+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
65349+ pax_flags |= MF_PAX_PAGEEXEC;
65350+#endif
65351+
65352+#ifdef CONFIG_PAX_SEGMEXEC
65353+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
65354+ pax_flags |= MF_PAX_SEGMEXEC;
65355+#endif
65356+
65357+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65358+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65359+ if ((__supported_pte_mask & _PAGE_NX))
65360+ pax_flags &= ~MF_PAX_SEGMEXEC;
65361+ else
65362+ pax_flags &= ~MF_PAX_PAGEEXEC;
65363+ }
65364+#endif
65365+
65366+#ifdef CONFIG_PAX_EMUTRAMP
65367+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
65368+ pax_flags |= MF_PAX_EMUTRAMP;
65369+#endif
65370+
65371+#ifdef CONFIG_PAX_MPROTECT
65372+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
65373+ pax_flags |= MF_PAX_MPROTECT;
65374+#endif
65375+
65376+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65377+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
65378+ pax_flags |= MF_PAX_RANDMMAP;
65379+#endif
65380+
65381+ return pax_flags;
65382+}
65383+#endif
65384+
65385+static unsigned long pax_parse_xattr_pax(struct file * const file)
65386+{
65387+
65388+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65389+ ssize_t xattr_size, i;
65390+ unsigned char xattr_value[5];
65391+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
65392+
65393+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
65394+ if (xattr_size <= 0)
65395+ return ~0UL;
65396+
65397+ for (i = 0; i < xattr_size; i++)
65398+ switch (xattr_value[i]) {
65399+ default:
65400+ return ~0UL;
65401+
65402+#define parse_flag(option1, option2, flag) \
65403+ case option1: \
65404+ pax_flags_hardmode |= MF_PAX_##flag; \
65405+ break; \
65406+ case option2: \
65407+ pax_flags_softmode |= MF_PAX_##flag; \
65408+ break;
65409+
65410+ parse_flag('p', 'P', PAGEEXEC);
65411+ parse_flag('e', 'E', EMUTRAMP);
65412+ parse_flag('m', 'M', MPROTECT);
65413+ parse_flag('r', 'R', RANDMMAP);
65414+ parse_flag('s', 'S', SEGMEXEC);
65415+
65416+#undef parse_flag
65417+ }
65418+
65419+ if (pax_flags_hardmode & pax_flags_softmode)
65420+ return ~0UL;
65421+
65422+#ifdef CONFIG_PAX_SOFTMODE
65423+ if (pax_softmode)
65424+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
65425+ else
65426+#endif
65427+
65428+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
65429+#else
65430+ return ~0UL;
65431+#endif
65432+
65433+}
65434+
65435+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
65436+{
65437+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
65438+
65439+ pax_flags = pax_parse_ei_pax(elf_ex);
65440+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
65441+ xattr_pax_flags = pax_parse_xattr_pax(file);
65442+
65443+ if (pt_pax_flags == ~0UL)
65444+ pt_pax_flags = xattr_pax_flags;
65445+ else if (xattr_pax_flags == ~0UL)
65446+ xattr_pax_flags = pt_pax_flags;
65447+ if (pt_pax_flags != xattr_pax_flags)
65448+ return -EINVAL;
65449+ if (pt_pax_flags != ~0UL)
65450+ pax_flags = pt_pax_flags;
65451+
65452+ if (0 > pax_check_flags(&pax_flags))
65453+ return -EINVAL;
65454+
65455+ current->mm->pax_flags = pax_flags;
65456+ return 0;
65457+}
65458+#endif
65459+
65460 /*
65461 * These are the functions used to load ELF style executables and shared
65462 * libraries. There is no binary dependent code anywhere else.
65463@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
65464 {
65465 unsigned int random_variable = 0;
65466
65467+#ifdef CONFIG_PAX_RANDUSTACK
65468+ if (randomize_va_space)
65469+ return stack_top - current->mm->delta_stack;
65470+#endif
65471+
65472 if ((current->flags & PF_RANDOMIZE) &&
65473 !(current->personality & ADDR_NO_RANDOMIZE)) {
65474 random_variable = get_random_int() & STACK_RND_MASK;
65475@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65476 unsigned long load_addr = 0, load_bias = 0;
65477 int load_addr_set = 0;
65478 char * elf_interpreter = NULL;
65479- unsigned long error;
65480+ unsigned long error = 0;
65481 struct elf_phdr *elf_ppnt, *elf_phdata;
65482 unsigned long elf_bss, elf_brk;
65483 int retval, i;
65484@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65485 unsigned long start_code, end_code, start_data, end_data;
65486 unsigned long reloc_func_desc = 0;
65487 int executable_stack = EXSTACK_DEFAULT;
65488- unsigned long def_flags = 0;
65489 struct {
65490 struct elfhdr elf_ex;
65491 struct elfhdr interp_elf_ex;
65492 } *loc;
65493+ unsigned long pax_task_size = TASK_SIZE;
65494
65495 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
65496 if (!loc) {
65497@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65498
65499 /* OK, This is the point of no return */
65500 current->flags &= ~PF_FORKNOEXEC;
65501- current->mm->def_flags = def_flags;
65502+
65503+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65504+ current->mm->pax_flags = 0UL;
65505+#endif
65506+
65507+#ifdef CONFIG_PAX_DLRESOLVE
65508+ current->mm->call_dl_resolve = 0UL;
65509+#endif
65510+
65511+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65512+ current->mm->call_syscall = 0UL;
65513+#endif
65514+
65515+#ifdef CONFIG_PAX_ASLR
65516+ current->mm->delta_mmap = 0UL;
65517+ current->mm->delta_stack = 0UL;
65518+#endif
65519+
65520+ current->mm->def_flags = 0;
65521+
65522+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65523+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
65524+ send_sig(SIGKILL, current, 0);
65525+ goto out_free_dentry;
65526+ }
65527+#endif
65528+
65529+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65530+ pax_set_initial_flags(bprm);
65531+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65532+ if (pax_set_initial_flags_func)
65533+ (pax_set_initial_flags_func)(bprm);
65534+#endif
65535+
65536+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65537+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
65538+ current->mm->context.user_cs_limit = PAGE_SIZE;
65539+ current->mm->def_flags |= VM_PAGEEXEC;
65540+ }
65541+#endif
65542+
65543+#ifdef CONFIG_PAX_SEGMEXEC
65544+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65545+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
65546+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
65547+ pax_task_size = SEGMEXEC_TASK_SIZE;
65548+ }
65549+#endif
65550+
65551+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
65552+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65553+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
65554+ put_cpu();
65555+ }
65556+#endif
65557
65558 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
65559 may depend on the personality. */
65560 SET_PERSONALITY(loc->elf_ex);
65561+
65562+#ifdef CONFIG_PAX_ASLR
65563+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
65564+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
65565+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
65566+ }
65567+#endif
65568+
65569+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65570+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65571+ executable_stack = EXSTACK_DISABLE_X;
65572+ current->personality &= ~READ_IMPLIES_EXEC;
65573+ } else
65574+#endif
65575+
65576 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
65577 current->personality |= READ_IMPLIES_EXEC;
65578
65579@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65580 * might try to exec. This is because the brk will
65581 * follow the loader, and is not movable. */
65582 #ifdef CONFIG_X86
65583- load_bias = 0;
65584+ if (current->flags & PF_RANDOMIZE)
65585+ load_bias = 0;
65586+ else
65587+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65588 #else
65589 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65590 #endif
65591+
65592+#ifdef CONFIG_PAX_RANDMMAP
65593+ /* PaX: randomize base address at the default exe base if requested */
65594+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
65595+#ifdef CONFIG_SPARC64
65596+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
65597+#else
65598+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
65599+#endif
65600+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
65601+ elf_flags |= MAP_FIXED;
65602+ }
65603+#endif
65604+
65605 }
65606
65607 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
65608@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65609 * allowed task size. Note that p_filesz must always be
65610 * <= p_memsz so it is only necessary to check p_memsz.
65611 */
65612- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65613- elf_ppnt->p_memsz > TASK_SIZE ||
65614- TASK_SIZE - elf_ppnt->p_memsz < k) {
65615+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65616+ elf_ppnt->p_memsz > pax_task_size ||
65617+ pax_task_size - elf_ppnt->p_memsz < k) {
65618 /* set_brk can never work. Avoid overflows. */
65619 send_sig(SIGKILL, current, 0);
65620 retval = -EINVAL;
65621@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65622 start_data += load_bias;
65623 end_data += load_bias;
65624
65625+#ifdef CONFIG_PAX_RANDMMAP
65626+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
65627+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
65628+#endif
65629+
65630 /* Calling set_brk effectively mmaps the pages that we need
65631 * for the bss and break sections. We must do this before
65632 * mapping in the interpreter, to make sure it doesn't wind
65633@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65634 goto out_free_dentry;
65635 }
65636 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
65637- send_sig(SIGSEGV, current, 0);
65638- retval = -EFAULT; /* Nobody gets to see this, but.. */
65639- goto out_free_dentry;
65640+ /*
65641+ * This bss-zeroing can fail if the ELF
65642+ * file specifies odd protections. So
65643+ * we don't check the return value
65644+ */
65645 }
65646
65647 if (elf_interpreter) {
65648@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
65649 unsigned long n = off;
65650 if (n > PAGE_SIZE)
65651 n = PAGE_SIZE;
65652- if (!dump_write(file, buf, n))
65653+ if (!dump_write(file, buf, n)) {
65654+ free_page((unsigned long)buf);
65655 return 0;
65656+ }
65657 off -= n;
65658 }
65659 free_page((unsigned long)buf);
65660@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
65661 * Decide what to dump of a segment, part, all or none.
65662 */
65663 static unsigned long vma_dump_size(struct vm_area_struct *vma,
65664- unsigned long mm_flags)
65665+ unsigned long mm_flags, long signr)
65666 {
65667 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
65668
65669@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
65670 if (vma->vm_file == NULL)
65671 return 0;
65672
65673- if (FILTER(MAPPED_PRIVATE))
65674+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
65675 goto whole;
65676
65677 /*
65678@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
65679 #undef DUMP_WRITE
65680
65681 #define DUMP_WRITE(addr, nr) \
65682+ do { \
65683+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
65684 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
65685- goto end_coredump;
65686+ goto end_coredump; \
65687+ } while (0);
65688
65689 static void fill_elf_header(struct elfhdr *elf, int segs,
65690 u16 machine, u32 flags, u8 osabi)
65691@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
65692 {
65693 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
65694 int i = 0;
65695- do
65696+ do {
65697 i += 2;
65698- while (auxv[i - 2] != AT_NULL);
65699+ } while (auxv[i - 2] != AT_NULL);
65700 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
65701 }
65702
65703@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65704 phdr.p_offset = offset;
65705 phdr.p_vaddr = vma->vm_start;
65706 phdr.p_paddr = 0;
65707- phdr.p_filesz = vma_dump_size(vma, mm_flags);
65708+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
65709 phdr.p_memsz = vma->vm_end - vma->vm_start;
65710 offset += phdr.p_filesz;
65711 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
65712@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65713 unsigned long addr;
65714 unsigned long end;
65715
65716- end = vma->vm_start + vma_dump_size(vma, mm_flags);
65717+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
65718
65719 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
65720 struct page *page;
65721@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65722 page = get_dump_page(addr);
65723 if (page) {
65724 void *kaddr = kmap(page);
65725+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
65726 stop = ((size += PAGE_SIZE) > limit) ||
65727 !dump_write(file, kaddr, PAGE_SIZE);
65728 kunmap(page);
65729@@ -2042,6 +2517,97 @@ out:
65730
65731 #endif /* USE_ELF_CORE_DUMP */
65732
65733+#ifdef CONFIG_PAX_MPROTECT
65734+/* PaX: non-PIC ELF libraries need relocations on their executable segments
65735+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
65736+ * we'll remove VM_MAYWRITE for good on RELRO segments.
65737+ *
65738+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
65739+ * basis because we want to allow the common case and not the special ones.
65740+ */
65741+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
65742+{
65743+ struct elfhdr elf_h;
65744+ struct elf_phdr elf_p;
65745+ unsigned long i;
65746+ unsigned long oldflags;
65747+ bool is_textrel_rw, is_textrel_rx, is_relro;
65748+
65749+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
65750+ return;
65751+
65752+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
65753+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
65754+
65755+#ifdef CONFIG_PAX_ELFRELOCS
65756+ /* possible TEXTREL */
65757+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
65758+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
65759+#else
65760+ is_textrel_rw = false;
65761+ is_textrel_rx = false;
65762+#endif
65763+
65764+ /* possible RELRO */
65765+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
65766+
65767+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
65768+ return;
65769+
65770+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
65771+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
65772+
65773+#ifdef CONFIG_PAX_ETEXECRELOCS
65774+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65775+#else
65776+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
65777+#endif
65778+
65779+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65780+ !elf_check_arch(&elf_h) ||
65781+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
65782+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
65783+ return;
65784+
65785+ for (i = 0UL; i < elf_h.e_phnum; i++) {
65786+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
65787+ return;
65788+ switch (elf_p.p_type) {
65789+ case PT_DYNAMIC:
65790+ if (!is_textrel_rw && !is_textrel_rx)
65791+ continue;
65792+ i = 0UL;
65793+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
65794+ elf_dyn dyn;
65795+
65796+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
65797+ return;
65798+ if (dyn.d_tag == DT_NULL)
65799+ return;
65800+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
65801+ gr_log_textrel(vma);
65802+ if (is_textrel_rw)
65803+ vma->vm_flags |= VM_MAYWRITE;
65804+ else
65805+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
65806+ vma->vm_flags &= ~VM_MAYWRITE;
65807+ return;
65808+ }
65809+ i++;
65810+ }
65811+ return;
65812+
65813+ case PT_GNU_RELRO:
65814+ if (!is_relro)
65815+ continue;
65816+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
65817+ vma->vm_flags &= ~VM_MAYWRITE;
65818+ return;
65819+ }
65820+ }
65821+}
65822+#endif
65823+
65824 static int __init init_elf_binfmt(void)
65825 {
65826 return register_binfmt(&elf_format);
65827diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
65828index ca88c46..f155a60 100644
65829--- a/fs/binfmt_flat.c
65830+++ b/fs/binfmt_flat.c
65831@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
65832 realdatastart = (unsigned long) -ENOMEM;
65833 printk("Unable to allocate RAM for process data, errno %d\n",
65834 (int)-realdatastart);
65835+ down_write(&current->mm->mmap_sem);
65836 do_munmap(current->mm, textpos, text_len);
65837+ up_write(&current->mm->mmap_sem);
65838 ret = realdatastart;
65839 goto err;
65840 }
65841@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65842 }
65843 if (IS_ERR_VALUE(result)) {
65844 printk("Unable to read data+bss, errno %d\n", (int)-result);
65845+ down_write(&current->mm->mmap_sem);
65846 do_munmap(current->mm, textpos, text_len);
65847 do_munmap(current->mm, realdatastart, data_len + extra);
65848+ up_write(&current->mm->mmap_sem);
65849 ret = result;
65850 goto err;
65851 }
65852@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65853 }
65854 if (IS_ERR_VALUE(result)) {
65855 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
65856+ down_write(&current->mm->mmap_sem);
65857 do_munmap(current->mm, textpos, text_len + data_len + extra +
65858 MAX_SHARED_LIBS * sizeof(unsigned long));
65859+ up_write(&current->mm->mmap_sem);
65860 ret = result;
65861 goto err;
65862 }
65863diff --git a/fs/bio.c b/fs/bio.c
65864index e696713..83de133 100644
65865--- a/fs/bio.c
65866+++ b/fs/bio.c
65867@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
65868
65869 i = 0;
65870 while (i < bio_slab_nr) {
65871- struct bio_slab *bslab = &bio_slabs[i];
65872+ bslab = &bio_slabs[i];
65873
65874 if (!bslab->slab && entry == -1)
65875 entry = i;
65876@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
65877 const int read = bio_data_dir(bio) == READ;
65878 struct bio_map_data *bmd = bio->bi_private;
65879 int i;
65880- char *p = bmd->sgvecs[0].iov_base;
65881+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
65882
65883 __bio_for_each_segment(bvec, bio, i, 0) {
65884 char *addr = page_address(bvec->bv_page);
65885diff --git a/fs/block_dev.c b/fs/block_dev.c
65886index e65efa2..04fae57 100644
65887--- a/fs/block_dev.c
65888+++ b/fs/block_dev.c
65889@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
65890 else if (bdev->bd_contains == bdev)
65891 res = 0; /* is a whole device which isn't held */
65892
65893- else if (bdev->bd_contains->bd_holder == bd_claim)
65894+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
65895 res = 0; /* is a partition of a device that is being partitioned */
65896 else if (bdev->bd_contains->bd_holder != NULL)
65897 res = -EBUSY; /* is a partition of a held device */
65898diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
65899index c4bc570..42acd8d 100644
65900--- a/fs/btrfs/ctree.c
65901+++ b/fs/btrfs/ctree.c
65902@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
65903 free_extent_buffer(buf);
65904 add_root_to_dirty_list(root);
65905 } else {
65906- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
65907- parent_start = parent->start;
65908- else
65909+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
65910+ if (parent)
65911+ parent_start = parent->start;
65912+ else
65913+ parent_start = 0;
65914+ } else
65915 parent_start = 0;
65916
65917 WARN_ON(trans->transid != btrfs_header_generation(parent));
65918@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
65919
65920 ret = 0;
65921 if (slot == 0) {
65922- struct btrfs_disk_key disk_key;
65923 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
65924 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
65925 }
65926diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
65927index f447188..59c17c5 100644
65928--- a/fs/btrfs/disk-io.c
65929+++ b/fs/btrfs/disk-io.c
65930@@ -39,7 +39,7 @@
65931 #include "tree-log.h"
65932 #include "free-space-cache.h"
65933
65934-static struct extent_io_ops btree_extent_io_ops;
65935+static const struct extent_io_ops btree_extent_io_ops;
65936 static void end_workqueue_fn(struct btrfs_work *work);
65937 static void free_fs_root(struct btrfs_root *root);
65938
65939@@ -2607,7 +2607,7 @@ out:
65940 return 0;
65941 }
65942
65943-static struct extent_io_ops btree_extent_io_ops = {
65944+static const struct extent_io_ops btree_extent_io_ops = {
65945 .write_cache_pages_lock_hook = btree_lock_page_hook,
65946 .readpage_end_io_hook = btree_readpage_end_io_hook,
65947 .submit_bio_hook = btree_submit_bio_hook,
65948diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
65949index 559f724..a026171 100644
65950--- a/fs/btrfs/extent-tree.c
65951+++ b/fs/btrfs/extent-tree.c
65952@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
65953 u64 group_start = group->key.objectid;
65954 new_extents = kmalloc(sizeof(*new_extents),
65955 GFP_NOFS);
65956+ if (!new_extents) {
65957+ ret = -ENOMEM;
65958+ goto out;
65959+ }
65960 nr_extents = 1;
65961 ret = get_new_locations(reloc_inode,
65962 extent_key,
65963diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
65964index 36de250..7ec75c7 100644
65965--- a/fs/btrfs/extent_io.h
65966+++ b/fs/btrfs/extent_io.h
65967@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
65968 struct bio *bio, int mirror_num,
65969 unsigned long bio_flags);
65970 struct extent_io_ops {
65971- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
65972+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
65973 u64 start, u64 end, int *page_started,
65974 unsigned long *nr_written);
65975- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
65976- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
65977+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
65978+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
65979 extent_submit_bio_hook_t *submit_bio_hook;
65980- int (*merge_bio_hook)(struct page *page, unsigned long offset,
65981+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
65982 size_t size, struct bio *bio,
65983 unsigned long bio_flags);
65984- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
65985- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
65986+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
65987+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
65988 u64 start, u64 end,
65989 struct extent_state *state);
65990- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
65991+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
65992 u64 start, u64 end,
65993 struct extent_state *state);
65994- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65995+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65996 struct extent_state *state);
65997- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65998+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65999 struct extent_state *state, int uptodate);
66000- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
66001+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
66002 unsigned long old, unsigned long bits);
66003- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
66004+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
66005 unsigned long bits);
66006- int (*merge_extent_hook)(struct inode *inode,
66007+ int (* const merge_extent_hook)(struct inode *inode,
66008 struct extent_state *new,
66009 struct extent_state *other);
66010- int (*split_extent_hook)(struct inode *inode,
66011+ int (* const split_extent_hook)(struct inode *inode,
66012 struct extent_state *orig, u64 split);
66013- int (*write_cache_pages_lock_hook)(struct page *page);
66014+ int (* const write_cache_pages_lock_hook)(struct page *page);
66015 };
66016
66017 struct extent_io_tree {
66018@@ -88,7 +88,7 @@ struct extent_io_tree {
66019 u64 dirty_bytes;
66020 spinlock_t lock;
66021 spinlock_t buffer_lock;
66022- struct extent_io_ops *ops;
66023+ const struct extent_io_ops *ops;
66024 };
66025
66026 struct extent_state {
66027diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
66028index cb2849f..3718fb4 100644
66029--- a/fs/btrfs/free-space-cache.c
66030+++ b/fs/btrfs/free-space-cache.c
66031@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
66032
66033 while(1) {
66034 if (entry->bytes < bytes || entry->offset < min_start) {
66035- struct rb_node *node;
66036-
66037 node = rb_next(&entry->offset_index);
66038 if (!node)
66039 break;
66040@@ -1226,7 +1224,7 @@ again:
66041 */
66042 while (entry->bitmap || found_bitmap ||
66043 (!entry->bitmap && entry->bytes < min_bytes)) {
66044- struct rb_node *node = rb_next(&entry->offset_index);
66045+ node = rb_next(&entry->offset_index);
66046
66047 if (entry->bitmap && entry->bytes > bytes + empty_size) {
66048 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
66049diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
66050index e03a836..323837e 100644
66051--- a/fs/btrfs/inode.c
66052+++ b/fs/btrfs/inode.c
66053@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
66054 static const struct address_space_operations btrfs_aops;
66055 static const struct address_space_operations btrfs_symlink_aops;
66056 static const struct file_operations btrfs_dir_file_operations;
66057-static struct extent_io_ops btrfs_extent_io_ops;
66058+static const struct extent_io_ops btrfs_extent_io_ops;
66059
66060 static struct kmem_cache *btrfs_inode_cachep;
66061 struct kmem_cache *btrfs_trans_handle_cachep;
66062@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
66063 1, 0, NULL, GFP_NOFS);
66064 while (start < end) {
66065 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
66066+ BUG_ON(!async_cow);
66067 async_cow->inode = inode;
66068 async_cow->root = root;
66069 async_cow->locked_page = locked_page;
66070@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
66071 inline_size = btrfs_file_extent_inline_item_len(leaf,
66072 btrfs_item_nr(leaf, path->slots[0]));
66073 tmp = kmalloc(inline_size, GFP_NOFS);
66074+ if (!tmp)
66075+ return -ENOMEM;
66076 ptr = btrfs_file_extent_inline_start(item);
66077
66078 read_extent_buffer(leaf, tmp, ptr, inline_size);
66079@@ -5410,7 +5413,7 @@ fail:
66080 return -ENOMEM;
66081 }
66082
66083-static int btrfs_getattr(struct vfsmount *mnt,
66084+int btrfs_getattr(struct vfsmount *mnt,
66085 struct dentry *dentry, struct kstat *stat)
66086 {
66087 struct inode *inode = dentry->d_inode;
66088@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
66089 return 0;
66090 }
66091
66092+EXPORT_SYMBOL(btrfs_getattr);
66093+
66094+dev_t get_btrfs_dev_from_inode(struct inode *inode)
66095+{
66096+ return BTRFS_I(inode)->root->anon_super.s_dev;
66097+}
66098+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
66099+
66100 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
66101 struct inode *new_dir, struct dentry *new_dentry)
66102 {
66103@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
66104 .fsync = btrfs_sync_file,
66105 };
66106
66107-static struct extent_io_ops btrfs_extent_io_ops = {
66108+static const struct extent_io_ops btrfs_extent_io_ops = {
66109 .fill_delalloc = run_delalloc_range,
66110 .submit_bio_hook = btrfs_submit_bio_hook,
66111 .merge_bio_hook = btrfs_merge_bio_hook,
66112diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
66113index ab7ab53..94e0781 100644
66114--- a/fs/btrfs/relocation.c
66115+++ b/fs/btrfs/relocation.c
66116@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
66117 }
66118 spin_unlock(&rc->reloc_root_tree.lock);
66119
66120- BUG_ON((struct btrfs_root *)node->data != root);
66121+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
66122
66123 if (!del) {
66124 spin_lock(&rc->reloc_root_tree.lock);
66125diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
66126index a240b6f..4ce16ef 100644
66127--- a/fs/btrfs/sysfs.c
66128+++ b/fs/btrfs/sysfs.c
66129@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
66130 complete(&root->kobj_unregister);
66131 }
66132
66133-static struct sysfs_ops btrfs_super_attr_ops = {
66134+static const struct sysfs_ops btrfs_super_attr_ops = {
66135 .show = btrfs_super_attr_show,
66136 .store = btrfs_super_attr_store,
66137 };
66138
66139-static struct sysfs_ops btrfs_root_attr_ops = {
66140+static const struct sysfs_ops btrfs_root_attr_ops = {
66141 .show = btrfs_root_attr_show,
66142 .store = btrfs_root_attr_store,
66143 };
66144diff --git a/fs/buffer.c b/fs/buffer.c
66145index 6fa5302..395d9f6 100644
66146--- a/fs/buffer.c
66147+++ b/fs/buffer.c
66148@@ -25,6 +25,7 @@
66149 #include <linux/percpu.h>
66150 #include <linux/slab.h>
66151 #include <linux/capability.h>
66152+#include <linux/security.h>
66153 #include <linux/blkdev.h>
66154 #include <linux/file.h>
66155 #include <linux/quotaops.h>
66156diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
66157index 3797e00..ce776f6 100644
66158--- a/fs/cachefiles/bind.c
66159+++ b/fs/cachefiles/bind.c
66160@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
66161 args);
66162
66163 /* start by checking things over */
66164- ASSERT(cache->fstop_percent >= 0 &&
66165- cache->fstop_percent < cache->fcull_percent &&
66166+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
66167 cache->fcull_percent < cache->frun_percent &&
66168 cache->frun_percent < 100);
66169
66170- ASSERT(cache->bstop_percent >= 0 &&
66171- cache->bstop_percent < cache->bcull_percent &&
66172+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
66173 cache->bcull_percent < cache->brun_percent &&
66174 cache->brun_percent < 100);
66175
66176diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
66177index 4618516..bb30d01 100644
66178--- a/fs/cachefiles/daemon.c
66179+++ b/fs/cachefiles/daemon.c
66180@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
66181 if (test_bit(CACHEFILES_DEAD, &cache->flags))
66182 return -EIO;
66183
66184- if (datalen < 0 || datalen > PAGE_SIZE - 1)
66185+ if (datalen > PAGE_SIZE - 1)
66186 return -EOPNOTSUPP;
66187
66188 /* drag the command string into the kernel so we can parse it */
66189@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
66190 if (args[0] != '%' || args[1] != '\0')
66191 return -EINVAL;
66192
66193- if (fstop < 0 || fstop >= cache->fcull_percent)
66194+ if (fstop >= cache->fcull_percent)
66195 return cachefiles_daemon_range_error(cache, args);
66196
66197 cache->fstop_percent = fstop;
66198@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
66199 if (args[0] != '%' || args[1] != '\0')
66200 return -EINVAL;
66201
66202- if (bstop < 0 || bstop >= cache->bcull_percent)
66203+ if (bstop >= cache->bcull_percent)
66204 return cachefiles_daemon_range_error(cache, args);
66205
66206 cache->bstop_percent = bstop;
66207diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
66208index f7c255f..fcd61de 100644
66209--- a/fs/cachefiles/internal.h
66210+++ b/fs/cachefiles/internal.h
66211@@ -56,7 +56,7 @@ struct cachefiles_cache {
66212 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
66213 struct rb_root active_nodes; /* active nodes (can't be culled) */
66214 rwlock_t active_lock; /* lock for active_nodes */
66215- atomic_t gravecounter; /* graveyard uniquifier */
66216+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
66217 unsigned frun_percent; /* when to stop culling (% files) */
66218 unsigned fcull_percent; /* when to start culling (% files) */
66219 unsigned fstop_percent; /* when to stop allocating (% files) */
66220@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
66221 * proc.c
66222 */
66223 #ifdef CONFIG_CACHEFILES_HISTOGRAM
66224-extern atomic_t cachefiles_lookup_histogram[HZ];
66225-extern atomic_t cachefiles_mkdir_histogram[HZ];
66226-extern atomic_t cachefiles_create_histogram[HZ];
66227+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66228+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66229+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
66230
66231 extern int __init cachefiles_proc_init(void);
66232 extern void cachefiles_proc_cleanup(void);
66233 static inline
66234-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
66235+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
66236 {
66237 unsigned long jif = jiffies - start_jif;
66238 if (jif >= HZ)
66239 jif = HZ - 1;
66240- atomic_inc(&histogram[jif]);
66241+ atomic_inc_unchecked(&histogram[jif]);
66242 }
66243
66244 #else
66245diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
66246index 14ac480..a62766c 100644
66247--- a/fs/cachefiles/namei.c
66248+++ b/fs/cachefiles/namei.c
66249@@ -250,7 +250,7 @@ try_again:
66250 /* first step is to make up a grave dentry in the graveyard */
66251 sprintf(nbuffer, "%08x%08x",
66252 (uint32_t) get_seconds(),
66253- (uint32_t) atomic_inc_return(&cache->gravecounter));
66254+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
66255
66256 /* do the multiway lock magic */
66257 trap = lock_rename(cache->graveyard, dir);
66258diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
66259index eccd339..4c1d995 100644
66260--- a/fs/cachefiles/proc.c
66261+++ b/fs/cachefiles/proc.c
66262@@ -14,9 +14,9 @@
66263 #include <linux/seq_file.h>
66264 #include "internal.h"
66265
66266-atomic_t cachefiles_lookup_histogram[HZ];
66267-atomic_t cachefiles_mkdir_histogram[HZ];
66268-atomic_t cachefiles_create_histogram[HZ];
66269+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66270+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66271+atomic_unchecked_t cachefiles_create_histogram[HZ];
66272
66273 /*
66274 * display the latency histogram
66275@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
66276 return 0;
66277 default:
66278 index = (unsigned long) v - 3;
66279- x = atomic_read(&cachefiles_lookup_histogram[index]);
66280- y = atomic_read(&cachefiles_mkdir_histogram[index]);
66281- z = atomic_read(&cachefiles_create_histogram[index]);
66282+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
66283+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
66284+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
66285 if (x == 0 && y == 0 && z == 0)
66286 return 0;
66287
66288diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
66289index a6c8c6f..5cf8517 100644
66290--- a/fs/cachefiles/rdwr.c
66291+++ b/fs/cachefiles/rdwr.c
66292@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
66293 old_fs = get_fs();
66294 set_fs(KERNEL_DS);
66295 ret = file->f_op->write(
66296- file, (const void __user *) data, len, &pos);
66297+ file, (const void __force_user *) data, len, &pos);
66298 set_fs(old_fs);
66299 kunmap(page);
66300 if (ret != len)
66301diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
66302index 20692fb..0098fb7 100644
66303--- a/fs/cifs/asn1.c
66304+++ b/fs/cifs/asn1.c
66305@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
66306
66307 static int
66308 asn1_oid_decode(struct asn1_ctx *ctx,
66309+ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
66310+static int
66311+asn1_oid_decode(struct asn1_ctx *ctx,
66312 unsigned char *eoc, unsigned long **oid, unsigned int *len)
66313 {
66314 unsigned long subid;
66315diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
66316index 42cec2a..2aba466 100644
66317--- a/fs/cifs/cifs_debug.c
66318+++ b/fs/cifs/cifs_debug.c
66319@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
66320 tcon = list_entry(tmp3,
66321 struct cifsTconInfo,
66322 tcon_list);
66323- atomic_set(&tcon->num_smbs_sent, 0);
66324- atomic_set(&tcon->num_writes, 0);
66325- atomic_set(&tcon->num_reads, 0);
66326- atomic_set(&tcon->num_oplock_brks, 0);
66327- atomic_set(&tcon->num_opens, 0);
66328- atomic_set(&tcon->num_posixopens, 0);
66329- atomic_set(&tcon->num_posixmkdirs, 0);
66330- atomic_set(&tcon->num_closes, 0);
66331- atomic_set(&tcon->num_deletes, 0);
66332- atomic_set(&tcon->num_mkdirs, 0);
66333- atomic_set(&tcon->num_rmdirs, 0);
66334- atomic_set(&tcon->num_renames, 0);
66335- atomic_set(&tcon->num_t2renames, 0);
66336- atomic_set(&tcon->num_ffirst, 0);
66337- atomic_set(&tcon->num_fnext, 0);
66338- atomic_set(&tcon->num_fclose, 0);
66339- atomic_set(&tcon->num_hardlinks, 0);
66340- atomic_set(&tcon->num_symlinks, 0);
66341- atomic_set(&tcon->num_locks, 0);
66342+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
66343+ atomic_set_unchecked(&tcon->num_writes, 0);
66344+ atomic_set_unchecked(&tcon->num_reads, 0);
66345+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
66346+ atomic_set_unchecked(&tcon->num_opens, 0);
66347+ atomic_set_unchecked(&tcon->num_posixopens, 0);
66348+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
66349+ atomic_set_unchecked(&tcon->num_closes, 0);
66350+ atomic_set_unchecked(&tcon->num_deletes, 0);
66351+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
66352+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
66353+ atomic_set_unchecked(&tcon->num_renames, 0);
66354+ atomic_set_unchecked(&tcon->num_t2renames, 0);
66355+ atomic_set_unchecked(&tcon->num_ffirst, 0);
66356+ atomic_set_unchecked(&tcon->num_fnext, 0);
66357+ atomic_set_unchecked(&tcon->num_fclose, 0);
66358+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
66359+ atomic_set_unchecked(&tcon->num_symlinks, 0);
66360+ atomic_set_unchecked(&tcon->num_locks, 0);
66361 }
66362 }
66363 }
66364@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
66365 if (tcon->need_reconnect)
66366 seq_puts(m, "\tDISCONNECTED ");
66367 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
66368- atomic_read(&tcon->num_smbs_sent),
66369- atomic_read(&tcon->num_oplock_brks));
66370+ atomic_read_unchecked(&tcon->num_smbs_sent),
66371+ atomic_read_unchecked(&tcon->num_oplock_brks));
66372 seq_printf(m, "\nReads: %d Bytes: %lld",
66373- atomic_read(&tcon->num_reads),
66374+ atomic_read_unchecked(&tcon->num_reads),
66375 (long long)(tcon->bytes_read));
66376 seq_printf(m, "\nWrites: %d Bytes: %lld",
66377- atomic_read(&tcon->num_writes),
66378+ atomic_read_unchecked(&tcon->num_writes),
66379 (long long)(tcon->bytes_written));
66380 seq_printf(m, "\nFlushes: %d",
66381- atomic_read(&tcon->num_flushes));
66382+ atomic_read_unchecked(&tcon->num_flushes));
66383 seq_printf(m, "\nLocks: %d HardLinks: %d "
66384 "Symlinks: %d",
66385- atomic_read(&tcon->num_locks),
66386- atomic_read(&tcon->num_hardlinks),
66387- atomic_read(&tcon->num_symlinks));
66388+ atomic_read_unchecked(&tcon->num_locks),
66389+ atomic_read_unchecked(&tcon->num_hardlinks),
66390+ atomic_read_unchecked(&tcon->num_symlinks));
66391 seq_printf(m, "\nOpens: %d Closes: %d "
66392 "Deletes: %d",
66393- atomic_read(&tcon->num_opens),
66394- atomic_read(&tcon->num_closes),
66395- atomic_read(&tcon->num_deletes));
66396+ atomic_read_unchecked(&tcon->num_opens),
66397+ atomic_read_unchecked(&tcon->num_closes),
66398+ atomic_read_unchecked(&tcon->num_deletes));
66399 seq_printf(m, "\nPosix Opens: %d "
66400 "Posix Mkdirs: %d",
66401- atomic_read(&tcon->num_posixopens),
66402- atomic_read(&tcon->num_posixmkdirs));
66403+ atomic_read_unchecked(&tcon->num_posixopens),
66404+ atomic_read_unchecked(&tcon->num_posixmkdirs));
66405 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
66406- atomic_read(&tcon->num_mkdirs),
66407- atomic_read(&tcon->num_rmdirs));
66408+ atomic_read_unchecked(&tcon->num_mkdirs),
66409+ atomic_read_unchecked(&tcon->num_rmdirs));
66410 seq_printf(m, "\nRenames: %d T2 Renames %d",
66411- atomic_read(&tcon->num_renames),
66412- atomic_read(&tcon->num_t2renames));
66413+ atomic_read_unchecked(&tcon->num_renames),
66414+ atomic_read_unchecked(&tcon->num_t2renames));
66415 seq_printf(m, "\nFindFirst: %d FNext %d "
66416 "FClose %d",
66417- atomic_read(&tcon->num_ffirst),
66418- atomic_read(&tcon->num_fnext),
66419- atomic_read(&tcon->num_fclose));
66420+ atomic_read_unchecked(&tcon->num_ffirst),
66421+ atomic_read_unchecked(&tcon->num_fnext),
66422+ atomic_read_unchecked(&tcon->num_fclose));
66423 }
66424 }
66425 }
66426diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
66427index 1445407..68cb0dc 100644
66428--- a/fs/cifs/cifsfs.c
66429+++ b/fs/cifs/cifsfs.c
66430@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
66431 cifs_req_cachep = kmem_cache_create("cifs_request",
66432 CIFSMaxBufSize +
66433 MAX_CIFS_HDR_SIZE, 0,
66434- SLAB_HWCACHE_ALIGN, NULL);
66435+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
66436 if (cifs_req_cachep == NULL)
66437 return -ENOMEM;
66438
66439@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
66440 efficient to alloc 1 per page off the slab compared to 17K (5page)
66441 alloc of large cifs buffers even when page debugging is on */
66442 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
66443- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
66444+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
66445 NULL);
66446 if (cifs_sm_req_cachep == NULL) {
66447 mempool_destroy(cifs_req_poolp);
66448@@ -991,8 +991,8 @@ init_cifs(void)
66449 atomic_set(&bufAllocCount, 0);
66450 atomic_set(&smBufAllocCount, 0);
66451 #ifdef CONFIG_CIFS_STATS2
66452- atomic_set(&totBufAllocCount, 0);
66453- atomic_set(&totSmBufAllocCount, 0);
66454+ atomic_set_unchecked(&totBufAllocCount, 0);
66455+ atomic_set_unchecked(&totSmBufAllocCount, 0);
66456 #endif /* CONFIG_CIFS_STATS2 */
66457
66458 atomic_set(&midCount, 0);
66459diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
66460index e29581e..1c22bab 100644
66461--- a/fs/cifs/cifsglob.h
66462+++ b/fs/cifs/cifsglob.h
66463@@ -252,28 +252,28 @@ struct cifsTconInfo {
66464 __u16 Flags; /* optional support bits */
66465 enum statusEnum tidStatus;
66466 #ifdef CONFIG_CIFS_STATS
66467- atomic_t num_smbs_sent;
66468- atomic_t num_writes;
66469- atomic_t num_reads;
66470- atomic_t num_flushes;
66471- atomic_t num_oplock_brks;
66472- atomic_t num_opens;
66473- atomic_t num_closes;
66474- atomic_t num_deletes;
66475- atomic_t num_mkdirs;
66476- atomic_t num_posixopens;
66477- atomic_t num_posixmkdirs;
66478- atomic_t num_rmdirs;
66479- atomic_t num_renames;
66480- atomic_t num_t2renames;
66481- atomic_t num_ffirst;
66482- atomic_t num_fnext;
66483- atomic_t num_fclose;
66484- atomic_t num_hardlinks;
66485- atomic_t num_symlinks;
66486- atomic_t num_locks;
66487- atomic_t num_acl_get;
66488- atomic_t num_acl_set;
66489+ atomic_unchecked_t num_smbs_sent;
66490+ atomic_unchecked_t num_writes;
66491+ atomic_unchecked_t num_reads;
66492+ atomic_unchecked_t num_flushes;
66493+ atomic_unchecked_t num_oplock_brks;
66494+ atomic_unchecked_t num_opens;
66495+ atomic_unchecked_t num_closes;
66496+ atomic_unchecked_t num_deletes;
66497+ atomic_unchecked_t num_mkdirs;
66498+ atomic_unchecked_t num_posixopens;
66499+ atomic_unchecked_t num_posixmkdirs;
66500+ atomic_unchecked_t num_rmdirs;
66501+ atomic_unchecked_t num_renames;
66502+ atomic_unchecked_t num_t2renames;
66503+ atomic_unchecked_t num_ffirst;
66504+ atomic_unchecked_t num_fnext;
66505+ atomic_unchecked_t num_fclose;
66506+ atomic_unchecked_t num_hardlinks;
66507+ atomic_unchecked_t num_symlinks;
66508+ atomic_unchecked_t num_locks;
66509+ atomic_unchecked_t num_acl_get;
66510+ atomic_unchecked_t num_acl_set;
66511 #ifdef CONFIG_CIFS_STATS2
66512 unsigned long long time_writes;
66513 unsigned long long time_reads;
66514@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
66515 }
66516
66517 #ifdef CONFIG_CIFS_STATS
66518-#define cifs_stats_inc atomic_inc
66519+#define cifs_stats_inc atomic_inc_unchecked
66520
66521 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
66522 unsigned int bytes)
66523@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
66524 /* Various Debug counters */
66525 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
66526 #ifdef CONFIG_CIFS_STATS2
66527-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
66528-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
66529+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
66530+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
66531 #endif
66532 GLOBAL_EXTERN atomic_t smBufAllocCount;
66533 GLOBAL_EXTERN atomic_t midCount;
66534diff --git a/fs/cifs/link.c b/fs/cifs/link.c
66535index fc1e048..28b3441 100644
66536--- a/fs/cifs/link.c
66537+++ b/fs/cifs/link.c
66538@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
66539
66540 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
66541 {
66542- char *p = nd_get_link(nd);
66543+ const char *p = nd_get_link(nd);
66544 if (!IS_ERR(p))
66545 kfree(p);
66546 }
66547diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
66548index 95b82e8..12a538d 100644
66549--- a/fs/cifs/misc.c
66550+++ b/fs/cifs/misc.c
66551@@ -155,7 +155,7 @@ cifs_buf_get(void)
66552 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
66553 atomic_inc(&bufAllocCount);
66554 #ifdef CONFIG_CIFS_STATS2
66555- atomic_inc(&totBufAllocCount);
66556+ atomic_inc_unchecked(&totBufAllocCount);
66557 #endif /* CONFIG_CIFS_STATS2 */
66558 }
66559
66560@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
66561 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
66562 atomic_inc(&smBufAllocCount);
66563 #ifdef CONFIG_CIFS_STATS2
66564- atomic_inc(&totSmBufAllocCount);
66565+ atomic_inc_unchecked(&totSmBufAllocCount);
66566 #endif /* CONFIG_CIFS_STATS2 */
66567
66568 }
66569diff --git a/fs/coda/cache.c b/fs/coda/cache.c
66570index a5bf577..6d19845 100644
66571--- a/fs/coda/cache.c
66572+++ b/fs/coda/cache.c
66573@@ -24,14 +24,14 @@
66574 #include <linux/coda_fs_i.h>
66575 #include <linux/coda_cache.h>
66576
66577-static atomic_t permission_epoch = ATOMIC_INIT(0);
66578+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
66579
66580 /* replace or extend an acl cache hit */
66581 void coda_cache_enter(struct inode *inode, int mask)
66582 {
66583 struct coda_inode_info *cii = ITOC(inode);
66584
66585- cii->c_cached_epoch = atomic_read(&permission_epoch);
66586+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
66587 if (cii->c_uid != current_fsuid()) {
66588 cii->c_uid = current_fsuid();
66589 cii->c_cached_perm = mask;
66590@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
66591 void coda_cache_clear_inode(struct inode *inode)
66592 {
66593 struct coda_inode_info *cii = ITOC(inode);
66594- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
66595+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
66596 }
66597
66598 /* remove all acl caches */
66599 void coda_cache_clear_all(struct super_block *sb)
66600 {
66601- atomic_inc(&permission_epoch);
66602+ atomic_inc_unchecked(&permission_epoch);
66603 }
66604
66605
66606@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
66607
66608 hit = (mask & cii->c_cached_perm) == mask &&
66609 cii->c_uid == current_fsuid() &&
66610- cii->c_cached_epoch == atomic_read(&permission_epoch);
66611+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
66612
66613 return hit;
66614 }
66615diff --git a/fs/compat.c b/fs/compat.c
66616index d1e2411..9a958d2 100644
66617--- a/fs/compat.c
66618+++ b/fs/compat.c
66619@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
66620 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
66621 {
66622 compat_ino_t ino = stat->ino;
66623- typeof(ubuf->st_uid) uid = 0;
66624- typeof(ubuf->st_gid) gid = 0;
66625+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
66626+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
66627 int err;
66628
66629 SET_UID(uid, stat->uid);
66630@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
66631
66632 set_fs(KERNEL_DS);
66633 /* The __user pointer cast is valid because of the set_fs() */
66634- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
66635+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
66636 set_fs(oldfs);
66637 /* truncating is ok because it's a user address */
66638 if (!ret)
66639@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
66640
66641 struct compat_readdir_callback {
66642 struct compat_old_linux_dirent __user *dirent;
66643+ struct file * file;
66644 int result;
66645 };
66646
66647@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
66648 buf->result = -EOVERFLOW;
66649 return -EOVERFLOW;
66650 }
66651+
66652+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66653+ return 0;
66654+
66655 buf->result++;
66656 dirent = buf->dirent;
66657 if (!access_ok(VERIFY_WRITE, dirent,
66658@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
66659
66660 buf.result = 0;
66661 buf.dirent = dirent;
66662+ buf.file = file;
66663
66664 error = vfs_readdir(file, compat_fillonedir, &buf);
66665 if (buf.result)
66666@@ -899,6 +905,7 @@ struct compat_linux_dirent {
66667 struct compat_getdents_callback {
66668 struct compat_linux_dirent __user *current_dir;
66669 struct compat_linux_dirent __user *previous;
66670+ struct file * file;
66671 int count;
66672 int error;
66673 };
66674@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
66675 buf->error = -EOVERFLOW;
66676 return -EOVERFLOW;
66677 }
66678+
66679+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66680+ return 0;
66681+
66682 dirent = buf->previous;
66683 if (dirent) {
66684 if (__put_user(offset, &dirent->d_off))
66685@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
66686 buf.previous = NULL;
66687 buf.count = count;
66688 buf.error = 0;
66689+ buf.file = file;
66690
66691 error = vfs_readdir(file, compat_filldir, &buf);
66692 if (error >= 0)
66693@@ -987,6 +999,7 @@ out:
66694 struct compat_getdents_callback64 {
66695 struct linux_dirent64 __user *current_dir;
66696 struct linux_dirent64 __user *previous;
66697+ struct file * file;
66698 int count;
66699 int error;
66700 };
66701@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
66702 buf->error = -EINVAL; /* only used if we fail.. */
66703 if (reclen > buf->count)
66704 return -EINVAL;
66705+
66706+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66707+ return 0;
66708+
66709 dirent = buf->previous;
66710
66711 if (dirent) {
66712@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
66713 buf.previous = NULL;
66714 buf.count = count;
66715 buf.error = 0;
66716+ buf.file = file;
66717
66718 error = vfs_readdir(file, compat_filldir64, &buf);
66719 if (error >= 0)
66720 error = buf.error;
66721 lastdirent = buf.previous;
66722 if (lastdirent) {
66723- typeof(lastdirent->d_off) d_off = file->f_pos;
66724+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
66725 if (__put_user_unaligned(d_off, &lastdirent->d_off))
66726 error = -EFAULT;
66727 else
66728@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
66729 * verify all the pointers
66730 */
66731 ret = -EINVAL;
66732- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
66733+ if (nr_segs > UIO_MAXIOV)
66734 goto out;
66735 if (!file->f_op)
66736 goto out;
66737@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
66738 compat_uptr_t __user *envp,
66739 struct pt_regs * regs)
66740 {
66741+#ifdef CONFIG_GRKERNSEC
66742+ struct file *old_exec_file;
66743+ struct acl_subject_label *old_acl;
66744+ struct rlimit old_rlim[RLIM_NLIMITS];
66745+#endif
66746 struct linux_binprm *bprm;
66747 struct file *file;
66748 struct files_struct *displaced;
66749 bool clear_in_exec;
66750 int retval;
66751+ const struct cred *cred = current_cred();
66752+
66753+ /*
66754+ * We move the actual failure in case of RLIMIT_NPROC excess from
66755+ * set*uid() to execve() because too many poorly written programs
66756+ * don't check setuid() return code. Here we additionally recheck
66757+ * whether NPROC limit is still exceeded.
66758+ */
66759+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
66760+
66761+ if ((current->flags & PF_NPROC_EXCEEDED) &&
66762+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66763+ retval = -EAGAIN;
66764+ goto out_ret;
66765+ }
66766+
66767+ /* We're below the limit (still or again), so we don't want to make
66768+ * further execve() calls fail. */
66769+ current->flags &= ~PF_NPROC_EXCEEDED;
66770
66771 retval = unshare_files(&displaced);
66772 if (retval)
66773@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
66774 if (IS_ERR(file))
66775 goto out_unmark;
66776
66777+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
66778+ retval = -EPERM;
66779+ goto out_file;
66780+ }
66781+
66782 sched_exec();
66783
66784 bprm->file = file;
66785 bprm->filename = filename;
66786 bprm->interp = filename;
66787
66788+ if (gr_process_user_ban()) {
66789+ retval = -EPERM;
66790+ goto out_file;
66791+ }
66792+
66793+ retval = -EACCES;
66794+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
66795+ goto out_file;
66796+
66797 retval = bprm_mm_init(bprm);
66798 if (retval)
66799 goto out_file;
66800@@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename,
66801 if (retval < 0)
66802 goto out;
66803
66804+#ifdef CONFIG_GRKERNSEC
66805+ old_acl = current->acl;
66806+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
66807+ old_exec_file = current->exec_file;
66808+ get_file(file);
66809+ current->exec_file = file;
66810+#endif
66811+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66812+ /* limit suid stack to 8MB
66813+ we saved the old limits above and will restore them if this exec fails
66814+ */
66815+ if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid()))
66816+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
66817+#endif
66818+
66819+ if (!gr_tpe_allow(file)) {
66820+ retval = -EACCES;
66821+ goto out_fail;
66822+ }
66823+
66824+ if (gr_check_crash_exec(file)) {
66825+ retval = -EACCES;
66826+ goto out_fail;
66827+ }
66828+
66829+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
66830+ bprm->unsafe);
66831+ if (retval < 0)
66832+ goto out_fail;
66833+
66834 retval = copy_strings_kernel(1, &bprm->filename, bprm);
66835 if (retval < 0)
66836- goto out;
66837+ goto out_fail;
66838
66839 bprm->exec = bprm->p;
66840 retval = compat_copy_strings(bprm->envc, envp, bprm);
66841 if (retval < 0)
66842- goto out;
66843+ goto out_fail;
66844
66845 retval = compat_copy_strings(bprm->argc, argv, bprm);
66846 if (retval < 0)
66847- goto out;
66848+ goto out_fail;
66849+
66850+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
66851+
66852+ gr_handle_exec_args_compat(bprm, argv);
66853
66854 retval = search_binary_handler(bprm, regs);
66855 if (retval < 0)
66856- goto out;
66857+ goto out_fail;
66858+#ifdef CONFIG_GRKERNSEC
66859+ if (old_exec_file)
66860+ fput(old_exec_file);
66861+#endif
66862
66863 /* execve succeeded */
66864+ increment_exec_counter();
66865 current->fs->in_exec = 0;
66866 current->in_execve = 0;
66867 acct_update_integrals(current);
66868@@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename,
66869 put_files_struct(displaced);
66870 return retval;
66871
66872+out_fail:
66873+#ifdef CONFIG_GRKERNSEC
66874+ current->acl = old_acl;
66875+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
66876+ fput(current->exec_file);
66877+ current->exec_file = old_exec_file;
66878+#endif
66879+
66880 out:
66881 if (bprm->mm) {
66882 acct_arg_size(bprm, 0);
66883@@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
66884 struct fdtable *fdt;
66885 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
66886
66887+ pax_track_stack();
66888+
66889 if (n < 0)
66890 goto out_nofds;
66891
66892@@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
66893 oldfs = get_fs();
66894 set_fs(KERNEL_DS);
66895 /* The __user pointer casts are valid because of the set_fs() */
66896- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
66897+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
66898 set_fs(oldfs);
66899
66900 if (err)
66901diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
66902index 0adced2..bbb1b0d 100644
66903--- a/fs/compat_binfmt_elf.c
66904+++ b/fs/compat_binfmt_elf.c
66905@@ -29,10 +29,12 @@
66906 #undef elfhdr
66907 #undef elf_phdr
66908 #undef elf_note
66909+#undef elf_dyn
66910 #undef elf_addr_t
66911 #define elfhdr elf32_hdr
66912 #define elf_phdr elf32_phdr
66913 #define elf_note elf32_note
66914+#define elf_dyn Elf32_Dyn
66915 #define elf_addr_t Elf32_Addr
66916
66917 /*
66918diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
66919index d84e705..d8c364c 100644
66920--- a/fs/compat_ioctl.c
66921+++ b/fs/compat_ioctl.c
66922@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
66923 up = (struct compat_video_spu_palette __user *) arg;
66924 err = get_user(palp, &up->palette);
66925 err |= get_user(length, &up->length);
66926+ if (err)
66927+ return -EFAULT;
66928
66929 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
66930 err = put_user(compat_ptr(palp), &up_native->palette);
66931@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
66932 return -EFAULT;
66933 if (__get_user(udata, &ss32->iomem_base))
66934 return -EFAULT;
66935- ss.iomem_base = compat_ptr(udata);
66936+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
66937 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
66938 __get_user(ss.port_high, &ss32->port_high))
66939 return -EFAULT;
66940@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
66941 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
66942 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
66943 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
66944- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66945+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66946 return -EFAULT;
66947
66948 return ioctl_preallocate(file, p);
66949diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
66950index 8e48b52..f01ed91 100644
66951--- a/fs/configfs/dir.c
66952+++ b/fs/configfs/dir.c
66953@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66954 }
66955 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
66956 struct configfs_dirent *next;
66957- const char * name;
66958+ const unsigned char * name;
66959+ char d_name[sizeof(next->s_dentry->d_iname)];
66960 int len;
66961
66962 next = list_entry(p, struct configfs_dirent,
66963@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66964 continue;
66965
66966 name = configfs_get_name(next);
66967- len = strlen(name);
66968+ if (next->s_dentry && name == next->s_dentry->d_iname) {
66969+ len = next->s_dentry->d_name.len;
66970+ memcpy(d_name, name, len);
66971+ name = d_name;
66972+ } else
66973+ len = strlen(name);
66974 if (next->s_dentry)
66975 ino = next->s_dentry->d_inode->i_ino;
66976 else
66977diff --git a/fs/dcache.c b/fs/dcache.c
66978index 44c0aea..2529092 100644
66979--- a/fs/dcache.c
66980+++ b/fs/dcache.c
66981@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
66982
66983 static struct kmem_cache *dentry_cache __read_mostly;
66984
66985-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66986-
66987 /*
66988 * This is the single most critical data structure when it comes
66989 * to the dcache: the hashtable for lookups. Somebody should try
66990@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
66991 mempages -= reserve;
66992
66993 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
66994- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
66995+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
66996
66997 dcache_init();
66998 inode_init();
66999diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
67000index 39c6ee8..dcee0f1 100644
67001--- a/fs/debugfs/inode.c
67002+++ b/fs/debugfs/inode.c
67003@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
67004 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
67005 {
67006 return debugfs_create_file(name,
67007+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67008+ S_IFDIR | S_IRWXU,
67009+#else
67010 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
67011+#endif
67012 parent, NULL, NULL);
67013 }
67014 EXPORT_SYMBOL_GPL(debugfs_create_dir);
67015diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
67016index c010ecf..a8d8c59 100644
67017--- a/fs/dlm/lockspace.c
67018+++ b/fs/dlm/lockspace.c
67019@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
67020 kfree(ls);
67021 }
67022
67023-static struct sysfs_ops dlm_attr_ops = {
67024+static const struct sysfs_ops dlm_attr_ops = {
67025 .show = dlm_attr_show,
67026 .store = dlm_attr_store,
67027 };
67028diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
67029index 7e164bb..62fa913 100644
67030--- a/fs/ecryptfs/crypto.c
67031+++ b/fs/ecryptfs/crypto.c
67032@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
67033 rc);
67034 goto out;
67035 }
67036- if (unlikely(ecryptfs_verbosity > 0)) {
67037- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
67038- "with iv:\n");
67039- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67040- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67041- "encryption:\n");
67042- ecryptfs_dump_hex((char *)
67043- (page_address(page)
67044- + (extent_offset * crypt_stat->extent_size)),
67045- 8);
67046- }
67047 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
67048 page, (extent_offset
67049 * crypt_stat->extent_size),
67050@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
67051 goto out;
67052 }
67053 rc = 0;
67054- if (unlikely(ecryptfs_verbosity > 0)) {
67055- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
67056- "rc = [%d]\n", (extent_base + extent_offset),
67057- rc);
67058- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67059- "encryption:\n");
67060- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
67061- }
67062 out:
67063 return rc;
67064 }
67065@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67066 rc);
67067 goto out;
67068 }
67069- if (unlikely(ecryptfs_verbosity > 0)) {
67070- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
67071- "with iv:\n");
67072- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67073- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67074- "decryption:\n");
67075- ecryptfs_dump_hex((char *)
67076- (page_address(enc_extent_page)
67077- + (extent_offset * crypt_stat->extent_size)),
67078- 8);
67079- }
67080 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
67081 (extent_offset
67082 * crypt_stat->extent_size),
67083@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67084 goto out;
67085 }
67086 rc = 0;
67087- if (unlikely(ecryptfs_verbosity > 0)) {
67088- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
67089- "rc = [%d]\n", (extent_base + extent_offset),
67090- rc);
67091- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67092- "decryption:\n");
67093- ecryptfs_dump_hex((char *)(page_address(page)
67094- + (extent_offset
67095- * crypt_stat->extent_size)), 8);
67096- }
67097 out:
67098 return rc;
67099 }
67100diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
67101index 502b09f..49129f4 100644
67102--- a/fs/ecryptfs/file.c
67103+++ b/fs/ecryptfs/file.c
67104@@ -348,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
67105 #ifdef CONFIG_COMPAT
67106 .compat_ioctl = ecryptfs_compat_ioctl,
67107 #endif
67108- .mmap = generic_file_mmap,
67109 .open = ecryptfs_open,
67110 .flush = ecryptfs_flush,
67111 .release = ecryptfs_release,
67112diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
67113index 90a6087..fa05803 100644
67114--- a/fs/ecryptfs/inode.c
67115+++ b/fs/ecryptfs/inode.c
67116@@ -647,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
67117 old_fs = get_fs();
67118 set_fs(get_ds());
67119 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
67120- (char __user *)lower_buf,
67121+ (char __force_user *)lower_buf,
67122 lower_bufsiz);
67123 set_fs(old_fs);
67124 if (rc < 0)
67125@@ -693,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
67126 }
67127 old_fs = get_fs();
67128 set_fs(get_ds());
67129- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
67130+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
67131 set_fs(old_fs);
67132 if (rc < 0)
67133 goto out_free;
67134diff --git a/fs/exec.c b/fs/exec.c
67135index 86fafc6..6a109b9 100644
67136--- a/fs/exec.c
67137+++ b/fs/exec.c
67138@@ -56,12 +56,28 @@
67139 #include <linux/fsnotify.h>
67140 #include <linux/fs_struct.h>
67141 #include <linux/pipe_fs_i.h>
67142+#include <linux/random.h>
67143+#include <linux/seq_file.h>
67144+
67145+#ifdef CONFIG_PAX_REFCOUNT
67146+#include <linux/kallsyms.h>
67147+#include <linux/kdebug.h>
67148+#endif
67149
67150 #include <asm/uaccess.h>
67151 #include <asm/mmu_context.h>
67152 #include <asm/tlb.h>
67153 #include "internal.h"
67154
67155+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
67156+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
67157+#endif
67158+
67159+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
67160+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
67161+EXPORT_SYMBOL(pax_set_initial_flags_func);
67162+#endif
67163+
67164 int core_uses_pid;
67165 char core_pattern[CORENAME_MAX_SIZE] = "core";
67166 unsigned int core_pipe_limit;
67167@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67168 int write)
67169 {
67170 struct page *page;
67171- int ret;
67172
67173-#ifdef CONFIG_STACK_GROWSUP
67174- if (write) {
67175- ret = expand_stack_downwards(bprm->vma, pos);
67176- if (ret < 0)
67177- return NULL;
67178- }
67179-#endif
67180- ret = get_user_pages(current, bprm->mm, pos,
67181- 1, write, 1, &page, NULL);
67182- if (ret <= 0)
67183+ if (0 > expand_stack_downwards(bprm->vma, pos))
67184+ return NULL;
67185+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
67186 return NULL;
67187
67188 if (write) {
67189@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67190 if (size <= ARG_MAX)
67191 return page;
67192
67193+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67194+ // only allow 512KB for argv+env on suid/sgid binaries
67195+ // to prevent easy ASLR exhaustion
67196+ if (((bprm->cred->euid != current_euid()) ||
67197+ (bprm->cred->egid != current_egid())) &&
67198+ (size > (512 * 1024))) {
67199+ put_page(page);
67200+ return NULL;
67201+ }
67202+#endif
67203+
67204 /*
67205 * Limit to 1/4-th the stack size for the argv+env strings.
67206 * This ensures that:
67207@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67208 vma->vm_end = STACK_TOP_MAX;
67209 vma->vm_start = vma->vm_end - PAGE_SIZE;
67210 vma->vm_flags = VM_STACK_FLAGS;
67211+
67212+#ifdef CONFIG_PAX_SEGMEXEC
67213+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
67214+#endif
67215+
67216 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67217
67218 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
67219@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67220 mm->stack_vm = mm->total_vm = 1;
67221 up_write(&mm->mmap_sem);
67222 bprm->p = vma->vm_end - sizeof(void *);
67223+
67224+#ifdef CONFIG_PAX_RANDUSTACK
67225+ if (randomize_va_space)
67226+ bprm->p ^= random32() & ~PAGE_MASK;
67227+#endif
67228+
67229 return 0;
67230 err:
67231 up_write(&mm->mmap_sem);
67232@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
67233 int r;
67234 mm_segment_t oldfs = get_fs();
67235 set_fs(KERNEL_DS);
67236- r = copy_strings(argc, (char __user * __user *)argv, bprm);
67237+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
67238 set_fs(oldfs);
67239 return r;
67240 }
67241@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67242 unsigned long new_end = old_end - shift;
67243 struct mmu_gather *tlb;
67244
67245- BUG_ON(new_start > new_end);
67246+ if (new_start >= new_end || new_start < mmap_min_addr)
67247+ return -ENOMEM;
67248
67249 /*
67250 * ensure there are no vmas between where we want to go
67251@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67252 if (vma != find_vma(mm, new_start))
67253 return -EFAULT;
67254
67255+#ifdef CONFIG_PAX_SEGMEXEC
67256+ BUG_ON(pax_find_mirror_vma(vma));
67257+#endif
67258+
67259 /*
67260 * cover the whole range: [new_start, old_end)
67261 */
67262@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
67263 stack_top = arch_align_stack(stack_top);
67264 stack_top = PAGE_ALIGN(stack_top);
67265
67266- if (unlikely(stack_top < mmap_min_addr) ||
67267- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
67268- return -ENOMEM;
67269-
67270 stack_shift = vma->vm_end - stack_top;
67271
67272 bprm->p -= stack_shift;
67273@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
67274 bprm->exec -= stack_shift;
67275
67276 down_write(&mm->mmap_sem);
67277+
67278+ /* Move stack pages down in memory. */
67279+ if (stack_shift) {
67280+ ret = shift_arg_pages(vma, stack_shift);
67281+ if (ret)
67282+ goto out_unlock;
67283+ }
67284+
67285 vm_flags = VM_STACK_FLAGS;
67286
67287 /*
67288@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
67289 vm_flags &= ~VM_EXEC;
67290 vm_flags |= mm->def_flags;
67291
67292+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67293+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
67294+ vm_flags &= ~VM_EXEC;
67295+
67296+#ifdef CONFIG_PAX_MPROTECT
67297+ if (mm->pax_flags & MF_PAX_MPROTECT)
67298+ vm_flags &= ~VM_MAYEXEC;
67299+#endif
67300+
67301+ }
67302+#endif
67303+
67304 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
67305 vm_flags);
67306 if (ret)
67307 goto out_unlock;
67308 BUG_ON(prev != vma);
67309
67310- /* Move stack pages down in memory. */
67311- if (stack_shift) {
67312- ret = shift_arg_pages(vma, stack_shift);
67313- if (ret)
67314- goto out_unlock;
67315- }
67316-
67317 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
67318 stack_size = vma->vm_end - vma->vm_start;
67319 /*
67320@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
67321 old_fs = get_fs();
67322 set_fs(get_ds());
67323 /* The cast to a user pointer is valid due to the set_fs() */
67324- result = vfs_read(file, (void __user *)addr, count, &pos);
67325+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
67326 set_fs(old_fs);
67327 return result;
67328 }
67329@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
67330 perf_event_comm(tsk);
67331 }
67332
67333+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
67334+{
67335+ int i, ch;
67336+
67337+ /* Copies the binary name from after last slash */
67338+ for (i = 0; (ch = *(fn++)) != '\0';) {
67339+ if (ch == '/')
67340+ i = 0; /* overwrite what we wrote */
67341+ else
67342+ if (i < len - 1)
67343+ tcomm[i++] = ch;
67344+ }
67345+ tcomm[i] = '\0';
67346+}
67347+
67348 int flush_old_exec(struct linux_binprm * bprm)
67349 {
67350 int retval;
67351@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
67352
67353 set_mm_exe_file(bprm->mm, bprm->file);
67354
67355+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
67356 /*
67357 * Release all of the old mmap stuff
67358 */
67359@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
67360
67361 void setup_new_exec(struct linux_binprm * bprm)
67362 {
67363- int i, ch;
67364- char * name;
67365- char tcomm[sizeof(current->comm)];
67366-
67367 arch_pick_mmap_layout(current->mm);
67368
67369 /* This is the point of no return */
67370@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
67371 else
67372 set_dumpable(current->mm, suid_dumpable);
67373
67374- name = bprm->filename;
67375-
67376- /* Copies the binary name from after last slash */
67377- for (i=0; (ch = *(name++)) != '\0';) {
67378- if (ch == '/')
67379- i = 0; /* overwrite what we wrote */
67380- else
67381- if (i < (sizeof(tcomm) - 1))
67382- tcomm[i++] = ch;
67383- }
67384- tcomm[i] = '\0';
67385- set_task_comm(current, tcomm);
67386+ set_task_comm(current, bprm->tcomm);
67387
67388 /* Set the new mm task size. We have to do that late because it may
67389 * depend on TIF_32BIT which is only updated in flush_thread() on
67390@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
67391 }
67392 rcu_read_unlock();
67393
67394- if (p->fs->users > n_fs) {
67395+ if (atomic_read(&p->fs->users) > n_fs) {
67396 bprm->unsafe |= LSM_UNSAFE_SHARE;
67397 } else {
67398 res = -EAGAIN;
67399@@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
67400
67401 EXPORT_SYMBOL(search_binary_handler);
67402
67403+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67404+DEFINE_PER_CPU(u64, exec_counter);
67405+static int __init init_exec_counters(void)
67406+{
67407+ unsigned int cpu;
67408+
67409+ for_each_possible_cpu(cpu) {
67410+ per_cpu(exec_counter, cpu) = (u64)cpu;
67411+ }
67412+
67413+ return 0;
67414+}
67415+early_initcall(init_exec_counters);
67416+#endif
67417+
67418 /*
67419 * sys_execve() executes a new program.
67420 */
67421@@ -1347,11 +1407,35 @@ int do_execve(char * filename,
67422 char __user *__user *envp,
67423 struct pt_regs * regs)
67424 {
67425+#ifdef CONFIG_GRKERNSEC
67426+ struct file *old_exec_file;
67427+ struct acl_subject_label *old_acl;
67428+ struct rlimit old_rlim[RLIM_NLIMITS];
67429+#endif
67430 struct linux_binprm *bprm;
67431 struct file *file;
67432 struct files_struct *displaced;
67433 bool clear_in_exec;
67434 int retval;
67435+ const struct cred *cred = current_cred();
67436+
67437+ /*
67438+ * We move the actual failure in case of RLIMIT_NPROC excess from
67439+ * set*uid() to execve() because too many poorly written programs
67440+ * don't check setuid() return code. Here we additionally recheck
67441+ * whether NPROC limit is still exceeded.
67442+ */
67443+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
67444+
67445+ if ((current->flags & PF_NPROC_EXCEEDED) &&
67446+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
67447+ retval = -EAGAIN;
67448+ goto out_ret;
67449+ }
67450+
67451+ /* We're below the limit (still or again), so we don't want to make
67452+ * further execve() calls fail. */
67453+ current->flags &= ~PF_NPROC_EXCEEDED;
67454
67455 retval = unshare_files(&displaced);
67456 if (retval)
67457@@ -1377,12 +1461,27 @@ int do_execve(char * filename,
67458 if (IS_ERR(file))
67459 goto out_unmark;
67460
67461+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
67462+ retval = -EPERM;
67463+ goto out_file;
67464+ }
67465+
67466 sched_exec();
67467
67468 bprm->file = file;
67469 bprm->filename = filename;
67470 bprm->interp = filename;
67471
67472+ if (gr_process_user_ban()) {
67473+ retval = -EPERM;
67474+ goto out_file;
67475+ }
67476+
67477+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
67478+ retval = -EACCES;
67479+ goto out_file;
67480+ }
67481+
67482 retval = bprm_mm_init(bprm);
67483 if (retval)
67484 goto out_file;
67485@@ -1399,25 +1498,66 @@ int do_execve(char * filename,
67486 if (retval < 0)
67487 goto out;
67488
67489+#ifdef CONFIG_GRKERNSEC
67490+ old_acl = current->acl;
67491+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
67492+ old_exec_file = current->exec_file;
67493+ get_file(file);
67494+ current->exec_file = file;
67495+#endif
67496+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67497+ /* limit suid stack to 8MB
67498+ we saved the old limits above and will restore them if this exec fails
67499+ */
67500+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
67501+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
67502+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
67503+#endif
67504+
67505+ if (!gr_tpe_allow(file)) {
67506+ retval = -EACCES;
67507+ goto out_fail;
67508+ }
67509+
67510+ if (gr_check_crash_exec(file)) {
67511+ retval = -EACCES;
67512+ goto out_fail;
67513+ }
67514+
67515+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
67516+ bprm->unsafe);
67517+ if (retval < 0)
67518+ goto out_fail;
67519+
67520 retval = copy_strings_kernel(1, &bprm->filename, bprm);
67521 if (retval < 0)
67522- goto out;
67523+ goto out_fail;
67524
67525 bprm->exec = bprm->p;
67526 retval = copy_strings(bprm->envc, envp, bprm);
67527 if (retval < 0)
67528- goto out;
67529+ goto out_fail;
67530
67531 retval = copy_strings(bprm->argc, argv, bprm);
67532 if (retval < 0)
67533- goto out;
67534+ goto out_fail;
67535+
67536+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
67537+
67538+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
67539
67540 current->flags &= ~PF_KTHREAD;
67541 retval = search_binary_handler(bprm,regs);
67542 if (retval < 0)
67543- goto out;
67544+ goto out_fail;
67545+#ifdef CONFIG_GRKERNSEC
67546+ if (old_exec_file)
67547+ fput(old_exec_file);
67548+#endif
67549
67550 /* execve succeeded */
67551+
67552+ increment_exec_counter();
67553 current->fs->in_exec = 0;
67554 current->in_execve = 0;
67555 acct_update_integrals(current);
67556@@ -1426,6 +1566,14 @@ int do_execve(char * filename,
67557 put_files_struct(displaced);
67558 return retval;
67559
67560+out_fail:
67561+#ifdef CONFIG_GRKERNSEC
67562+ current->acl = old_acl;
67563+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
67564+ fput(current->exec_file);
67565+ current->exec_file = old_exec_file;
67566+#endif
67567+
67568 out:
67569 if (bprm->mm) {
67570 acct_arg_size(bprm, 0);
67571@@ -1591,6 +1739,229 @@ out:
67572 return ispipe;
67573 }
67574
67575+int pax_check_flags(unsigned long *flags)
67576+{
67577+ int retval = 0;
67578+
67579+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
67580+ if (*flags & MF_PAX_SEGMEXEC)
67581+ {
67582+ *flags &= ~MF_PAX_SEGMEXEC;
67583+ retval = -EINVAL;
67584+ }
67585+#endif
67586+
67587+ if ((*flags & MF_PAX_PAGEEXEC)
67588+
67589+#ifdef CONFIG_PAX_PAGEEXEC
67590+ && (*flags & MF_PAX_SEGMEXEC)
67591+#endif
67592+
67593+ )
67594+ {
67595+ *flags &= ~MF_PAX_PAGEEXEC;
67596+ retval = -EINVAL;
67597+ }
67598+
67599+ if ((*flags & MF_PAX_MPROTECT)
67600+
67601+#ifdef CONFIG_PAX_MPROTECT
67602+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67603+#endif
67604+
67605+ )
67606+ {
67607+ *flags &= ~MF_PAX_MPROTECT;
67608+ retval = -EINVAL;
67609+ }
67610+
67611+ if ((*flags & MF_PAX_EMUTRAMP)
67612+
67613+#ifdef CONFIG_PAX_EMUTRAMP
67614+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67615+#endif
67616+
67617+ )
67618+ {
67619+ *flags &= ~MF_PAX_EMUTRAMP;
67620+ retval = -EINVAL;
67621+ }
67622+
67623+ return retval;
67624+}
67625+
67626+EXPORT_SYMBOL(pax_check_flags);
67627+
67628+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67629+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
67630+{
67631+ struct task_struct *tsk = current;
67632+ struct mm_struct *mm = current->mm;
67633+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
67634+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
67635+ char *path_exec = NULL;
67636+ char *path_fault = NULL;
67637+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
67638+
67639+ if (buffer_exec && buffer_fault) {
67640+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
67641+
67642+ down_read(&mm->mmap_sem);
67643+ vma = mm->mmap;
67644+ while (vma && (!vma_exec || !vma_fault)) {
67645+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
67646+ vma_exec = vma;
67647+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
67648+ vma_fault = vma;
67649+ vma = vma->vm_next;
67650+ }
67651+ if (vma_exec) {
67652+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
67653+ if (IS_ERR(path_exec))
67654+ path_exec = "<path too long>";
67655+ else {
67656+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
67657+ if (path_exec) {
67658+ *path_exec = 0;
67659+ path_exec = buffer_exec;
67660+ } else
67661+ path_exec = "<path too long>";
67662+ }
67663+ }
67664+ if (vma_fault) {
67665+ start = vma_fault->vm_start;
67666+ end = vma_fault->vm_end;
67667+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
67668+ if (vma_fault->vm_file) {
67669+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
67670+ if (IS_ERR(path_fault))
67671+ path_fault = "<path too long>";
67672+ else {
67673+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
67674+ if (path_fault) {
67675+ *path_fault = 0;
67676+ path_fault = buffer_fault;
67677+ } else
67678+ path_fault = "<path too long>";
67679+ }
67680+ } else
67681+ path_fault = "<anonymous mapping>";
67682+ }
67683+ up_read(&mm->mmap_sem);
67684+ }
67685+ if (tsk->signal->curr_ip)
67686+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
67687+ else
67688+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
67689+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
67690+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
67691+ task_uid(tsk), task_euid(tsk), pc, sp);
67692+ free_page((unsigned long)buffer_exec);
67693+ free_page((unsigned long)buffer_fault);
67694+ pax_report_insns(regs, pc, sp);
67695+ do_coredump(SIGKILL, SIGKILL, regs);
67696+}
67697+#endif
67698+
67699+#ifdef CONFIG_PAX_REFCOUNT
67700+void pax_report_refcount_overflow(struct pt_regs *regs)
67701+{
67702+ if (current->signal->curr_ip)
67703+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67704+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
67705+ else
67706+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67707+ current->comm, task_pid_nr(current), current_uid(), current_euid());
67708+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
67709+ show_regs(regs);
67710+ force_sig_specific(SIGKILL, current);
67711+}
67712+#endif
67713+
67714+#ifdef CONFIG_PAX_USERCOPY
67715+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
67716+int object_is_on_stack(const void *obj, unsigned long len)
67717+{
67718+ const void * const stack = task_stack_page(current);
67719+ const void * const stackend = stack + THREAD_SIZE;
67720+
67721+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67722+ const void *frame = NULL;
67723+ const void *oldframe;
67724+#endif
67725+
67726+ if (obj + len < obj)
67727+ return -1;
67728+
67729+ if (obj + len <= stack || stackend <= obj)
67730+ return 0;
67731+
67732+ if (obj < stack || stackend < obj + len)
67733+ return -1;
67734+
67735+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67736+ oldframe = __builtin_frame_address(1);
67737+ if (oldframe)
67738+ frame = __builtin_frame_address(2);
67739+ /*
67740+ low ----------------------------------------------> high
67741+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
67742+ ^----------------^
67743+ allow copies only within here
67744+ */
67745+ while (stack <= frame && frame < stackend) {
67746+ /* if obj + len extends past the last frame, this
67747+ check won't pass and the next frame will be 0,
67748+ causing us to bail out and correctly report
67749+ the copy as invalid
67750+ */
67751+ if (obj + len <= frame)
67752+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
67753+ oldframe = frame;
67754+ frame = *(const void * const *)frame;
67755+ }
67756+ return -1;
67757+#else
67758+ return 1;
67759+#endif
67760+}
67761+
67762+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
67763+{
67764+ if (current->signal->curr_ip)
67765+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67766+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67767+ else
67768+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67769+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67770+
67771+ dump_stack();
67772+ gr_handle_kernel_exploit();
67773+ do_group_exit(SIGKILL);
67774+}
67775+#endif
67776+
67777+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
67778+void pax_track_stack(void)
67779+{
67780+ unsigned long sp = (unsigned long)&sp;
67781+ if (sp < current_thread_info()->lowest_stack &&
67782+ sp > (unsigned long)task_stack_page(current))
67783+ current_thread_info()->lowest_stack = sp;
67784+}
67785+EXPORT_SYMBOL(pax_track_stack);
67786+#endif
67787+
67788+#ifdef CONFIG_PAX_SIZE_OVERFLOW
67789+void report_size_overflow(const char *file, unsigned int line, const char *func)
67790+{
67791+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
67792+ dump_stack();
67793+ do_group_exit(SIGKILL);
67794+}
67795+EXPORT_SYMBOL(report_size_overflow);
67796+#endif
67797+
67798 static int zap_process(struct task_struct *start)
67799 {
67800 struct task_struct *t;
67801@@ -1793,17 +2164,17 @@ static void wait_for_dump_helpers(struct file *file)
67802 pipe = file->f_path.dentry->d_inode->i_pipe;
67803
67804 pipe_lock(pipe);
67805- pipe->readers++;
67806- pipe->writers--;
67807+ atomic_inc(&pipe->readers);
67808+ atomic_dec(&pipe->writers);
67809
67810- while ((pipe->readers > 1) && (!signal_pending(current))) {
67811+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
67812 wake_up_interruptible_sync(&pipe->wait);
67813 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
67814 pipe_wait(pipe);
67815 }
67816
67817- pipe->readers--;
67818- pipe->writers++;
67819+ atomic_dec(&pipe->readers);
67820+ atomic_inc(&pipe->writers);
67821 pipe_unlock(pipe);
67822
67823 }
67824@@ -1826,10 +2197,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67825 char **helper_argv = NULL;
67826 int helper_argc = 0;
67827 int dump_count = 0;
67828- static atomic_t core_dump_count = ATOMIC_INIT(0);
67829+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
67830
67831 audit_core_dumps(signr);
67832
67833+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
67834+ gr_handle_brute_attach(current, mm->flags);
67835+
67836 binfmt = mm->binfmt;
67837 if (!binfmt || !binfmt->core_dump)
67838 goto fail;
67839@@ -1874,6 +2248,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67840 */
67841 clear_thread_flag(TIF_SIGPENDING);
67842
67843+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
67844+
67845 /*
67846 * lock_kernel() because format_corename() is controlled by sysctl, which
67847 * uses lock_kernel()
67848@@ -1908,7 +2284,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67849 goto fail_unlock;
67850 }
67851
67852- dump_count = atomic_inc_return(&core_dump_count);
67853+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
67854 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
67855 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
67856 task_tgid_vnr(current), current->comm);
67857@@ -1972,7 +2348,7 @@ close_fail:
67858 filp_close(file, NULL);
67859 fail_dropcount:
67860 if (dump_count)
67861- atomic_dec(&core_dump_count);
67862+ atomic_dec_unchecked(&core_dump_count);
67863 fail_unlock:
67864 if (helper_argv)
67865 argv_free(helper_argv);
67866diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
67867index 7f8d2e5..a1abdbb 100644
67868--- a/fs/ext2/balloc.c
67869+++ b/fs/ext2/balloc.c
67870@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
67871
67872 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67873 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67874- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67875+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67876 sbi->s_resuid != current_fsuid() &&
67877 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67878 return 0;
67879diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
67880index 27967f9..9f2a5fb 100644
67881--- a/fs/ext3/balloc.c
67882+++ b/fs/ext3/balloc.c
67883@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
67884
67885 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67886 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67887- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67888+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67889 sbi->s_resuid != current_fsuid() &&
67890 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67891 return 0;
67892diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
67893index e85b63c..80398e6 100644
67894--- a/fs/ext4/balloc.c
67895+++ b/fs/ext4/balloc.c
67896@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
67897 /* Hm, nope. Are (enough) root reserved blocks available? */
67898 if (sbi->s_resuid == current_fsuid() ||
67899 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
67900- capable(CAP_SYS_RESOURCE)) {
67901+ capable_nolog(CAP_SYS_RESOURCE)) {
67902 if (free_blocks >= (nblocks + dirty_blocks))
67903 return 1;
67904 }
67905diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
67906index 67c46ed..1f237e5 100644
67907--- a/fs/ext4/ext4.h
67908+++ b/fs/ext4/ext4.h
67909@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
67910
67911 /* stats for buddy allocator */
67912 spinlock_t s_mb_pa_lock;
67913- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
67914- atomic_t s_bal_success; /* we found long enough chunks */
67915- atomic_t s_bal_allocated; /* in blocks */
67916- atomic_t s_bal_ex_scanned; /* total extents scanned */
67917- atomic_t s_bal_goals; /* goal hits */
67918- atomic_t s_bal_breaks; /* too long searches */
67919- atomic_t s_bal_2orders; /* 2^order hits */
67920+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
67921+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
67922+ atomic_unchecked_t s_bal_allocated; /* in blocks */
67923+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
67924+ atomic_unchecked_t s_bal_goals; /* goal hits */
67925+ atomic_unchecked_t s_bal_breaks; /* too long searches */
67926+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
67927 spinlock_t s_bal_lock;
67928 unsigned long s_mb_buddies_generated;
67929 unsigned long long s_mb_generation_time;
67930- atomic_t s_mb_lost_chunks;
67931- atomic_t s_mb_preallocated;
67932- atomic_t s_mb_discarded;
67933+ atomic_unchecked_t s_mb_lost_chunks;
67934+ atomic_unchecked_t s_mb_preallocated;
67935+ atomic_unchecked_t s_mb_discarded;
67936 atomic_t s_lock_busy;
67937
67938 /* locality groups */
67939diff --git a/fs/ext4/file.c b/fs/ext4/file.c
67940index 2a60541..7439d61 100644
67941--- a/fs/ext4/file.c
67942+++ b/fs/ext4/file.c
67943@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
67944 cp = d_path(&path, buf, sizeof(buf));
67945 path_put(&path);
67946 if (!IS_ERR(cp)) {
67947- memcpy(sbi->s_es->s_last_mounted, cp,
67948- sizeof(sbi->s_es->s_last_mounted));
67949+ strlcpy(sbi->s_es->s_last_mounted, cp,
67950+ sizeof(sbi->s_es->s_last_mounted));
67951 sb->s_dirt = 1;
67952 }
67953 }
67954diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
67955index 42bac1b..0aab9d8 100644
67956--- a/fs/ext4/mballoc.c
67957+++ b/fs/ext4/mballoc.c
67958@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
67959 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
67960
67961 if (EXT4_SB(sb)->s_mb_stats)
67962- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
67963+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
67964
67965 break;
67966 }
67967@@ -2131,7 +2131,7 @@ repeat:
67968 ac->ac_status = AC_STATUS_CONTINUE;
67969 ac->ac_flags |= EXT4_MB_HINT_FIRST;
67970 cr = 3;
67971- atomic_inc(&sbi->s_mb_lost_chunks);
67972+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
67973 goto repeat;
67974 }
67975 }
67976@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
67977 ext4_grpblk_t counters[16];
67978 } sg;
67979
67980+ pax_track_stack();
67981+
67982 group--;
67983 if (group == 0)
67984 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
67985@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
67986 if (sbi->s_mb_stats) {
67987 printk(KERN_INFO
67988 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
67989- atomic_read(&sbi->s_bal_allocated),
67990- atomic_read(&sbi->s_bal_reqs),
67991- atomic_read(&sbi->s_bal_success));
67992+ atomic_read_unchecked(&sbi->s_bal_allocated),
67993+ atomic_read_unchecked(&sbi->s_bal_reqs),
67994+ atomic_read_unchecked(&sbi->s_bal_success));
67995 printk(KERN_INFO
67996 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
67997 "%u 2^N hits, %u breaks, %u lost\n",
67998- atomic_read(&sbi->s_bal_ex_scanned),
67999- atomic_read(&sbi->s_bal_goals),
68000- atomic_read(&sbi->s_bal_2orders),
68001- atomic_read(&sbi->s_bal_breaks),
68002- atomic_read(&sbi->s_mb_lost_chunks));
68003+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
68004+ atomic_read_unchecked(&sbi->s_bal_goals),
68005+ atomic_read_unchecked(&sbi->s_bal_2orders),
68006+ atomic_read_unchecked(&sbi->s_bal_breaks),
68007+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
68008 printk(KERN_INFO
68009 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
68010 sbi->s_mb_buddies_generated++,
68011 sbi->s_mb_generation_time);
68012 printk(KERN_INFO
68013 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
68014- atomic_read(&sbi->s_mb_preallocated),
68015- atomic_read(&sbi->s_mb_discarded));
68016+ atomic_read_unchecked(&sbi->s_mb_preallocated),
68017+ atomic_read_unchecked(&sbi->s_mb_discarded));
68018 }
68019
68020 free_percpu(sbi->s_locality_groups);
68021@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
68022 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
68023
68024 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
68025- atomic_inc(&sbi->s_bal_reqs);
68026- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
68027+ atomic_inc_unchecked(&sbi->s_bal_reqs);
68028+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
68029 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
68030- atomic_inc(&sbi->s_bal_success);
68031- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
68032+ atomic_inc_unchecked(&sbi->s_bal_success);
68033+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
68034 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
68035 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
68036- atomic_inc(&sbi->s_bal_goals);
68037+ atomic_inc_unchecked(&sbi->s_bal_goals);
68038 if (ac->ac_found > sbi->s_mb_max_to_scan)
68039- atomic_inc(&sbi->s_bal_breaks);
68040+ atomic_inc_unchecked(&sbi->s_bal_breaks);
68041 }
68042
68043 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
68044@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
68045 trace_ext4_mb_new_inode_pa(ac, pa);
68046
68047 ext4_mb_use_inode_pa(ac, pa);
68048- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68049+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68050
68051 ei = EXT4_I(ac->ac_inode);
68052 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
68053@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
68054 trace_ext4_mb_new_group_pa(ac, pa);
68055
68056 ext4_mb_use_group_pa(ac, pa);
68057- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68058+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
68059
68060 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
68061 lg = ac->ac_lg;
68062@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
68063 * from the bitmap and continue.
68064 */
68065 }
68066- atomic_add(free, &sbi->s_mb_discarded);
68067+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
68068
68069 return err;
68070 }
68071@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
68072 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
68073 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
68074 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
68075- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68076+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68077
68078 if (ac) {
68079 ac->ac_sb = sb;
68080diff --git a/fs/ext4/super.c b/fs/ext4/super.c
68081index f1e7077..edd86b2 100644
68082--- a/fs/ext4/super.c
68083+++ b/fs/ext4/super.c
68084@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
68085 }
68086
68087
68088-static struct sysfs_ops ext4_attr_ops = {
68089+static const struct sysfs_ops ext4_attr_ops = {
68090 .show = ext4_attr_show,
68091 .store = ext4_attr_store,
68092 };
68093diff --git a/fs/fcntl.c b/fs/fcntl.c
68094index 97e01dc..e9aab2d 100644
68095--- a/fs/fcntl.c
68096+++ b/fs/fcntl.c
68097@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
68098 if (err)
68099 return err;
68100
68101+ if (gr_handle_chroot_fowner(pid, type))
68102+ return -ENOENT;
68103+ if (gr_check_protected_task_fowner(pid, type))
68104+ return -EACCES;
68105+
68106 f_modown(filp, pid, type, force);
68107 return 0;
68108 }
68109@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
68110
68111 static int f_setown_ex(struct file *filp, unsigned long arg)
68112 {
68113- struct f_owner_ex * __user owner_p = (void * __user)arg;
68114+ struct f_owner_ex __user *owner_p = (void __user *)arg;
68115 struct f_owner_ex owner;
68116 struct pid *pid;
68117 int type;
68118@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
68119
68120 static int f_getown_ex(struct file *filp, unsigned long arg)
68121 {
68122- struct f_owner_ex * __user owner_p = (void * __user)arg;
68123+ struct f_owner_ex __user *owner_p = (void __user *)arg;
68124 struct f_owner_ex owner;
68125 int ret = 0;
68126
68127@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
68128 switch (cmd) {
68129 case F_DUPFD:
68130 case F_DUPFD_CLOEXEC:
68131+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
68132 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68133 break;
68134 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
68135diff --git a/fs/fifo.c b/fs/fifo.c
68136index f8f97b8..b1f2259 100644
68137--- a/fs/fifo.c
68138+++ b/fs/fifo.c
68139@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
68140 */
68141 filp->f_op = &read_pipefifo_fops;
68142 pipe->r_counter++;
68143- if (pipe->readers++ == 0)
68144+ if (atomic_inc_return(&pipe->readers) == 1)
68145 wake_up_partner(inode);
68146
68147- if (!pipe->writers) {
68148+ if (!atomic_read(&pipe->writers)) {
68149 if ((filp->f_flags & O_NONBLOCK)) {
68150 /* suppress POLLHUP until we have
68151 * seen a writer */
68152@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
68153 * errno=ENXIO when there is no process reading the FIFO.
68154 */
68155 ret = -ENXIO;
68156- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
68157+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
68158 goto err;
68159
68160 filp->f_op = &write_pipefifo_fops;
68161 pipe->w_counter++;
68162- if (!pipe->writers++)
68163+ if (atomic_inc_return(&pipe->writers) == 1)
68164 wake_up_partner(inode);
68165
68166- if (!pipe->readers) {
68167+ if (!atomic_read(&pipe->readers)) {
68168 wait_for_partner(inode, &pipe->r_counter);
68169 if (signal_pending(current))
68170 goto err_wr;
68171@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
68172 */
68173 filp->f_op = &rdwr_pipefifo_fops;
68174
68175- pipe->readers++;
68176- pipe->writers++;
68177+ atomic_inc(&pipe->readers);
68178+ atomic_inc(&pipe->writers);
68179 pipe->r_counter++;
68180 pipe->w_counter++;
68181- if (pipe->readers == 1 || pipe->writers == 1)
68182+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
68183 wake_up_partner(inode);
68184 break;
68185
68186@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
68187 return 0;
68188
68189 err_rd:
68190- if (!--pipe->readers)
68191+ if (atomic_dec_and_test(&pipe->readers))
68192 wake_up_interruptible(&pipe->wait);
68193 ret = -ERESTARTSYS;
68194 goto err;
68195
68196 err_wr:
68197- if (!--pipe->writers)
68198+ if (atomic_dec_and_test(&pipe->writers))
68199 wake_up_interruptible(&pipe->wait);
68200 ret = -ERESTARTSYS;
68201 goto err;
68202
68203 err:
68204- if (!pipe->readers && !pipe->writers)
68205+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
68206 free_pipe_info(inode);
68207
68208 err_nocleanup:
68209diff --git a/fs/file.c b/fs/file.c
68210index 87e1290..a930cc4 100644
68211--- a/fs/file.c
68212+++ b/fs/file.c
68213@@ -14,6 +14,7 @@
68214 #include <linux/slab.h>
68215 #include <linux/vmalloc.h>
68216 #include <linux/file.h>
68217+#include <linux/security.h>
68218 #include <linux/fdtable.h>
68219 #include <linux/bitops.h>
68220 #include <linux/interrupt.h>
68221@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
68222 * N.B. For clone tasks sharing a files structure, this test
68223 * will limit the total number of files that can be opened.
68224 */
68225+
68226+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
68227 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68228 return -EMFILE;
68229
68230diff --git a/fs/filesystems.c b/fs/filesystems.c
68231index a24c58e..53f91ee 100644
68232--- a/fs/filesystems.c
68233+++ b/fs/filesystems.c
68234@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
68235 int len = dot ? dot - name : strlen(name);
68236
68237 fs = __get_fs_type(name, len);
68238+
68239+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68240+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
68241+#else
68242 if (!fs && (request_module("%.*s", len, name) == 0))
68243+#endif
68244 fs = __get_fs_type(name, len);
68245
68246 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
68247diff --git a/fs/fs_struct.c b/fs/fs_struct.c
68248index eee0590..1181166 100644
68249--- a/fs/fs_struct.c
68250+++ b/fs/fs_struct.c
68251@@ -4,6 +4,7 @@
68252 #include <linux/path.h>
68253 #include <linux/slab.h>
68254 #include <linux/fs_struct.h>
68255+#include <linux/grsecurity.h>
68256
68257 /*
68258 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
68259@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
68260 old_root = fs->root;
68261 fs->root = *path;
68262 path_get(path);
68263+ gr_set_chroot_entries(current, path);
68264 write_unlock(&fs->lock);
68265 if (old_root.dentry)
68266 path_put(&old_root);
68267@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
68268 && fs->root.mnt == old_root->mnt) {
68269 path_get(new_root);
68270 fs->root = *new_root;
68271+ gr_set_chroot_entries(p, new_root);
68272 count++;
68273 }
68274 if (fs->pwd.dentry == old_root->dentry
68275@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
68276 task_lock(tsk);
68277 write_lock(&fs->lock);
68278 tsk->fs = NULL;
68279- kill = !--fs->users;
68280+ gr_clear_chroot_entries(tsk);
68281+ kill = !atomic_dec_return(&fs->users);
68282 write_unlock(&fs->lock);
68283 task_unlock(tsk);
68284 if (kill)
68285@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
68286 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
68287 /* We don't need to lock fs - think why ;-) */
68288 if (fs) {
68289- fs->users = 1;
68290+ atomic_set(&fs->users, 1);
68291 fs->in_exec = 0;
68292 rwlock_init(&fs->lock);
68293 fs->umask = old->umask;
68294@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
68295
68296 task_lock(current);
68297 write_lock(&fs->lock);
68298- kill = !--fs->users;
68299+ kill = !atomic_dec_return(&fs->users);
68300 current->fs = new_fs;
68301+ gr_set_chroot_entries(current, &new_fs->root);
68302 write_unlock(&fs->lock);
68303 task_unlock(current);
68304
68305@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
68306
68307 int current_umask(void)
68308 {
68309- return current->fs->umask;
68310+ return current->fs->umask | gr_acl_umask();
68311 }
68312 EXPORT_SYMBOL(current_umask);
68313
68314 /* to be mentioned only in INIT_TASK */
68315 struct fs_struct init_fs = {
68316- .users = 1,
68317+ .users = ATOMIC_INIT(1),
68318 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
68319 .umask = 0022,
68320 };
68321@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
68322 task_lock(current);
68323
68324 write_lock(&init_fs.lock);
68325- init_fs.users++;
68326+ atomic_inc(&init_fs.users);
68327 write_unlock(&init_fs.lock);
68328
68329 write_lock(&fs->lock);
68330 current->fs = &init_fs;
68331- kill = !--fs->users;
68332+ gr_set_chroot_entries(current, &current->fs->root);
68333+ kill = !atomic_dec_return(&fs->users);
68334 write_unlock(&fs->lock);
68335
68336 task_unlock(current);
68337diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
68338index 9905350..02eaec4 100644
68339--- a/fs/fscache/cookie.c
68340+++ b/fs/fscache/cookie.c
68341@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
68342 parent ? (char *) parent->def->name : "<no-parent>",
68343 def->name, netfs_data);
68344
68345- fscache_stat(&fscache_n_acquires);
68346+ fscache_stat_unchecked(&fscache_n_acquires);
68347
68348 /* if there's no parent cookie, then we don't create one here either */
68349 if (!parent) {
68350- fscache_stat(&fscache_n_acquires_null);
68351+ fscache_stat_unchecked(&fscache_n_acquires_null);
68352 _leave(" [no parent]");
68353 return NULL;
68354 }
68355@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
68356 /* allocate and initialise a cookie */
68357 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
68358 if (!cookie) {
68359- fscache_stat(&fscache_n_acquires_oom);
68360+ fscache_stat_unchecked(&fscache_n_acquires_oom);
68361 _leave(" [ENOMEM]");
68362 return NULL;
68363 }
68364@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68365
68366 switch (cookie->def->type) {
68367 case FSCACHE_COOKIE_TYPE_INDEX:
68368- fscache_stat(&fscache_n_cookie_index);
68369+ fscache_stat_unchecked(&fscache_n_cookie_index);
68370 break;
68371 case FSCACHE_COOKIE_TYPE_DATAFILE:
68372- fscache_stat(&fscache_n_cookie_data);
68373+ fscache_stat_unchecked(&fscache_n_cookie_data);
68374 break;
68375 default:
68376- fscache_stat(&fscache_n_cookie_special);
68377+ fscache_stat_unchecked(&fscache_n_cookie_special);
68378 break;
68379 }
68380
68381@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68382 if (fscache_acquire_non_index_cookie(cookie) < 0) {
68383 atomic_dec(&parent->n_children);
68384 __fscache_cookie_put(cookie);
68385- fscache_stat(&fscache_n_acquires_nobufs);
68386+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
68387 _leave(" = NULL");
68388 return NULL;
68389 }
68390 }
68391
68392- fscache_stat(&fscache_n_acquires_ok);
68393+ fscache_stat_unchecked(&fscache_n_acquires_ok);
68394 _leave(" = %p", cookie);
68395 return cookie;
68396 }
68397@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
68398 cache = fscache_select_cache_for_object(cookie->parent);
68399 if (!cache) {
68400 up_read(&fscache_addremove_sem);
68401- fscache_stat(&fscache_n_acquires_no_cache);
68402+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
68403 _leave(" = -ENOMEDIUM [no cache]");
68404 return -ENOMEDIUM;
68405 }
68406@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
68407 object = cache->ops->alloc_object(cache, cookie);
68408 fscache_stat_d(&fscache_n_cop_alloc_object);
68409 if (IS_ERR(object)) {
68410- fscache_stat(&fscache_n_object_no_alloc);
68411+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
68412 ret = PTR_ERR(object);
68413 goto error;
68414 }
68415
68416- fscache_stat(&fscache_n_object_alloc);
68417+ fscache_stat_unchecked(&fscache_n_object_alloc);
68418
68419 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
68420
68421@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
68422 struct fscache_object *object;
68423 struct hlist_node *_p;
68424
68425- fscache_stat(&fscache_n_updates);
68426+ fscache_stat_unchecked(&fscache_n_updates);
68427
68428 if (!cookie) {
68429- fscache_stat(&fscache_n_updates_null);
68430+ fscache_stat_unchecked(&fscache_n_updates_null);
68431 _leave(" [no cookie]");
68432 return;
68433 }
68434@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68435 struct fscache_object *object;
68436 unsigned long event;
68437
68438- fscache_stat(&fscache_n_relinquishes);
68439+ fscache_stat_unchecked(&fscache_n_relinquishes);
68440 if (retire)
68441- fscache_stat(&fscache_n_relinquishes_retire);
68442+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
68443
68444 if (!cookie) {
68445- fscache_stat(&fscache_n_relinquishes_null);
68446+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
68447 _leave(" [no cookie]");
68448 return;
68449 }
68450@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68451
68452 /* wait for the cookie to finish being instantiated (or to fail) */
68453 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
68454- fscache_stat(&fscache_n_relinquishes_waitcrt);
68455+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
68456 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
68457 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
68458 }
68459diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
68460index edd7434..0725e66 100644
68461--- a/fs/fscache/internal.h
68462+++ b/fs/fscache/internal.h
68463@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
68464 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
68465 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
68466
68467-extern atomic_t fscache_n_op_pend;
68468-extern atomic_t fscache_n_op_run;
68469-extern atomic_t fscache_n_op_enqueue;
68470-extern atomic_t fscache_n_op_deferred_release;
68471-extern atomic_t fscache_n_op_release;
68472-extern atomic_t fscache_n_op_gc;
68473-extern atomic_t fscache_n_op_cancelled;
68474-extern atomic_t fscache_n_op_rejected;
68475+extern atomic_unchecked_t fscache_n_op_pend;
68476+extern atomic_unchecked_t fscache_n_op_run;
68477+extern atomic_unchecked_t fscache_n_op_enqueue;
68478+extern atomic_unchecked_t fscache_n_op_deferred_release;
68479+extern atomic_unchecked_t fscache_n_op_release;
68480+extern atomic_unchecked_t fscache_n_op_gc;
68481+extern atomic_unchecked_t fscache_n_op_cancelled;
68482+extern atomic_unchecked_t fscache_n_op_rejected;
68483
68484-extern atomic_t fscache_n_attr_changed;
68485-extern atomic_t fscache_n_attr_changed_ok;
68486-extern atomic_t fscache_n_attr_changed_nobufs;
68487-extern atomic_t fscache_n_attr_changed_nomem;
68488-extern atomic_t fscache_n_attr_changed_calls;
68489+extern atomic_unchecked_t fscache_n_attr_changed;
68490+extern atomic_unchecked_t fscache_n_attr_changed_ok;
68491+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
68492+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
68493+extern atomic_unchecked_t fscache_n_attr_changed_calls;
68494
68495-extern atomic_t fscache_n_allocs;
68496-extern atomic_t fscache_n_allocs_ok;
68497-extern atomic_t fscache_n_allocs_wait;
68498-extern atomic_t fscache_n_allocs_nobufs;
68499-extern atomic_t fscache_n_allocs_intr;
68500-extern atomic_t fscache_n_allocs_object_dead;
68501-extern atomic_t fscache_n_alloc_ops;
68502-extern atomic_t fscache_n_alloc_op_waits;
68503+extern atomic_unchecked_t fscache_n_allocs;
68504+extern atomic_unchecked_t fscache_n_allocs_ok;
68505+extern atomic_unchecked_t fscache_n_allocs_wait;
68506+extern atomic_unchecked_t fscache_n_allocs_nobufs;
68507+extern atomic_unchecked_t fscache_n_allocs_intr;
68508+extern atomic_unchecked_t fscache_n_allocs_object_dead;
68509+extern atomic_unchecked_t fscache_n_alloc_ops;
68510+extern atomic_unchecked_t fscache_n_alloc_op_waits;
68511
68512-extern atomic_t fscache_n_retrievals;
68513-extern atomic_t fscache_n_retrievals_ok;
68514-extern atomic_t fscache_n_retrievals_wait;
68515-extern atomic_t fscache_n_retrievals_nodata;
68516-extern atomic_t fscache_n_retrievals_nobufs;
68517-extern atomic_t fscache_n_retrievals_intr;
68518-extern atomic_t fscache_n_retrievals_nomem;
68519-extern atomic_t fscache_n_retrievals_object_dead;
68520-extern atomic_t fscache_n_retrieval_ops;
68521-extern atomic_t fscache_n_retrieval_op_waits;
68522+extern atomic_unchecked_t fscache_n_retrievals;
68523+extern atomic_unchecked_t fscache_n_retrievals_ok;
68524+extern atomic_unchecked_t fscache_n_retrievals_wait;
68525+extern atomic_unchecked_t fscache_n_retrievals_nodata;
68526+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
68527+extern atomic_unchecked_t fscache_n_retrievals_intr;
68528+extern atomic_unchecked_t fscache_n_retrievals_nomem;
68529+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
68530+extern atomic_unchecked_t fscache_n_retrieval_ops;
68531+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
68532
68533-extern atomic_t fscache_n_stores;
68534-extern atomic_t fscache_n_stores_ok;
68535-extern atomic_t fscache_n_stores_again;
68536-extern atomic_t fscache_n_stores_nobufs;
68537-extern atomic_t fscache_n_stores_oom;
68538-extern atomic_t fscache_n_store_ops;
68539-extern atomic_t fscache_n_store_calls;
68540-extern atomic_t fscache_n_store_pages;
68541-extern atomic_t fscache_n_store_radix_deletes;
68542-extern atomic_t fscache_n_store_pages_over_limit;
68543+extern atomic_unchecked_t fscache_n_stores;
68544+extern atomic_unchecked_t fscache_n_stores_ok;
68545+extern atomic_unchecked_t fscache_n_stores_again;
68546+extern atomic_unchecked_t fscache_n_stores_nobufs;
68547+extern atomic_unchecked_t fscache_n_stores_oom;
68548+extern atomic_unchecked_t fscache_n_store_ops;
68549+extern atomic_unchecked_t fscache_n_store_calls;
68550+extern atomic_unchecked_t fscache_n_store_pages;
68551+extern atomic_unchecked_t fscache_n_store_radix_deletes;
68552+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
68553
68554-extern atomic_t fscache_n_store_vmscan_not_storing;
68555-extern atomic_t fscache_n_store_vmscan_gone;
68556-extern atomic_t fscache_n_store_vmscan_busy;
68557-extern atomic_t fscache_n_store_vmscan_cancelled;
68558+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
68559+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
68560+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
68561+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
68562
68563-extern atomic_t fscache_n_marks;
68564-extern atomic_t fscache_n_uncaches;
68565+extern atomic_unchecked_t fscache_n_marks;
68566+extern atomic_unchecked_t fscache_n_uncaches;
68567
68568-extern atomic_t fscache_n_acquires;
68569-extern atomic_t fscache_n_acquires_null;
68570-extern atomic_t fscache_n_acquires_no_cache;
68571-extern atomic_t fscache_n_acquires_ok;
68572-extern atomic_t fscache_n_acquires_nobufs;
68573-extern atomic_t fscache_n_acquires_oom;
68574+extern atomic_unchecked_t fscache_n_acquires;
68575+extern atomic_unchecked_t fscache_n_acquires_null;
68576+extern atomic_unchecked_t fscache_n_acquires_no_cache;
68577+extern atomic_unchecked_t fscache_n_acquires_ok;
68578+extern atomic_unchecked_t fscache_n_acquires_nobufs;
68579+extern atomic_unchecked_t fscache_n_acquires_oom;
68580
68581-extern atomic_t fscache_n_updates;
68582-extern atomic_t fscache_n_updates_null;
68583-extern atomic_t fscache_n_updates_run;
68584+extern atomic_unchecked_t fscache_n_updates;
68585+extern atomic_unchecked_t fscache_n_updates_null;
68586+extern atomic_unchecked_t fscache_n_updates_run;
68587
68588-extern atomic_t fscache_n_relinquishes;
68589-extern atomic_t fscache_n_relinquishes_null;
68590-extern atomic_t fscache_n_relinquishes_waitcrt;
68591-extern atomic_t fscache_n_relinquishes_retire;
68592+extern atomic_unchecked_t fscache_n_relinquishes;
68593+extern atomic_unchecked_t fscache_n_relinquishes_null;
68594+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
68595+extern atomic_unchecked_t fscache_n_relinquishes_retire;
68596
68597-extern atomic_t fscache_n_cookie_index;
68598-extern atomic_t fscache_n_cookie_data;
68599-extern atomic_t fscache_n_cookie_special;
68600+extern atomic_unchecked_t fscache_n_cookie_index;
68601+extern atomic_unchecked_t fscache_n_cookie_data;
68602+extern atomic_unchecked_t fscache_n_cookie_special;
68603
68604-extern atomic_t fscache_n_object_alloc;
68605-extern atomic_t fscache_n_object_no_alloc;
68606-extern atomic_t fscache_n_object_lookups;
68607-extern atomic_t fscache_n_object_lookups_negative;
68608-extern atomic_t fscache_n_object_lookups_positive;
68609-extern atomic_t fscache_n_object_lookups_timed_out;
68610-extern atomic_t fscache_n_object_created;
68611-extern atomic_t fscache_n_object_avail;
68612-extern atomic_t fscache_n_object_dead;
68613+extern atomic_unchecked_t fscache_n_object_alloc;
68614+extern atomic_unchecked_t fscache_n_object_no_alloc;
68615+extern atomic_unchecked_t fscache_n_object_lookups;
68616+extern atomic_unchecked_t fscache_n_object_lookups_negative;
68617+extern atomic_unchecked_t fscache_n_object_lookups_positive;
68618+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
68619+extern atomic_unchecked_t fscache_n_object_created;
68620+extern atomic_unchecked_t fscache_n_object_avail;
68621+extern atomic_unchecked_t fscache_n_object_dead;
68622
68623-extern atomic_t fscache_n_checkaux_none;
68624-extern atomic_t fscache_n_checkaux_okay;
68625-extern atomic_t fscache_n_checkaux_update;
68626-extern atomic_t fscache_n_checkaux_obsolete;
68627+extern atomic_unchecked_t fscache_n_checkaux_none;
68628+extern atomic_unchecked_t fscache_n_checkaux_okay;
68629+extern atomic_unchecked_t fscache_n_checkaux_update;
68630+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
68631
68632 extern atomic_t fscache_n_cop_alloc_object;
68633 extern atomic_t fscache_n_cop_lookup_object;
68634@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
68635 atomic_inc(stat);
68636 }
68637
68638+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
68639+{
68640+ atomic_inc_unchecked(stat);
68641+}
68642+
68643 static inline void fscache_stat_d(atomic_t *stat)
68644 {
68645 atomic_dec(stat);
68646@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
68647
68648 #define __fscache_stat(stat) (NULL)
68649 #define fscache_stat(stat) do {} while (0)
68650+#define fscache_stat_unchecked(stat) do {} while (0)
68651 #define fscache_stat_d(stat) do {} while (0)
68652 #endif
68653
68654diff --git a/fs/fscache/object.c b/fs/fscache/object.c
68655index e513ac5..e888d34 100644
68656--- a/fs/fscache/object.c
68657+++ b/fs/fscache/object.c
68658@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68659 /* update the object metadata on disk */
68660 case FSCACHE_OBJECT_UPDATING:
68661 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
68662- fscache_stat(&fscache_n_updates_run);
68663+ fscache_stat_unchecked(&fscache_n_updates_run);
68664 fscache_stat(&fscache_n_cop_update_object);
68665 object->cache->ops->update_object(object);
68666 fscache_stat_d(&fscache_n_cop_update_object);
68667@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68668 spin_lock(&object->lock);
68669 object->state = FSCACHE_OBJECT_DEAD;
68670 spin_unlock(&object->lock);
68671- fscache_stat(&fscache_n_object_dead);
68672+ fscache_stat_unchecked(&fscache_n_object_dead);
68673 goto terminal_transit;
68674
68675 /* handle the parent cache of this object being withdrawn from
68676@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68677 spin_lock(&object->lock);
68678 object->state = FSCACHE_OBJECT_DEAD;
68679 spin_unlock(&object->lock);
68680- fscache_stat(&fscache_n_object_dead);
68681+ fscache_stat_unchecked(&fscache_n_object_dead);
68682 goto terminal_transit;
68683
68684 /* complain about the object being woken up once it is
68685@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68686 parent->cookie->def->name, cookie->def->name,
68687 object->cache->tag->name);
68688
68689- fscache_stat(&fscache_n_object_lookups);
68690+ fscache_stat_unchecked(&fscache_n_object_lookups);
68691 fscache_stat(&fscache_n_cop_lookup_object);
68692 ret = object->cache->ops->lookup_object(object);
68693 fscache_stat_d(&fscache_n_cop_lookup_object);
68694@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68695 if (ret == -ETIMEDOUT) {
68696 /* probably stuck behind another object, so move this one to
68697 * the back of the queue */
68698- fscache_stat(&fscache_n_object_lookups_timed_out);
68699+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
68700 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68701 }
68702
68703@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
68704
68705 spin_lock(&object->lock);
68706 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68707- fscache_stat(&fscache_n_object_lookups_negative);
68708+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
68709
68710 /* transit here to allow write requests to begin stacking up
68711 * and read requests to begin returning ENODATA */
68712@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
68713 * result, in which case there may be data available */
68714 spin_lock(&object->lock);
68715 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68716- fscache_stat(&fscache_n_object_lookups_positive);
68717+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
68718
68719 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
68720
68721@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
68722 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68723 } else {
68724 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
68725- fscache_stat(&fscache_n_object_created);
68726+ fscache_stat_unchecked(&fscache_n_object_created);
68727
68728 object->state = FSCACHE_OBJECT_AVAILABLE;
68729 spin_unlock(&object->lock);
68730@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
68731 fscache_enqueue_dependents(object);
68732
68733 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
68734- fscache_stat(&fscache_n_object_avail);
68735+ fscache_stat_unchecked(&fscache_n_object_avail);
68736
68737 _leave("");
68738 }
68739@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68740 enum fscache_checkaux result;
68741
68742 if (!object->cookie->def->check_aux) {
68743- fscache_stat(&fscache_n_checkaux_none);
68744+ fscache_stat_unchecked(&fscache_n_checkaux_none);
68745 return FSCACHE_CHECKAUX_OKAY;
68746 }
68747
68748@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68749 switch (result) {
68750 /* entry okay as is */
68751 case FSCACHE_CHECKAUX_OKAY:
68752- fscache_stat(&fscache_n_checkaux_okay);
68753+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
68754 break;
68755
68756 /* entry requires update */
68757 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
68758- fscache_stat(&fscache_n_checkaux_update);
68759+ fscache_stat_unchecked(&fscache_n_checkaux_update);
68760 break;
68761
68762 /* entry requires deletion */
68763 case FSCACHE_CHECKAUX_OBSOLETE:
68764- fscache_stat(&fscache_n_checkaux_obsolete);
68765+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
68766 break;
68767
68768 default:
68769diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
68770index 313e79a..775240f 100644
68771--- a/fs/fscache/operation.c
68772+++ b/fs/fscache/operation.c
68773@@ -16,7 +16,7 @@
68774 #include <linux/seq_file.h>
68775 #include "internal.h"
68776
68777-atomic_t fscache_op_debug_id;
68778+atomic_unchecked_t fscache_op_debug_id;
68779 EXPORT_SYMBOL(fscache_op_debug_id);
68780
68781 /**
68782@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
68783 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
68784 ASSERTCMP(atomic_read(&op->usage), >, 0);
68785
68786- fscache_stat(&fscache_n_op_enqueue);
68787+ fscache_stat_unchecked(&fscache_n_op_enqueue);
68788 switch (op->flags & FSCACHE_OP_TYPE) {
68789 case FSCACHE_OP_FAST:
68790 _debug("queue fast");
68791@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
68792 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
68793 if (op->processor)
68794 fscache_enqueue_operation(op);
68795- fscache_stat(&fscache_n_op_run);
68796+ fscache_stat_unchecked(&fscache_n_op_run);
68797 }
68798
68799 /*
68800@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68801 if (object->n_ops > 0) {
68802 atomic_inc(&op->usage);
68803 list_add_tail(&op->pend_link, &object->pending_ops);
68804- fscache_stat(&fscache_n_op_pend);
68805+ fscache_stat_unchecked(&fscache_n_op_pend);
68806 } else if (!list_empty(&object->pending_ops)) {
68807 atomic_inc(&op->usage);
68808 list_add_tail(&op->pend_link, &object->pending_ops);
68809- fscache_stat(&fscache_n_op_pend);
68810+ fscache_stat_unchecked(&fscache_n_op_pend);
68811 fscache_start_operations(object);
68812 } else {
68813 ASSERTCMP(object->n_in_progress, ==, 0);
68814@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68815 object->n_exclusive++; /* reads and writes must wait */
68816 atomic_inc(&op->usage);
68817 list_add_tail(&op->pend_link, &object->pending_ops);
68818- fscache_stat(&fscache_n_op_pend);
68819+ fscache_stat_unchecked(&fscache_n_op_pend);
68820 ret = 0;
68821 } else {
68822 /* not allowed to submit ops in any other state */
68823@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
68824 if (object->n_exclusive > 0) {
68825 atomic_inc(&op->usage);
68826 list_add_tail(&op->pend_link, &object->pending_ops);
68827- fscache_stat(&fscache_n_op_pend);
68828+ fscache_stat_unchecked(&fscache_n_op_pend);
68829 } else if (!list_empty(&object->pending_ops)) {
68830 atomic_inc(&op->usage);
68831 list_add_tail(&op->pend_link, &object->pending_ops);
68832- fscache_stat(&fscache_n_op_pend);
68833+ fscache_stat_unchecked(&fscache_n_op_pend);
68834 fscache_start_operations(object);
68835 } else {
68836 ASSERTCMP(object->n_exclusive, ==, 0);
68837@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
68838 object->n_ops++;
68839 atomic_inc(&op->usage);
68840 list_add_tail(&op->pend_link, &object->pending_ops);
68841- fscache_stat(&fscache_n_op_pend);
68842+ fscache_stat_unchecked(&fscache_n_op_pend);
68843 ret = 0;
68844 } else if (object->state == FSCACHE_OBJECT_DYING ||
68845 object->state == FSCACHE_OBJECT_LC_DYING ||
68846 object->state == FSCACHE_OBJECT_WITHDRAWING) {
68847- fscache_stat(&fscache_n_op_rejected);
68848+ fscache_stat_unchecked(&fscache_n_op_rejected);
68849 ret = -ENOBUFS;
68850 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
68851 fscache_report_unexpected_submission(object, op, ostate);
68852@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
68853
68854 ret = -EBUSY;
68855 if (!list_empty(&op->pend_link)) {
68856- fscache_stat(&fscache_n_op_cancelled);
68857+ fscache_stat_unchecked(&fscache_n_op_cancelled);
68858 list_del_init(&op->pend_link);
68859 object->n_ops--;
68860 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
68861@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
68862 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
68863 BUG();
68864
68865- fscache_stat(&fscache_n_op_release);
68866+ fscache_stat_unchecked(&fscache_n_op_release);
68867
68868 if (op->release) {
68869 op->release(op);
68870@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
68871 * lock, and defer it otherwise */
68872 if (!spin_trylock(&object->lock)) {
68873 _debug("defer put");
68874- fscache_stat(&fscache_n_op_deferred_release);
68875+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
68876
68877 cache = object->cache;
68878 spin_lock(&cache->op_gc_list_lock);
68879@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
68880
68881 _debug("GC DEFERRED REL OBJ%x OP%x",
68882 object->debug_id, op->debug_id);
68883- fscache_stat(&fscache_n_op_gc);
68884+ fscache_stat_unchecked(&fscache_n_op_gc);
68885
68886 ASSERTCMP(atomic_read(&op->usage), ==, 0);
68887
68888diff --git a/fs/fscache/page.c b/fs/fscache/page.c
68889index c598ea4..6aac13e 100644
68890--- a/fs/fscache/page.c
68891+++ b/fs/fscache/page.c
68892@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68893 val = radix_tree_lookup(&cookie->stores, page->index);
68894 if (!val) {
68895 rcu_read_unlock();
68896- fscache_stat(&fscache_n_store_vmscan_not_storing);
68897+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
68898 __fscache_uncache_page(cookie, page);
68899 return true;
68900 }
68901@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68902 spin_unlock(&cookie->stores_lock);
68903
68904 if (xpage) {
68905- fscache_stat(&fscache_n_store_vmscan_cancelled);
68906- fscache_stat(&fscache_n_store_radix_deletes);
68907+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
68908+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68909 ASSERTCMP(xpage, ==, page);
68910 } else {
68911- fscache_stat(&fscache_n_store_vmscan_gone);
68912+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
68913 }
68914
68915 wake_up_bit(&cookie->flags, 0);
68916@@ -106,7 +106,7 @@ page_busy:
68917 /* we might want to wait here, but that could deadlock the allocator as
68918 * the slow-work threads writing to the cache may all end up sleeping
68919 * on memory allocation */
68920- fscache_stat(&fscache_n_store_vmscan_busy);
68921+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
68922 return false;
68923 }
68924 EXPORT_SYMBOL(__fscache_maybe_release_page);
68925@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
68926 FSCACHE_COOKIE_STORING_TAG);
68927 if (!radix_tree_tag_get(&cookie->stores, page->index,
68928 FSCACHE_COOKIE_PENDING_TAG)) {
68929- fscache_stat(&fscache_n_store_radix_deletes);
68930+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68931 xpage = radix_tree_delete(&cookie->stores, page->index);
68932 }
68933 spin_unlock(&cookie->stores_lock);
68934@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
68935
68936 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
68937
68938- fscache_stat(&fscache_n_attr_changed_calls);
68939+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
68940
68941 if (fscache_object_is_active(object)) {
68942 fscache_set_op_state(op, "CallFS");
68943@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68944
68945 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
68946
68947- fscache_stat(&fscache_n_attr_changed);
68948+ fscache_stat_unchecked(&fscache_n_attr_changed);
68949
68950 op = kzalloc(sizeof(*op), GFP_KERNEL);
68951 if (!op) {
68952- fscache_stat(&fscache_n_attr_changed_nomem);
68953+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
68954 _leave(" = -ENOMEM");
68955 return -ENOMEM;
68956 }
68957@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68958 if (fscache_submit_exclusive_op(object, op) < 0)
68959 goto nobufs;
68960 spin_unlock(&cookie->lock);
68961- fscache_stat(&fscache_n_attr_changed_ok);
68962+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
68963 fscache_put_operation(op);
68964 _leave(" = 0");
68965 return 0;
68966@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68967 nobufs:
68968 spin_unlock(&cookie->lock);
68969 kfree(op);
68970- fscache_stat(&fscache_n_attr_changed_nobufs);
68971+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
68972 _leave(" = %d", -ENOBUFS);
68973 return -ENOBUFS;
68974 }
68975@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
68976 /* allocate a retrieval operation and attempt to submit it */
68977 op = kzalloc(sizeof(*op), GFP_NOIO);
68978 if (!op) {
68979- fscache_stat(&fscache_n_retrievals_nomem);
68980+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68981 return NULL;
68982 }
68983
68984@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
68985 return 0;
68986 }
68987
68988- fscache_stat(&fscache_n_retrievals_wait);
68989+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
68990
68991 jif = jiffies;
68992 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
68993 fscache_wait_bit_interruptible,
68994 TASK_INTERRUPTIBLE) != 0) {
68995- fscache_stat(&fscache_n_retrievals_intr);
68996+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
68997 _leave(" = -ERESTARTSYS");
68998 return -ERESTARTSYS;
68999 }
69000@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
69001 */
69002 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69003 struct fscache_retrieval *op,
69004- atomic_t *stat_op_waits,
69005- atomic_t *stat_object_dead)
69006+ atomic_unchecked_t *stat_op_waits,
69007+ atomic_unchecked_t *stat_object_dead)
69008 {
69009 int ret;
69010
69011@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69012 goto check_if_dead;
69013
69014 _debug(">>> WT");
69015- fscache_stat(stat_op_waits);
69016+ fscache_stat_unchecked(stat_op_waits);
69017 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
69018 fscache_wait_bit_interruptible,
69019 TASK_INTERRUPTIBLE) < 0) {
69020@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
69021
69022 check_if_dead:
69023 if (unlikely(fscache_object_is_dead(object))) {
69024- fscache_stat(stat_object_dead);
69025+ fscache_stat_unchecked(stat_object_dead);
69026 return -ENOBUFS;
69027 }
69028 return 0;
69029@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69030
69031 _enter("%p,%p,,,", cookie, page);
69032
69033- fscache_stat(&fscache_n_retrievals);
69034+ fscache_stat_unchecked(&fscache_n_retrievals);
69035
69036 if (hlist_empty(&cookie->backing_objects))
69037 goto nobufs;
69038@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69039 goto nobufs_unlock;
69040 spin_unlock(&cookie->lock);
69041
69042- fscache_stat(&fscache_n_retrieval_ops);
69043+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
69044
69045 /* pin the netfs read context in case we need to do the actual netfs
69046 * read because we've encountered a cache read failure */
69047@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
69048
69049 error:
69050 if (ret == -ENOMEM)
69051- fscache_stat(&fscache_n_retrievals_nomem);
69052+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69053 else if (ret == -ERESTARTSYS)
69054- fscache_stat(&fscache_n_retrievals_intr);
69055+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
69056 else if (ret == -ENODATA)
69057- fscache_stat(&fscache_n_retrievals_nodata);
69058+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69059 else if (ret < 0)
69060- fscache_stat(&fscache_n_retrievals_nobufs);
69061+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69062 else
69063- fscache_stat(&fscache_n_retrievals_ok);
69064+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
69065
69066 fscache_put_retrieval(op);
69067 _leave(" = %d", ret);
69068@@ -453,7 +453,7 @@ nobufs_unlock:
69069 spin_unlock(&cookie->lock);
69070 kfree(op);
69071 nobufs:
69072- fscache_stat(&fscache_n_retrievals_nobufs);
69073+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69074 _leave(" = -ENOBUFS");
69075 return -ENOBUFS;
69076 }
69077@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69078
69079 _enter("%p,,%d,,,", cookie, *nr_pages);
69080
69081- fscache_stat(&fscache_n_retrievals);
69082+ fscache_stat_unchecked(&fscache_n_retrievals);
69083
69084 if (hlist_empty(&cookie->backing_objects))
69085 goto nobufs;
69086@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69087 goto nobufs_unlock;
69088 spin_unlock(&cookie->lock);
69089
69090- fscache_stat(&fscache_n_retrieval_ops);
69091+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
69092
69093 /* pin the netfs read context in case we need to do the actual netfs
69094 * read because we've encountered a cache read failure */
69095@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69096
69097 error:
69098 if (ret == -ENOMEM)
69099- fscache_stat(&fscache_n_retrievals_nomem);
69100+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69101 else if (ret == -ERESTARTSYS)
69102- fscache_stat(&fscache_n_retrievals_intr);
69103+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
69104 else if (ret == -ENODATA)
69105- fscache_stat(&fscache_n_retrievals_nodata);
69106+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69107 else if (ret < 0)
69108- fscache_stat(&fscache_n_retrievals_nobufs);
69109+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69110 else
69111- fscache_stat(&fscache_n_retrievals_ok);
69112+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
69113
69114 fscache_put_retrieval(op);
69115 _leave(" = %d", ret);
69116@@ -570,7 +570,7 @@ nobufs_unlock:
69117 spin_unlock(&cookie->lock);
69118 kfree(op);
69119 nobufs:
69120- fscache_stat(&fscache_n_retrievals_nobufs);
69121+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69122 _leave(" = -ENOBUFS");
69123 return -ENOBUFS;
69124 }
69125@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69126
69127 _enter("%p,%p,,,", cookie, page);
69128
69129- fscache_stat(&fscache_n_allocs);
69130+ fscache_stat_unchecked(&fscache_n_allocs);
69131
69132 if (hlist_empty(&cookie->backing_objects))
69133 goto nobufs;
69134@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69135 goto nobufs_unlock;
69136 spin_unlock(&cookie->lock);
69137
69138- fscache_stat(&fscache_n_alloc_ops);
69139+ fscache_stat_unchecked(&fscache_n_alloc_ops);
69140
69141 ret = fscache_wait_for_retrieval_activation(
69142 object, op,
69143@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69144
69145 error:
69146 if (ret == -ERESTARTSYS)
69147- fscache_stat(&fscache_n_allocs_intr);
69148+ fscache_stat_unchecked(&fscache_n_allocs_intr);
69149 else if (ret < 0)
69150- fscache_stat(&fscache_n_allocs_nobufs);
69151+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69152 else
69153- fscache_stat(&fscache_n_allocs_ok);
69154+ fscache_stat_unchecked(&fscache_n_allocs_ok);
69155
69156 fscache_put_retrieval(op);
69157 _leave(" = %d", ret);
69158@@ -651,7 +651,7 @@ nobufs_unlock:
69159 spin_unlock(&cookie->lock);
69160 kfree(op);
69161 nobufs:
69162- fscache_stat(&fscache_n_allocs_nobufs);
69163+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69164 _leave(" = -ENOBUFS");
69165 return -ENOBUFS;
69166 }
69167@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69168
69169 spin_lock(&cookie->stores_lock);
69170
69171- fscache_stat(&fscache_n_store_calls);
69172+ fscache_stat_unchecked(&fscache_n_store_calls);
69173
69174 /* find a page to store */
69175 page = NULL;
69176@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69177 page = results[0];
69178 _debug("gang %d [%lx]", n, page->index);
69179 if (page->index > op->store_limit) {
69180- fscache_stat(&fscache_n_store_pages_over_limit);
69181+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
69182 goto superseded;
69183 }
69184
69185@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69186
69187 if (page) {
69188 fscache_set_op_state(&op->op, "Store");
69189- fscache_stat(&fscache_n_store_pages);
69190+ fscache_stat_unchecked(&fscache_n_store_pages);
69191 fscache_stat(&fscache_n_cop_write_page);
69192 ret = object->cache->ops->write_page(op, page);
69193 fscache_stat_d(&fscache_n_cop_write_page);
69194@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69195 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69196 ASSERT(PageFsCache(page));
69197
69198- fscache_stat(&fscache_n_stores);
69199+ fscache_stat_unchecked(&fscache_n_stores);
69200
69201 op = kzalloc(sizeof(*op), GFP_NOIO);
69202 if (!op)
69203@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69204 spin_unlock(&cookie->stores_lock);
69205 spin_unlock(&object->lock);
69206
69207- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
69208+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
69209 op->store_limit = object->store_limit;
69210
69211 if (fscache_submit_op(object, &op->op) < 0)
69212@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69213
69214 spin_unlock(&cookie->lock);
69215 radix_tree_preload_end();
69216- fscache_stat(&fscache_n_store_ops);
69217- fscache_stat(&fscache_n_stores_ok);
69218+ fscache_stat_unchecked(&fscache_n_store_ops);
69219+ fscache_stat_unchecked(&fscache_n_stores_ok);
69220
69221 /* the slow work queue now carries its own ref on the object */
69222 fscache_put_operation(&op->op);
69223@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69224 return 0;
69225
69226 already_queued:
69227- fscache_stat(&fscache_n_stores_again);
69228+ fscache_stat_unchecked(&fscache_n_stores_again);
69229 already_pending:
69230 spin_unlock(&cookie->stores_lock);
69231 spin_unlock(&object->lock);
69232 spin_unlock(&cookie->lock);
69233 radix_tree_preload_end();
69234 kfree(op);
69235- fscache_stat(&fscache_n_stores_ok);
69236+ fscache_stat_unchecked(&fscache_n_stores_ok);
69237 _leave(" = 0");
69238 return 0;
69239
69240@@ -886,14 +886,14 @@ nobufs:
69241 spin_unlock(&cookie->lock);
69242 radix_tree_preload_end();
69243 kfree(op);
69244- fscache_stat(&fscache_n_stores_nobufs);
69245+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
69246 _leave(" = -ENOBUFS");
69247 return -ENOBUFS;
69248
69249 nomem_free:
69250 kfree(op);
69251 nomem:
69252- fscache_stat(&fscache_n_stores_oom);
69253+ fscache_stat_unchecked(&fscache_n_stores_oom);
69254 _leave(" = -ENOMEM");
69255 return -ENOMEM;
69256 }
69257@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
69258 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69259 ASSERTCMP(page, !=, NULL);
69260
69261- fscache_stat(&fscache_n_uncaches);
69262+ fscache_stat_unchecked(&fscache_n_uncaches);
69263
69264 /* cache withdrawal may beat us to it */
69265 if (!PageFsCache(page))
69266@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
69267 unsigned long loop;
69268
69269 #ifdef CONFIG_FSCACHE_STATS
69270- atomic_add(pagevec->nr, &fscache_n_marks);
69271+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
69272 #endif
69273
69274 for (loop = 0; loop < pagevec->nr; loop++) {
69275diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
69276index 46435f3..8cddf18 100644
69277--- a/fs/fscache/stats.c
69278+++ b/fs/fscache/stats.c
69279@@ -18,95 +18,95 @@
69280 /*
69281 * operation counters
69282 */
69283-atomic_t fscache_n_op_pend;
69284-atomic_t fscache_n_op_run;
69285-atomic_t fscache_n_op_enqueue;
69286-atomic_t fscache_n_op_requeue;
69287-atomic_t fscache_n_op_deferred_release;
69288-atomic_t fscache_n_op_release;
69289-atomic_t fscache_n_op_gc;
69290-atomic_t fscache_n_op_cancelled;
69291-atomic_t fscache_n_op_rejected;
69292+atomic_unchecked_t fscache_n_op_pend;
69293+atomic_unchecked_t fscache_n_op_run;
69294+atomic_unchecked_t fscache_n_op_enqueue;
69295+atomic_unchecked_t fscache_n_op_requeue;
69296+atomic_unchecked_t fscache_n_op_deferred_release;
69297+atomic_unchecked_t fscache_n_op_release;
69298+atomic_unchecked_t fscache_n_op_gc;
69299+atomic_unchecked_t fscache_n_op_cancelled;
69300+atomic_unchecked_t fscache_n_op_rejected;
69301
69302-atomic_t fscache_n_attr_changed;
69303-atomic_t fscache_n_attr_changed_ok;
69304-atomic_t fscache_n_attr_changed_nobufs;
69305-atomic_t fscache_n_attr_changed_nomem;
69306-atomic_t fscache_n_attr_changed_calls;
69307+atomic_unchecked_t fscache_n_attr_changed;
69308+atomic_unchecked_t fscache_n_attr_changed_ok;
69309+atomic_unchecked_t fscache_n_attr_changed_nobufs;
69310+atomic_unchecked_t fscache_n_attr_changed_nomem;
69311+atomic_unchecked_t fscache_n_attr_changed_calls;
69312
69313-atomic_t fscache_n_allocs;
69314-atomic_t fscache_n_allocs_ok;
69315-atomic_t fscache_n_allocs_wait;
69316-atomic_t fscache_n_allocs_nobufs;
69317-atomic_t fscache_n_allocs_intr;
69318-atomic_t fscache_n_allocs_object_dead;
69319-atomic_t fscache_n_alloc_ops;
69320-atomic_t fscache_n_alloc_op_waits;
69321+atomic_unchecked_t fscache_n_allocs;
69322+atomic_unchecked_t fscache_n_allocs_ok;
69323+atomic_unchecked_t fscache_n_allocs_wait;
69324+atomic_unchecked_t fscache_n_allocs_nobufs;
69325+atomic_unchecked_t fscache_n_allocs_intr;
69326+atomic_unchecked_t fscache_n_allocs_object_dead;
69327+atomic_unchecked_t fscache_n_alloc_ops;
69328+atomic_unchecked_t fscache_n_alloc_op_waits;
69329
69330-atomic_t fscache_n_retrievals;
69331-atomic_t fscache_n_retrievals_ok;
69332-atomic_t fscache_n_retrievals_wait;
69333-atomic_t fscache_n_retrievals_nodata;
69334-atomic_t fscache_n_retrievals_nobufs;
69335-atomic_t fscache_n_retrievals_intr;
69336-atomic_t fscache_n_retrievals_nomem;
69337-atomic_t fscache_n_retrievals_object_dead;
69338-atomic_t fscache_n_retrieval_ops;
69339-atomic_t fscache_n_retrieval_op_waits;
69340+atomic_unchecked_t fscache_n_retrievals;
69341+atomic_unchecked_t fscache_n_retrievals_ok;
69342+atomic_unchecked_t fscache_n_retrievals_wait;
69343+atomic_unchecked_t fscache_n_retrievals_nodata;
69344+atomic_unchecked_t fscache_n_retrievals_nobufs;
69345+atomic_unchecked_t fscache_n_retrievals_intr;
69346+atomic_unchecked_t fscache_n_retrievals_nomem;
69347+atomic_unchecked_t fscache_n_retrievals_object_dead;
69348+atomic_unchecked_t fscache_n_retrieval_ops;
69349+atomic_unchecked_t fscache_n_retrieval_op_waits;
69350
69351-atomic_t fscache_n_stores;
69352-atomic_t fscache_n_stores_ok;
69353-atomic_t fscache_n_stores_again;
69354-atomic_t fscache_n_stores_nobufs;
69355-atomic_t fscache_n_stores_oom;
69356-atomic_t fscache_n_store_ops;
69357-atomic_t fscache_n_store_calls;
69358-atomic_t fscache_n_store_pages;
69359-atomic_t fscache_n_store_radix_deletes;
69360-atomic_t fscache_n_store_pages_over_limit;
69361+atomic_unchecked_t fscache_n_stores;
69362+atomic_unchecked_t fscache_n_stores_ok;
69363+atomic_unchecked_t fscache_n_stores_again;
69364+atomic_unchecked_t fscache_n_stores_nobufs;
69365+atomic_unchecked_t fscache_n_stores_oom;
69366+atomic_unchecked_t fscache_n_store_ops;
69367+atomic_unchecked_t fscache_n_store_calls;
69368+atomic_unchecked_t fscache_n_store_pages;
69369+atomic_unchecked_t fscache_n_store_radix_deletes;
69370+atomic_unchecked_t fscache_n_store_pages_over_limit;
69371
69372-atomic_t fscache_n_store_vmscan_not_storing;
69373-atomic_t fscache_n_store_vmscan_gone;
69374-atomic_t fscache_n_store_vmscan_busy;
69375-atomic_t fscache_n_store_vmscan_cancelled;
69376+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
69377+atomic_unchecked_t fscache_n_store_vmscan_gone;
69378+atomic_unchecked_t fscache_n_store_vmscan_busy;
69379+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
69380
69381-atomic_t fscache_n_marks;
69382-atomic_t fscache_n_uncaches;
69383+atomic_unchecked_t fscache_n_marks;
69384+atomic_unchecked_t fscache_n_uncaches;
69385
69386-atomic_t fscache_n_acquires;
69387-atomic_t fscache_n_acquires_null;
69388-atomic_t fscache_n_acquires_no_cache;
69389-atomic_t fscache_n_acquires_ok;
69390-atomic_t fscache_n_acquires_nobufs;
69391-atomic_t fscache_n_acquires_oom;
69392+atomic_unchecked_t fscache_n_acquires;
69393+atomic_unchecked_t fscache_n_acquires_null;
69394+atomic_unchecked_t fscache_n_acquires_no_cache;
69395+atomic_unchecked_t fscache_n_acquires_ok;
69396+atomic_unchecked_t fscache_n_acquires_nobufs;
69397+atomic_unchecked_t fscache_n_acquires_oom;
69398
69399-atomic_t fscache_n_updates;
69400-atomic_t fscache_n_updates_null;
69401-atomic_t fscache_n_updates_run;
69402+atomic_unchecked_t fscache_n_updates;
69403+atomic_unchecked_t fscache_n_updates_null;
69404+atomic_unchecked_t fscache_n_updates_run;
69405
69406-atomic_t fscache_n_relinquishes;
69407-atomic_t fscache_n_relinquishes_null;
69408-atomic_t fscache_n_relinquishes_waitcrt;
69409-atomic_t fscache_n_relinquishes_retire;
69410+atomic_unchecked_t fscache_n_relinquishes;
69411+atomic_unchecked_t fscache_n_relinquishes_null;
69412+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
69413+atomic_unchecked_t fscache_n_relinquishes_retire;
69414
69415-atomic_t fscache_n_cookie_index;
69416-atomic_t fscache_n_cookie_data;
69417-atomic_t fscache_n_cookie_special;
69418+atomic_unchecked_t fscache_n_cookie_index;
69419+atomic_unchecked_t fscache_n_cookie_data;
69420+atomic_unchecked_t fscache_n_cookie_special;
69421
69422-atomic_t fscache_n_object_alloc;
69423-atomic_t fscache_n_object_no_alloc;
69424-atomic_t fscache_n_object_lookups;
69425-atomic_t fscache_n_object_lookups_negative;
69426-atomic_t fscache_n_object_lookups_positive;
69427-atomic_t fscache_n_object_lookups_timed_out;
69428-atomic_t fscache_n_object_created;
69429-atomic_t fscache_n_object_avail;
69430-atomic_t fscache_n_object_dead;
69431+atomic_unchecked_t fscache_n_object_alloc;
69432+atomic_unchecked_t fscache_n_object_no_alloc;
69433+atomic_unchecked_t fscache_n_object_lookups;
69434+atomic_unchecked_t fscache_n_object_lookups_negative;
69435+atomic_unchecked_t fscache_n_object_lookups_positive;
69436+atomic_unchecked_t fscache_n_object_lookups_timed_out;
69437+atomic_unchecked_t fscache_n_object_created;
69438+atomic_unchecked_t fscache_n_object_avail;
69439+atomic_unchecked_t fscache_n_object_dead;
69440
69441-atomic_t fscache_n_checkaux_none;
69442-atomic_t fscache_n_checkaux_okay;
69443-atomic_t fscache_n_checkaux_update;
69444-atomic_t fscache_n_checkaux_obsolete;
69445+atomic_unchecked_t fscache_n_checkaux_none;
69446+atomic_unchecked_t fscache_n_checkaux_okay;
69447+atomic_unchecked_t fscache_n_checkaux_update;
69448+atomic_unchecked_t fscache_n_checkaux_obsolete;
69449
69450 atomic_t fscache_n_cop_alloc_object;
69451 atomic_t fscache_n_cop_lookup_object;
69452@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
69453 seq_puts(m, "FS-Cache statistics\n");
69454
69455 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
69456- atomic_read(&fscache_n_cookie_index),
69457- atomic_read(&fscache_n_cookie_data),
69458- atomic_read(&fscache_n_cookie_special));
69459+ atomic_read_unchecked(&fscache_n_cookie_index),
69460+ atomic_read_unchecked(&fscache_n_cookie_data),
69461+ atomic_read_unchecked(&fscache_n_cookie_special));
69462
69463 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
69464- atomic_read(&fscache_n_object_alloc),
69465- atomic_read(&fscache_n_object_no_alloc),
69466- atomic_read(&fscache_n_object_avail),
69467- atomic_read(&fscache_n_object_dead));
69468+ atomic_read_unchecked(&fscache_n_object_alloc),
69469+ atomic_read_unchecked(&fscache_n_object_no_alloc),
69470+ atomic_read_unchecked(&fscache_n_object_avail),
69471+ atomic_read_unchecked(&fscache_n_object_dead));
69472 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
69473- atomic_read(&fscache_n_checkaux_none),
69474- atomic_read(&fscache_n_checkaux_okay),
69475- atomic_read(&fscache_n_checkaux_update),
69476- atomic_read(&fscache_n_checkaux_obsolete));
69477+ atomic_read_unchecked(&fscache_n_checkaux_none),
69478+ atomic_read_unchecked(&fscache_n_checkaux_okay),
69479+ atomic_read_unchecked(&fscache_n_checkaux_update),
69480+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
69481
69482 seq_printf(m, "Pages : mrk=%u unc=%u\n",
69483- atomic_read(&fscache_n_marks),
69484- atomic_read(&fscache_n_uncaches));
69485+ atomic_read_unchecked(&fscache_n_marks),
69486+ atomic_read_unchecked(&fscache_n_uncaches));
69487
69488 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
69489 " oom=%u\n",
69490- atomic_read(&fscache_n_acquires),
69491- atomic_read(&fscache_n_acquires_null),
69492- atomic_read(&fscache_n_acquires_no_cache),
69493- atomic_read(&fscache_n_acquires_ok),
69494- atomic_read(&fscache_n_acquires_nobufs),
69495- atomic_read(&fscache_n_acquires_oom));
69496+ atomic_read_unchecked(&fscache_n_acquires),
69497+ atomic_read_unchecked(&fscache_n_acquires_null),
69498+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
69499+ atomic_read_unchecked(&fscache_n_acquires_ok),
69500+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
69501+ atomic_read_unchecked(&fscache_n_acquires_oom));
69502
69503 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
69504- atomic_read(&fscache_n_object_lookups),
69505- atomic_read(&fscache_n_object_lookups_negative),
69506- atomic_read(&fscache_n_object_lookups_positive),
69507- atomic_read(&fscache_n_object_lookups_timed_out),
69508- atomic_read(&fscache_n_object_created));
69509+ atomic_read_unchecked(&fscache_n_object_lookups),
69510+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
69511+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
69512+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
69513+ atomic_read_unchecked(&fscache_n_object_created));
69514
69515 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
69516- atomic_read(&fscache_n_updates),
69517- atomic_read(&fscache_n_updates_null),
69518- atomic_read(&fscache_n_updates_run));
69519+ atomic_read_unchecked(&fscache_n_updates),
69520+ atomic_read_unchecked(&fscache_n_updates_null),
69521+ atomic_read_unchecked(&fscache_n_updates_run));
69522
69523 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
69524- atomic_read(&fscache_n_relinquishes),
69525- atomic_read(&fscache_n_relinquishes_null),
69526- atomic_read(&fscache_n_relinquishes_waitcrt),
69527- atomic_read(&fscache_n_relinquishes_retire));
69528+ atomic_read_unchecked(&fscache_n_relinquishes),
69529+ atomic_read_unchecked(&fscache_n_relinquishes_null),
69530+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
69531+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
69532
69533 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
69534- atomic_read(&fscache_n_attr_changed),
69535- atomic_read(&fscache_n_attr_changed_ok),
69536- atomic_read(&fscache_n_attr_changed_nobufs),
69537- atomic_read(&fscache_n_attr_changed_nomem),
69538- atomic_read(&fscache_n_attr_changed_calls));
69539+ atomic_read_unchecked(&fscache_n_attr_changed),
69540+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
69541+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
69542+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
69543+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
69544
69545 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
69546- atomic_read(&fscache_n_allocs),
69547- atomic_read(&fscache_n_allocs_ok),
69548- atomic_read(&fscache_n_allocs_wait),
69549- atomic_read(&fscache_n_allocs_nobufs),
69550- atomic_read(&fscache_n_allocs_intr));
69551+ atomic_read_unchecked(&fscache_n_allocs),
69552+ atomic_read_unchecked(&fscache_n_allocs_ok),
69553+ atomic_read_unchecked(&fscache_n_allocs_wait),
69554+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
69555+ atomic_read_unchecked(&fscache_n_allocs_intr));
69556 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
69557- atomic_read(&fscache_n_alloc_ops),
69558- atomic_read(&fscache_n_alloc_op_waits),
69559- atomic_read(&fscache_n_allocs_object_dead));
69560+ atomic_read_unchecked(&fscache_n_alloc_ops),
69561+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
69562+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
69563
69564 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
69565 " int=%u oom=%u\n",
69566- atomic_read(&fscache_n_retrievals),
69567- atomic_read(&fscache_n_retrievals_ok),
69568- atomic_read(&fscache_n_retrievals_wait),
69569- atomic_read(&fscache_n_retrievals_nodata),
69570- atomic_read(&fscache_n_retrievals_nobufs),
69571- atomic_read(&fscache_n_retrievals_intr),
69572- atomic_read(&fscache_n_retrievals_nomem));
69573+ atomic_read_unchecked(&fscache_n_retrievals),
69574+ atomic_read_unchecked(&fscache_n_retrievals_ok),
69575+ atomic_read_unchecked(&fscache_n_retrievals_wait),
69576+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
69577+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
69578+ atomic_read_unchecked(&fscache_n_retrievals_intr),
69579+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
69580 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
69581- atomic_read(&fscache_n_retrieval_ops),
69582- atomic_read(&fscache_n_retrieval_op_waits),
69583- atomic_read(&fscache_n_retrievals_object_dead));
69584+ atomic_read_unchecked(&fscache_n_retrieval_ops),
69585+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
69586+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
69587
69588 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
69589- atomic_read(&fscache_n_stores),
69590- atomic_read(&fscache_n_stores_ok),
69591- atomic_read(&fscache_n_stores_again),
69592- atomic_read(&fscache_n_stores_nobufs),
69593- atomic_read(&fscache_n_stores_oom));
69594+ atomic_read_unchecked(&fscache_n_stores),
69595+ atomic_read_unchecked(&fscache_n_stores_ok),
69596+ atomic_read_unchecked(&fscache_n_stores_again),
69597+ atomic_read_unchecked(&fscache_n_stores_nobufs),
69598+ atomic_read_unchecked(&fscache_n_stores_oom));
69599 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
69600- atomic_read(&fscache_n_store_ops),
69601- atomic_read(&fscache_n_store_calls),
69602- atomic_read(&fscache_n_store_pages),
69603- atomic_read(&fscache_n_store_radix_deletes),
69604- atomic_read(&fscache_n_store_pages_over_limit));
69605+ atomic_read_unchecked(&fscache_n_store_ops),
69606+ atomic_read_unchecked(&fscache_n_store_calls),
69607+ atomic_read_unchecked(&fscache_n_store_pages),
69608+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
69609+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
69610
69611 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
69612- atomic_read(&fscache_n_store_vmscan_not_storing),
69613- atomic_read(&fscache_n_store_vmscan_gone),
69614- atomic_read(&fscache_n_store_vmscan_busy),
69615- atomic_read(&fscache_n_store_vmscan_cancelled));
69616+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
69617+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
69618+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
69619+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
69620
69621 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
69622- atomic_read(&fscache_n_op_pend),
69623- atomic_read(&fscache_n_op_run),
69624- atomic_read(&fscache_n_op_enqueue),
69625- atomic_read(&fscache_n_op_cancelled),
69626- atomic_read(&fscache_n_op_rejected));
69627+ atomic_read_unchecked(&fscache_n_op_pend),
69628+ atomic_read_unchecked(&fscache_n_op_run),
69629+ atomic_read_unchecked(&fscache_n_op_enqueue),
69630+ atomic_read_unchecked(&fscache_n_op_cancelled),
69631+ atomic_read_unchecked(&fscache_n_op_rejected));
69632 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
69633- atomic_read(&fscache_n_op_deferred_release),
69634- atomic_read(&fscache_n_op_release),
69635- atomic_read(&fscache_n_op_gc));
69636+ atomic_read_unchecked(&fscache_n_op_deferred_release),
69637+ atomic_read_unchecked(&fscache_n_op_release),
69638+ atomic_read_unchecked(&fscache_n_op_gc));
69639
69640 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
69641 atomic_read(&fscache_n_cop_alloc_object),
69642diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
69643index de792dc..448b532 100644
69644--- a/fs/fuse/cuse.c
69645+++ b/fs/fuse/cuse.c
69646@@ -576,10 +576,12 @@ static int __init cuse_init(void)
69647 INIT_LIST_HEAD(&cuse_conntbl[i]);
69648
69649 /* inherit and extend fuse_dev_operations */
69650- cuse_channel_fops = fuse_dev_operations;
69651- cuse_channel_fops.owner = THIS_MODULE;
69652- cuse_channel_fops.open = cuse_channel_open;
69653- cuse_channel_fops.release = cuse_channel_release;
69654+ pax_open_kernel();
69655+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
69656+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
69657+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
69658+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
69659+ pax_close_kernel();
69660
69661 cuse_class = class_create(THIS_MODULE, "cuse");
69662 if (IS_ERR(cuse_class))
69663diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
69664index 1facb39..7f48557 100644
69665--- a/fs/fuse/dev.c
69666+++ b/fs/fuse/dev.c
69667@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69668 {
69669 struct fuse_notify_inval_entry_out outarg;
69670 int err = -EINVAL;
69671- char buf[FUSE_NAME_MAX+1];
69672+ char *buf = NULL;
69673 struct qstr name;
69674
69675 if (size < sizeof(outarg))
69676@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69677 if (outarg.namelen > FUSE_NAME_MAX)
69678 goto err;
69679
69680+ err = -ENOMEM;
69681+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
69682+ if (!buf)
69683+ goto err;
69684+
69685 err = -EINVAL;
69686 if (size != sizeof(outarg) + outarg.namelen + 1)
69687 goto err;
69688@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69689
69690 down_read(&fc->killsb);
69691 err = -ENOENT;
69692- if (!fc->sb)
69693- goto err_unlock;
69694-
69695- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69696-
69697-err_unlock:
69698+ if (fc->sb)
69699+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69700 up_read(&fc->killsb);
69701+ kfree(buf);
69702 return err;
69703
69704 err:
69705 fuse_copy_finish(cs);
69706+ kfree(buf);
69707 return err;
69708 }
69709
69710diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
69711index 4787ae6..73efff7 100644
69712--- a/fs/fuse/dir.c
69713+++ b/fs/fuse/dir.c
69714@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
69715 return link;
69716 }
69717
69718-static void free_link(char *link)
69719+static void free_link(const char *link)
69720 {
69721 if (!IS_ERR(link))
69722 free_page((unsigned long) link);
69723diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
69724index 247436c..e650ccb 100644
69725--- a/fs/gfs2/ops_inode.c
69726+++ b/fs/gfs2/ops_inode.c
69727@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
69728 unsigned int x;
69729 int error;
69730
69731+ pax_track_stack();
69732+
69733 if (ndentry->d_inode) {
69734 nip = GFS2_I(ndentry->d_inode);
69735 if (ip == nip)
69736diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
69737index 4463297..4fed53b 100644
69738--- a/fs/gfs2/sys.c
69739+++ b/fs/gfs2/sys.c
69740@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
69741 return a->store ? a->store(sdp, buf, len) : len;
69742 }
69743
69744-static struct sysfs_ops gfs2_attr_ops = {
69745+static const struct sysfs_ops gfs2_attr_ops = {
69746 .show = gfs2_attr_show,
69747 .store = gfs2_attr_store,
69748 };
69749@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
69750 return 0;
69751 }
69752
69753-static struct kset_uevent_ops gfs2_uevent_ops = {
69754+static const struct kset_uevent_ops gfs2_uevent_ops = {
69755 .uevent = gfs2_uevent,
69756 };
69757
69758diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
69759index f6874ac..7cd98a8 100644
69760--- a/fs/hfsplus/catalog.c
69761+++ b/fs/hfsplus/catalog.c
69762@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
69763 int err;
69764 u16 type;
69765
69766+ pax_track_stack();
69767+
69768 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
69769 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
69770 if (err)
69771@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
69772 int entry_size;
69773 int err;
69774
69775+ pax_track_stack();
69776+
69777 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
69778 sb = dir->i_sb;
69779 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
69780@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
69781 int entry_size, type;
69782 int err = 0;
69783
69784+ pax_track_stack();
69785+
69786 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
69787 dst_dir->i_ino, dst_name->name);
69788 sb = src_dir->i_sb;
69789diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
69790index 5f40236..dac3421 100644
69791--- a/fs/hfsplus/dir.c
69792+++ b/fs/hfsplus/dir.c
69793@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
69794 struct hfsplus_readdir_data *rd;
69795 u16 type;
69796
69797+ pax_track_stack();
69798+
69799 if (filp->f_pos >= inode->i_size)
69800 return 0;
69801
69802diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
69803index 1bcf597..905a251 100644
69804--- a/fs/hfsplus/inode.c
69805+++ b/fs/hfsplus/inode.c
69806@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
69807 int res = 0;
69808 u16 type;
69809
69810+ pax_track_stack();
69811+
69812 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
69813
69814 HFSPLUS_I(inode).dev = 0;
69815@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
69816 struct hfs_find_data fd;
69817 hfsplus_cat_entry entry;
69818
69819+ pax_track_stack();
69820+
69821 if (HFSPLUS_IS_RSRC(inode))
69822 main_inode = HFSPLUS_I(inode).rsrc_inode;
69823
69824diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
69825index f457d2c..7ef4ad5 100644
69826--- a/fs/hfsplus/ioctl.c
69827+++ b/fs/hfsplus/ioctl.c
69828@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
69829 struct hfsplus_cat_file *file;
69830 int res;
69831
69832+ pax_track_stack();
69833+
69834 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69835 return -EOPNOTSUPP;
69836
69837@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
69838 struct hfsplus_cat_file *file;
69839 ssize_t res = 0;
69840
69841+ pax_track_stack();
69842+
69843 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69844 return -EOPNOTSUPP;
69845
69846diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
69847index 43022f3..7298079 100644
69848--- a/fs/hfsplus/super.c
69849+++ b/fs/hfsplus/super.c
69850@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
69851 struct nls_table *nls = NULL;
69852 int err = -EINVAL;
69853
69854+ pax_track_stack();
69855+
69856 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
69857 if (!sbi)
69858 return -ENOMEM;
69859diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
69860index 87a1258..5694d91 100644
69861--- a/fs/hugetlbfs/inode.c
69862+++ b/fs/hugetlbfs/inode.c
69863@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
69864 .kill_sb = kill_litter_super,
69865 };
69866
69867-static struct vfsmount *hugetlbfs_vfsmount;
69868+struct vfsmount *hugetlbfs_vfsmount;
69869
69870 static int can_do_hugetlb_shm(void)
69871 {
69872diff --git a/fs/ioctl.c b/fs/ioctl.c
69873index 6c75110..19d2c3c 100644
69874--- a/fs/ioctl.c
69875+++ b/fs/ioctl.c
69876@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
69877 u64 phys, u64 len, u32 flags)
69878 {
69879 struct fiemap_extent extent;
69880- struct fiemap_extent *dest = fieinfo->fi_extents_start;
69881+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
69882
69883 /* only count the extents */
69884 if (fieinfo->fi_extents_max == 0) {
69885@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69886
69887 fieinfo.fi_flags = fiemap.fm_flags;
69888 fieinfo.fi_extents_max = fiemap.fm_extent_count;
69889- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
69890+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
69891
69892 if (fiemap.fm_extent_count != 0 &&
69893 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
69894@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69895 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
69896 fiemap.fm_flags = fieinfo.fi_flags;
69897 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
69898- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
69899+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
69900 error = -EFAULT;
69901
69902 return error;
69903diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
69904index b0435dd..81ee0be 100644
69905--- a/fs/jbd/checkpoint.c
69906+++ b/fs/jbd/checkpoint.c
69907@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
69908 tid_t this_tid;
69909 int result;
69910
69911+ pax_track_stack();
69912+
69913 jbd_debug(1, "Start checkpoint\n");
69914
69915 /*
69916diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
69917index 546d153..736896c 100644
69918--- a/fs/jffs2/compr_rtime.c
69919+++ b/fs/jffs2/compr_rtime.c
69920@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
69921 int outpos = 0;
69922 int pos=0;
69923
69924+ pax_track_stack();
69925+
69926 memset(positions,0,sizeof(positions));
69927
69928 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
69929@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
69930 int outpos = 0;
69931 int pos=0;
69932
69933+ pax_track_stack();
69934+
69935 memset(positions,0,sizeof(positions));
69936
69937 while (outpos<destlen) {
69938diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
69939index 170d289..3254b98 100644
69940--- a/fs/jffs2/compr_rubin.c
69941+++ b/fs/jffs2/compr_rubin.c
69942@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
69943 int ret;
69944 uint32_t mysrclen, mydstlen;
69945
69946+ pax_track_stack();
69947+
69948 mysrclen = *sourcelen;
69949 mydstlen = *dstlen - 8;
69950
69951diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
69952index b47679b..00d65d3 100644
69953--- a/fs/jffs2/erase.c
69954+++ b/fs/jffs2/erase.c
69955@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
69956 struct jffs2_unknown_node marker = {
69957 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
69958 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69959- .totlen = cpu_to_je32(c->cleanmarker_size)
69960+ .totlen = cpu_to_je32(c->cleanmarker_size),
69961+ .hdr_crc = cpu_to_je32(0)
69962 };
69963
69964 jffs2_prealloc_raw_node_refs(c, jeb, 1);
69965diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
69966index 5ef7bac..4fd1e3c 100644
69967--- a/fs/jffs2/wbuf.c
69968+++ b/fs/jffs2/wbuf.c
69969@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
69970 {
69971 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
69972 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69973- .totlen = constant_cpu_to_je32(8)
69974+ .totlen = constant_cpu_to_je32(8),
69975+ .hdr_crc = constant_cpu_to_je32(0)
69976 };
69977
69978 /*
69979diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
69980index 082e844..52012a1 100644
69981--- a/fs/jffs2/xattr.c
69982+++ b/fs/jffs2/xattr.c
69983@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
69984
69985 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
69986
69987+ pax_track_stack();
69988+
69989 /* Phase.1 : Merge same xref */
69990 for (i=0; i < XREF_TMPHASH_SIZE; i++)
69991 xref_tmphash[i] = NULL;
69992diff --git a/fs/jfs/super.c b/fs/jfs/super.c
69993index 2234c73..f6e6e6b 100644
69994--- a/fs/jfs/super.c
69995+++ b/fs/jfs/super.c
69996@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
69997
69998 jfs_inode_cachep =
69999 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
70000- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
70001+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
70002 init_once);
70003 if (jfs_inode_cachep == NULL)
70004 return -ENOMEM;
70005diff --git a/fs/libfs.c b/fs/libfs.c
70006index ba36e93..3153fce 100644
70007--- a/fs/libfs.c
70008+++ b/fs/libfs.c
70009@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
70010
70011 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
70012 struct dentry *next;
70013+ char d_name[sizeof(next->d_iname)];
70014+ const unsigned char *name;
70015+
70016 next = list_entry(p, struct dentry, d_u.d_child);
70017 if (d_unhashed(next) || !next->d_inode)
70018 continue;
70019
70020 spin_unlock(&dcache_lock);
70021- if (filldir(dirent, next->d_name.name,
70022+ name = next->d_name.name;
70023+ if (name == next->d_iname) {
70024+ memcpy(d_name, name, next->d_name.len);
70025+ name = d_name;
70026+ }
70027+ if (filldir(dirent, name,
70028 next->d_name.len, filp->f_pos,
70029 next->d_inode->i_ino,
70030 dt_type(next->d_inode)) < 0)
70031diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
70032index c325a83..d15b07b 100644
70033--- a/fs/lockd/clntproc.c
70034+++ b/fs/lockd/clntproc.c
70035@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
70036 /*
70037 * Cookie counter for NLM requests
70038 */
70039-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
70040+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
70041
70042 void nlmclnt_next_cookie(struct nlm_cookie *c)
70043 {
70044- u32 cookie = atomic_inc_return(&nlm_cookie);
70045+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
70046
70047 memcpy(c->data, &cookie, 4);
70048 c->len=4;
70049@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
70050 struct nlm_rqst reqst, *req;
70051 int status;
70052
70053+ pax_track_stack();
70054+
70055 req = &reqst;
70056 memset(req, 0, sizeof(*req));
70057 locks_init_lock(&req->a_args.lock.fl);
70058diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
70059index 1a54ae1..6a16c27 100644
70060--- a/fs/lockd/svc.c
70061+++ b/fs/lockd/svc.c
70062@@ -43,7 +43,7 @@
70063
70064 static struct svc_program nlmsvc_program;
70065
70066-struct nlmsvc_binding * nlmsvc_ops;
70067+const struct nlmsvc_binding * nlmsvc_ops;
70068 EXPORT_SYMBOL_GPL(nlmsvc_ops);
70069
70070 static DEFINE_MUTEX(nlmsvc_mutex);
70071diff --git a/fs/locks.c b/fs/locks.c
70072index a8794f2..4041e55 100644
70073--- a/fs/locks.c
70074+++ b/fs/locks.c
70075@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
70076
70077 static struct kmem_cache *filelock_cache __read_mostly;
70078
70079+static void locks_init_lock_always(struct file_lock *fl)
70080+{
70081+ fl->fl_next = NULL;
70082+ fl->fl_fasync = NULL;
70083+ fl->fl_owner = NULL;
70084+ fl->fl_pid = 0;
70085+ fl->fl_nspid = NULL;
70086+ fl->fl_file = NULL;
70087+ fl->fl_flags = 0;
70088+ fl->fl_type = 0;
70089+ fl->fl_start = fl->fl_end = 0;
70090+}
70091+
70092 /* Allocate an empty lock structure. */
70093 static struct file_lock *locks_alloc_lock(void)
70094 {
70095- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70096+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70097+
70098+ if (fl)
70099+ locks_init_lock_always(fl);
70100+
70101+ return fl;
70102 }
70103
70104 void locks_release_private(struct file_lock *fl)
70105@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
70106 INIT_LIST_HEAD(&fl->fl_link);
70107 INIT_LIST_HEAD(&fl->fl_block);
70108 init_waitqueue_head(&fl->fl_wait);
70109- fl->fl_next = NULL;
70110- fl->fl_fasync = NULL;
70111- fl->fl_owner = NULL;
70112- fl->fl_pid = 0;
70113- fl->fl_nspid = NULL;
70114- fl->fl_file = NULL;
70115- fl->fl_flags = 0;
70116- fl->fl_type = 0;
70117- fl->fl_start = fl->fl_end = 0;
70118 fl->fl_ops = NULL;
70119 fl->fl_lmops = NULL;
70120+ locks_init_lock_always(fl);
70121 }
70122
70123 EXPORT_SYMBOL(locks_init_lock);
70124@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
70125 return;
70126
70127 if (filp->f_op && filp->f_op->flock) {
70128- struct file_lock fl = {
70129+ struct file_lock flock = {
70130 .fl_pid = current->tgid,
70131 .fl_file = filp,
70132 .fl_flags = FL_FLOCK,
70133 .fl_type = F_UNLCK,
70134 .fl_end = OFFSET_MAX,
70135 };
70136- filp->f_op->flock(filp, F_SETLKW, &fl);
70137- if (fl.fl_ops && fl.fl_ops->fl_release_private)
70138- fl.fl_ops->fl_release_private(&fl);
70139+ filp->f_op->flock(filp, F_SETLKW, &flock);
70140+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
70141+ flock.fl_ops->fl_release_private(&flock);
70142 }
70143
70144 lock_kernel();
70145diff --git a/fs/mbcache.c b/fs/mbcache.c
70146index ec88ff3..b843a82 100644
70147--- a/fs/mbcache.c
70148+++ b/fs/mbcache.c
70149@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
70150 if (!cache)
70151 goto fail;
70152 cache->c_name = name;
70153- cache->c_op.free = NULL;
70154+ *(void **)&cache->c_op.free = NULL;
70155 if (cache_op)
70156- cache->c_op.free = cache_op->free;
70157+ *(void **)&cache->c_op.free = cache_op->free;
70158 atomic_set(&cache->c_entry_count, 0);
70159 cache->c_bucket_bits = bucket_bits;
70160 #ifdef MB_CACHE_INDEXES_COUNT
70161diff --git a/fs/namei.c b/fs/namei.c
70162index b0afbd4..8d065a1 100644
70163--- a/fs/namei.c
70164+++ b/fs/namei.c
70165@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
70166 return ret;
70167
70168 /*
70169+ * Searching includes executable on directories, else just read.
70170+ */
70171+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70172+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70173+ if (capable(CAP_DAC_READ_SEARCH))
70174+ return 0;
70175+
70176+ /*
70177 * Read/write DACs are always overridable.
70178 * Executable DACs are overridable if at least one exec bit is set.
70179 */
70180@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
70181 if (capable(CAP_DAC_OVERRIDE))
70182 return 0;
70183
70184- /*
70185- * Searching includes executable on directories, else just read.
70186- */
70187- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70188- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70189- if (capable(CAP_DAC_READ_SEARCH))
70190- return 0;
70191-
70192 return -EACCES;
70193 }
70194
70195@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
70196 if (!ret)
70197 goto ok;
70198
70199- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
70200+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
70201+ capable(CAP_DAC_OVERRIDE))
70202 goto ok;
70203
70204 return ret;
70205@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
70206 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
70207 error = PTR_ERR(cookie);
70208 if (!IS_ERR(cookie)) {
70209- char *s = nd_get_link(nd);
70210+ const char *s = nd_get_link(nd);
70211 error = 0;
70212 if (s)
70213 error = __vfs_follow_link(nd, s);
70214@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
70215 err = security_inode_follow_link(path->dentry, nd);
70216 if (err)
70217 goto loop;
70218+
70219+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
70220+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
70221+ err = -EACCES;
70222+ goto loop;
70223+ }
70224+
70225 current->link_count++;
70226 current->total_link_count++;
70227 nd->depth++;
70228@@ -1016,11 +1024,19 @@ return_reval:
70229 break;
70230 }
70231 return_base:
70232+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
70233+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
70234+ path_put(&nd->path);
70235+ return -ENOENT;
70236+ }
70237 return 0;
70238 out_dput:
70239 path_put_conditional(&next, nd);
70240 break;
70241 }
70242+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
70243+ err = -ENOENT;
70244+
70245 path_put(&nd->path);
70246 return_err:
70247 return err;
70248@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
70249 int retval = path_init(dfd, name, flags, nd);
70250 if (!retval)
70251 retval = path_walk(name, nd);
70252- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
70253- nd->path.dentry->d_inode))
70254- audit_inode(name, nd->path.dentry);
70255+
70256+ if (likely(!retval)) {
70257+ if (nd->path.dentry && nd->path.dentry->d_inode) {
70258+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
70259+ retval = -ENOENT;
70260+ if (!audit_dummy_context())
70261+ audit_inode(name, nd->path.dentry);
70262+ }
70263+ }
70264 if (nd->root.mnt) {
70265 path_put(&nd->root);
70266 nd->root.mnt = NULL;
70267 }
70268+
70269 return retval;
70270 }
70271
70272@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
70273 if (error)
70274 goto err_out;
70275
70276+
70277+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
70278+ error = -EPERM;
70279+ goto err_out;
70280+ }
70281+ if (gr_handle_rawio(inode)) {
70282+ error = -EPERM;
70283+ goto err_out;
70284+ }
70285+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
70286+ error = -EACCES;
70287+ goto err_out;
70288+ }
70289+
70290 if (flag & O_TRUNC) {
70291 error = get_write_access(inode);
70292 if (error)
70293@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70294 {
70295 int error;
70296 struct dentry *dir = nd->path.dentry;
70297+ int acc_mode = ACC_MODE(flag);
70298+
70299+ if (flag & O_TRUNC)
70300+ acc_mode |= MAY_WRITE;
70301+ if (flag & O_APPEND)
70302+ acc_mode |= MAY_APPEND;
70303+
70304+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
70305+ error = -EACCES;
70306+ goto out_unlock;
70307+ }
70308
70309 if (!IS_POSIXACL(dir->d_inode))
70310 mode &= ~current_umask();
70311@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70312 if (error)
70313 goto out_unlock;
70314 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
70315+ if (!error)
70316+ gr_handle_create(path->dentry, nd->path.mnt);
70317 out_unlock:
70318 mutex_unlock(&dir->d_inode->i_mutex);
70319 dput(nd->path.dentry);
70320@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
70321 &nd, flag);
70322 if (error)
70323 return ERR_PTR(error);
70324+
70325+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
70326+ error = -EPERM;
70327+ goto exit;
70328+ }
70329+
70330+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
70331+ error = -EPERM;
70332+ goto exit;
70333+ }
70334+
70335+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
70336+ error = -EACCES;
70337+ goto exit;
70338+ }
70339+
70340 goto ok;
70341 }
70342
70343@@ -1795,6 +1861,19 @@ do_last:
70344 /*
70345 * It already exists.
70346 */
70347+
70348+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
70349+ error = -ENOENT;
70350+ goto exit_mutex_unlock;
70351+ }
70352+
70353+ /* only check if O_CREAT is specified, all other checks need
70354+ to go into may_open */
70355+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
70356+ error = -EACCES;
70357+ goto exit_mutex_unlock;
70358+ }
70359+
70360 mutex_unlock(&dir->d_inode->i_mutex);
70361 audit_inode(pathname, path.dentry);
70362
70363@@ -1887,6 +1966,13 @@ do_link:
70364 error = security_inode_follow_link(path.dentry, &nd);
70365 if (error)
70366 goto exit_dput;
70367+
70368+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
70369+ path.dentry, nd.path.mnt)) {
70370+ error = -EACCES;
70371+ goto exit_dput;
70372+ }
70373+
70374 error = __do_follow_link(&path, &nd);
70375 if (error) {
70376 /* Does someone understand code flow here? Or it is only
70377@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
70378 }
70379 return dentry;
70380 eexist:
70381+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
70382+ dput(dentry);
70383+ return ERR_PTR(-ENOENT);
70384+ }
70385 dput(dentry);
70386 dentry = ERR_PTR(-EEXIST);
70387 fail:
70388@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70389 error = may_mknod(mode);
70390 if (error)
70391 goto out_dput;
70392+
70393+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
70394+ error = -EPERM;
70395+ goto out_dput;
70396+ }
70397+
70398+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70399+ error = -EACCES;
70400+ goto out_dput;
70401+ }
70402+
70403 error = mnt_want_write(nd.path.mnt);
70404 if (error)
70405 goto out_dput;
70406@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70407 }
70408 out_drop_write:
70409 mnt_drop_write(nd.path.mnt);
70410+
70411+ if (!error)
70412+ gr_handle_create(dentry, nd.path.mnt);
70413 out_dput:
70414 dput(dentry);
70415 out_unlock:
70416@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70417 if (IS_ERR(dentry))
70418 goto out_unlock;
70419
70420+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
70421+ error = -EACCES;
70422+ goto out_dput;
70423+ }
70424+
70425 if (!IS_POSIXACL(nd.path.dentry->d_inode))
70426 mode &= ~current_umask();
70427 error = mnt_want_write(nd.path.mnt);
70428@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70429 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
70430 out_drop_write:
70431 mnt_drop_write(nd.path.mnt);
70432+
70433+ if (!error)
70434+ gr_handle_create(dentry, nd.path.mnt);
70435+
70436 out_dput:
70437 dput(dentry);
70438 out_unlock:
70439@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70440 char * name;
70441 struct dentry *dentry;
70442 struct nameidata nd;
70443+ ino_t saved_ino = 0;
70444+ dev_t saved_dev = 0;
70445
70446 error = user_path_parent(dfd, pathname, &nd, &name);
70447 if (error)
70448@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
70449 error = PTR_ERR(dentry);
70450 if (IS_ERR(dentry))
70451 goto exit2;
70452+
70453+ if (dentry->d_inode != NULL) {
70454+ saved_ino = dentry->d_inode->i_ino;
70455+ saved_dev = gr_get_dev_from_dentry(dentry);
70456+
70457+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
70458+ error = -EACCES;
70459+ goto exit3;
70460+ }
70461+ }
70462+
70463 error = mnt_want_write(nd.path.mnt);
70464 if (error)
70465 goto exit3;
70466@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70467 if (error)
70468 goto exit4;
70469 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
70470+ if (!error && (saved_dev || saved_ino))
70471+ gr_handle_delete(saved_ino, saved_dev);
70472 exit4:
70473 mnt_drop_write(nd.path.mnt);
70474 exit3:
70475@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70476 struct dentry *dentry;
70477 struct nameidata nd;
70478 struct inode *inode = NULL;
70479+ ino_t saved_ino = 0;
70480+ dev_t saved_dev = 0;
70481
70482 error = user_path_parent(dfd, pathname, &nd, &name);
70483 if (error)
70484@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70485 if (nd.last.name[nd.last.len])
70486 goto slashes;
70487 inode = dentry->d_inode;
70488- if (inode)
70489+ if (inode) {
70490+ if (inode->i_nlink <= 1) {
70491+ saved_ino = inode->i_ino;
70492+ saved_dev = gr_get_dev_from_dentry(dentry);
70493+ }
70494+
70495 atomic_inc(&inode->i_count);
70496+
70497+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
70498+ error = -EACCES;
70499+ goto exit2;
70500+ }
70501+ }
70502 error = mnt_want_write(nd.path.mnt);
70503 if (error)
70504 goto exit2;
70505@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70506 if (error)
70507 goto exit3;
70508 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
70509+ if (!error && (saved_ino || saved_dev))
70510+ gr_handle_delete(saved_ino, saved_dev);
70511 exit3:
70512 mnt_drop_write(nd.path.mnt);
70513 exit2:
70514@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70515 if (IS_ERR(dentry))
70516 goto out_unlock;
70517
70518+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
70519+ error = -EACCES;
70520+ goto out_dput;
70521+ }
70522+
70523 error = mnt_want_write(nd.path.mnt);
70524 if (error)
70525 goto out_dput;
70526@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70527 if (error)
70528 goto out_drop_write;
70529 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
70530+ if (!error)
70531+ gr_handle_create(dentry, nd.path.mnt);
70532 out_drop_write:
70533 mnt_drop_write(nd.path.mnt);
70534 out_dput:
70535@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70536 error = PTR_ERR(new_dentry);
70537 if (IS_ERR(new_dentry))
70538 goto out_unlock;
70539+
70540+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
70541+ old_path.dentry->d_inode,
70542+ old_path.dentry->d_inode->i_mode, to)) {
70543+ error = -EACCES;
70544+ goto out_dput;
70545+ }
70546+
70547+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
70548+ old_path.dentry, old_path.mnt, to)) {
70549+ error = -EACCES;
70550+ goto out_dput;
70551+ }
70552+
70553 error = mnt_want_write(nd.path.mnt);
70554 if (error)
70555 goto out_dput;
70556@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70557 if (error)
70558 goto out_drop_write;
70559 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
70560+ if (!error)
70561+ gr_handle_create(new_dentry, nd.path.mnt);
70562 out_drop_write:
70563 mnt_drop_write(nd.path.mnt);
70564 out_dput:
70565@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70566 char *to;
70567 int error;
70568
70569+ pax_track_stack();
70570+
70571 error = user_path_parent(olddfd, oldname, &oldnd, &from);
70572 if (error)
70573 goto exit;
70574@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70575 if (new_dentry == trap)
70576 goto exit5;
70577
70578+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
70579+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
70580+ to);
70581+ if (error)
70582+ goto exit5;
70583+
70584 error = mnt_want_write(oldnd.path.mnt);
70585 if (error)
70586 goto exit5;
70587@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70588 goto exit6;
70589 error = vfs_rename(old_dir->d_inode, old_dentry,
70590 new_dir->d_inode, new_dentry);
70591+ if (!error)
70592+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
70593+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
70594 exit6:
70595 mnt_drop_write(oldnd.path.mnt);
70596 exit5:
70597@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
70598
70599 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
70600 {
70601+ char tmpbuf[64];
70602+ const char *newlink;
70603 int len;
70604
70605 len = PTR_ERR(link);
70606@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
70607 len = strlen(link);
70608 if (len > (unsigned) buflen)
70609 len = buflen;
70610- if (copy_to_user(buffer, link, len))
70611+
70612+ if (len < sizeof(tmpbuf)) {
70613+ memcpy(tmpbuf, link, len);
70614+ newlink = tmpbuf;
70615+ } else
70616+ newlink = link;
70617+
70618+ if (copy_to_user(buffer, newlink, len))
70619 len = -EFAULT;
70620 out:
70621 return len;
70622diff --git a/fs/namespace.c b/fs/namespace.c
70623index 2beb0fb..11a95a5 100644
70624--- a/fs/namespace.c
70625+++ b/fs/namespace.c
70626@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70627 if (!(sb->s_flags & MS_RDONLY))
70628 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
70629 up_write(&sb->s_umount);
70630+
70631+ gr_log_remount(mnt->mnt_devname, retval);
70632+
70633 return retval;
70634 }
70635
70636@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70637 security_sb_umount_busy(mnt);
70638 up_write(&namespace_sem);
70639 release_mounts(&umount_list);
70640+
70641+ gr_log_unmount(mnt->mnt_devname, retval);
70642+
70643 return retval;
70644 }
70645
70646@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70647 if (retval)
70648 goto dput_out;
70649
70650+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
70651+ retval = -EPERM;
70652+ goto dput_out;
70653+ }
70654+
70655+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
70656+ retval = -EPERM;
70657+ goto dput_out;
70658+ }
70659+
70660 if (flags & MS_REMOUNT)
70661 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
70662 data_page);
70663@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70664 dev_name, data_page);
70665 dput_out:
70666 path_put(&path);
70667+
70668+ gr_log_mount(dev_name, dir_name, retval);
70669+
70670 return retval;
70671 }
70672
70673@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
70674 goto out1;
70675 }
70676
70677+ if (gr_handle_chroot_pivot()) {
70678+ error = -EPERM;
70679+ path_put(&old);
70680+ goto out1;
70681+ }
70682+
70683 read_lock(&current->fs->lock);
70684 root = current->fs->root;
70685 path_get(&current->fs->root);
70686diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
70687index b8b5b30..2bd9ccb 100644
70688--- a/fs/ncpfs/dir.c
70689+++ b/fs/ncpfs/dir.c
70690@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
70691 int res, val = 0, len;
70692 __u8 __name[NCP_MAXPATHLEN + 1];
70693
70694+ pax_track_stack();
70695+
70696 parent = dget_parent(dentry);
70697 dir = parent->d_inode;
70698
70699@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
70700 int error, res, len;
70701 __u8 __name[NCP_MAXPATHLEN + 1];
70702
70703+ pax_track_stack();
70704+
70705 lock_kernel();
70706 error = -EIO;
70707 if (!ncp_conn_valid(server))
70708@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
70709 int error, result, len;
70710 int opmode;
70711 __u8 __name[NCP_MAXPATHLEN + 1];
70712-
70713+
70714 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
70715 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
70716
70717+ pax_track_stack();
70718+
70719 error = -EIO;
70720 lock_kernel();
70721 if (!ncp_conn_valid(server))
70722@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70723 int error, len;
70724 __u8 __name[NCP_MAXPATHLEN + 1];
70725
70726+ pax_track_stack();
70727+
70728 DPRINTK("ncp_mkdir: making %s/%s\n",
70729 dentry->d_parent->d_name.name, dentry->d_name.name);
70730
70731@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70732 if (!ncp_conn_valid(server))
70733 goto out;
70734
70735+ pax_track_stack();
70736+
70737 ncp_age_dentry(server, dentry);
70738 len = sizeof(__name);
70739 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
70740@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
70741 int old_len, new_len;
70742 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
70743
70744+ pax_track_stack();
70745+
70746 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
70747 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
70748 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
70749diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
70750index cf98da1..da890a9 100644
70751--- a/fs/ncpfs/inode.c
70752+++ b/fs/ncpfs/inode.c
70753@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
70754 #endif
70755 struct ncp_entry_info finfo;
70756
70757+ pax_track_stack();
70758+
70759 data.wdog_pid = NULL;
70760 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
70761 if (!server)
70762diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
70763index 2441d1a..96882c1 100644
70764--- a/fs/ncpfs/ncplib_kernel.h
70765+++ b/fs/ncpfs/ncplib_kernel.h
70766@@ -131,7 +131,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
70767 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
70768 const unsigned char *, unsigned int, int);
70769 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70770- const unsigned char *, unsigned int, int);
70771+ const unsigned char *, unsigned int, int) __size_overflow(5);
70772
70773 #define NCP_ESC ':'
70774 #define NCP_IO_TABLE(dentry) (NCP_SERVER((dentry)->d_inode)->nls_io)
70775@@ -147,7 +147,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70776 int ncp__io2vol(unsigned char *, unsigned int *,
70777 const unsigned char *, unsigned int, int);
70778 int ncp__vol2io(unsigned char *, unsigned int *,
70779- const unsigned char *, unsigned int, int);
70780+ const unsigned char *, unsigned int, int) __size_overflow(5);
70781
70782 #define NCP_IO_TABLE(dentry) NULL
70783 #define ncp_tolower(t, c) tolower(c)
70784diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
70785index bfaef7b..e9d03ca 100644
70786--- a/fs/nfs/inode.c
70787+++ b/fs/nfs/inode.c
70788@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
70789 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
70790 nfsi->attrtimeo_timestamp = jiffies;
70791
70792- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
70793+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
70794 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
70795 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
70796 else
70797@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
70798 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
70799 }
70800
70801-static atomic_long_t nfs_attr_generation_counter;
70802+static atomic_long_unchecked_t nfs_attr_generation_counter;
70803
70804 static unsigned long nfs_read_attr_generation_counter(void)
70805 {
70806- return atomic_long_read(&nfs_attr_generation_counter);
70807+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
70808 }
70809
70810 unsigned long nfs_inc_attr_generation_counter(void)
70811 {
70812- return atomic_long_inc_return(&nfs_attr_generation_counter);
70813+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
70814 }
70815
70816 void nfs_fattr_init(struct nfs_fattr *fattr)
70817diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
70818index cc2f505..f6a236f 100644
70819--- a/fs/nfsd/lockd.c
70820+++ b/fs/nfsd/lockd.c
70821@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
70822 fput(filp);
70823 }
70824
70825-static struct nlmsvc_binding nfsd_nlm_ops = {
70826+static const struct nlmsvc_binding nfsd_nlm_ops = {
70827 .fopen = nlm_fopen, /* open file for locking */
70828 .fclose = nlm_fclose, /* close file */
70829 };
70830diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
70831index cfc3391..dcc083a 100644
70832--- a/fs/nfsd/nfs4state.c
70833+++ b/fs/nfsd/nfs4state.c
70834@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
70835 unsigned int cmd;
70836 int err;
70837
70838+ pax_track_stack();
70839+
70840 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
70841 (long long) lock->lk_offset,
70842 (long long) lock->lk_length);
70843diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
70844index 4a82a96..0d5fb49 100644
70845--- a/fs/nfsd/nfs4xdr.c
70846+++ b/fs/nfsd/nfs4xdr.c
70847@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
70848 struct nfsd4_compoundres *resp = rqstp->rq_resp;
70849 u32 minorversion = resp->cstate.minorversion;
70850
70851+ pax_track_stack();
70852+
70853 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
70854 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
70855 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
70856diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
70857index 2e09588..596421d 100644
70858--- a/fs/nfsd/vfs.c
70859+++ b/fs/nfsd/vfs.c
70860@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70861 } else {
70862 oldfs = get_fs();
70863 set_fs(KERNEL_DS);
70864- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
70865+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
70866 set_fs(oldfs);
70867 }
70868
70869@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70870
70871 /* Write the data. */
70872 oldfs = get_fs(); set_fs(KERNEL_DS);
70873- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
70874+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
70875 set_fs(oldfs);
70876 if (host_err < 0)
70877 goto out_nfserr;
70878@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
70879 */
70880
70881 oldfs = get_fs(); set_fs(KERNEL_DS);
70882- host_err = inode->i_op->readlink(dentry, buf, *lenp);
70883+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
70884 set_fs(oldfs);
70885
70886 if (host_err < 0)
70887diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
70888index f6af760..d0adf34 100644
70889--- a/fs/nilfs2/ioctl.c
70890+++ b/fs/nilfs2/ioctl.c
70891@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70892 unsigned int cmd, void __user *argp)
70893 {
70894 struct nilfs_argv argv[5];
70895- const static size_t argsz[5] = {
70896+ static const size_t argsz[5] = {
70897 sizeof(struct nilfs_vdesc),
70898 sizeof(struct nilfs_period),
70899 sizeof(__u64),
70900@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70901 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
70902 goto out_free;
70903
70904+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
70905+ goto out_free;
70906+
70907 len = argv[n].v_size * argv[n].v_nmembs;
70908 base = (void __user *)(unsigned long)argv[n].v_base;
70909 if (len == 0) {
70910diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
70911index ad391a8..149a8a1 100644
70912--- a/fs/nilfs2/the_nilfs.c
70913+++ b/fs/nilfs2/the_nilfs.c
70914@@ -478,6 +478,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
70915 brelse(sbh[1]);
70916 sbh[1] = NULL;
70917 sbp[1] = NULL;
70918+ valid[1] = 0;
70919 swp = 0;
70920 }
70921 if (!valid[swp]) {
70922diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
70923index 7e54e52..9337248 100644
70924--- a/fs/notify/dnotify/dnotify.c
70925+++ b/fs/notify/dnotify/dnotify.c
70926@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
70927 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
70928 }
70929
70930-static struct fsnotify_ops dnotify_fsnotify_ops = {
70931+static const struct fsnotify_ops dnotify_fsnotify_ops = {
70932 .handle_event = dnotify_handle_event,
70933 .should_send_event = dnotify_should_send_event,
70934 .free_group_priv = NULL,
70935diff --git a/fs/notify/notification.c b/fs/notify/notification.c
70936index b8bf53b..c518688 100644
70937--- a/fs/notify/notification.c
70938+++ b/fs/notify/notification.c
70939@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
70940 * get set to 0 so it will never get 'freed'
70941 */
70942 static struct fsnotify_event q_overflow_event;
70943-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70944+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70945
70946 /**
70947 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
70948@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70949 */
70950 u32 fsnotify_get_cookie(void)
70951 {
70952- return atomic_inc_return(&fsnotify_sync_cookie);
70953+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
70954 }
70955 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
70956
70957diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
70958index 5a9e344..0f8cd28 100644
70959--- a/fs/ntfs/dir.c
70960+++ b/fs/ntfs/dir.c
70961@@ -1328,7 +1328,7 @@ find_next_index_buffer:
70962 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
70963 ~(s64)(ndir->itype.index.block_size - 1)));
70964 /* Bounds checks. */
70965- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70966+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70967 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
70968 "inode 0x%lx or driver bug.", vdir->i_ino);
70969 goto err_out;
70970diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
70971index 663c0e3..b6868e9 100644
70972--- a/fs/ntfs/file.c
70973+++ b/fs/ntfs/file.c
70974@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
70975 #endif /* NTFS_RW */
70976 };
70977
70978-const struct file_operations ntfs_empty_file_ops = {};
70979+const struct file_operations ntfs_empty_file_ops __read_only;
70980
70981-const struct inode_operations ntfs_empty_inode_ops = {};
70982+const struct inode_operations ntfs_empty_inode_ops __read_only;
70983diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
70984index 1cd2934..880b5d2 100644
70985--- a/fs/ocfs2/cluster/masklog.c
70986+++ b/fs/ocfs2/cluster/masklog.c
70987@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
70988 return mlog_mask_store(mlog_attr->mask, buf, count);
70989 }
70990
70991-static struct sysfs_ops mlog_attr_ops = {
70992+static const struct sysfs_ops mlog_attr_ops = {
70993 .show = mlog_show,
70994 .store = mlog_store,
70995 };
70996diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
70997index ac10f83..2cd2607 100644
70998--- a/fs/ocfs2/localalloc.c
70999+++ b/fs/ocfs2/localalloc.c
71000@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
71001 goto bail;
71002 }
71003
71004- atomic_inc(&osb->alloc_stats.moves);
71005+ atomic_inc_unchecked(&osb->alloc_stats.moves);
71006
71007 status = 0;
71008 bail:
71009diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
71010index f010b22..9f9ed34 100644
71011--- a/fs/ocfs2/namei.c
71012+++ b/fs/ocfs2/namei.c
71013@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
71014 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
71015 struct ocfs2_dir_lookup_result target_insert = { NULL, };
71016
71017+ pax_track_stack();
71018+
71019 /* At some point it might be nice to break this function up a
71020 * bit. */
71021
71022diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
71023index d963d86..914cfbd 100644
71024--- a/fs/ocfs2/ocfs2.h
71025+++ b/fs/ocfs2/ocfs2.h
71026@@ -217,11 +217,11 @@ enum ocfs2_vol_state
71027
71028 struct ocfs2_alloc_stats
71029 {
71030- atomic_t moves;
71031- atomic_t local_data;
71032- atomic_t bitmap_data;
71033- atomic_t bg_allocs;
71034- atomic_t bg_extends;
71035+ atomic_unchecked_t moves;
71036+ atomic_unchecked_t local_data;
71037+ atomic_unchecked_t bitmap_data;
71038+ atomic_unchecked_t bg_allocs;
71039+ atomic_unchecked_t bg_extends;
71040 };
71041
71042 enum ocfs2_local_alloc_state
71043diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
71044index 79b5dac..d322952 100644
71045--- a/fs/ocfs2/suballoc.c
71046+++ b/fs/ocfs2/suballoc.c
71047@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
71048 mlog_errno(status);
71049 goto bail;
71050 }
71051- atomic_inc(&osb->alloc_stats.bg_extends);
71052+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
71053
71054 /* You should never ask for this much metadata */
71055 BUG_ON(bits_wanted >
71056@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
71057 mlog_errno(status);
71058 goto bail;
71059 }
71060- atomic_inc(&osb->alloc_stats.bg_allocs);
71061+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
71062
71063 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
71064 ac->ac_bits_given += (*num_bits);
71065@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
71066 mlog_errno(status);
71067 goto bail;
71068 }
71069- atomic_inc(&osb->alloc_stats.bg_allocs);
71070+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
71071
71072 BUG_ON(num_bits != 1);
71073
71074@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71075 cluster_start,
71076 num_clusters);
71077 if (!status)
71078- atomic_inc(&osb->alloc_stats.local_data);
71079+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
71080 } else {
71081 if (min_clusters > (osb->bitmap_cpg - 1)) {
71082 /* The only paths asking for contiguousness
71083@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71084 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
71085 bg_blkno,
71086 bg_bit_off);
71087- atomic_inc(&osb->alloc_stats.bitmap_data);
71088+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
71089 }
71090 }
71091 if (status < 0) {
71092diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
71093index 9f55be4..a3f8048 100644
71094--- a/fs/ocfs2/super.c
71095+++ b/fs/ocfs2/super.c
71096@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
71097 "%10s => GlobalAllocs: %d LocalAllocs: %d "
71098 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
71099 "Stats",
71100- atomic_read(&osb->alloc_stats.bitmap_data),
71101- atomic_read(&osb->alloc_stats.local_data),
71102- atomic_read(&osb->alloc_stats.bg_allocs),
71103- atomic_read(&osb->alloc_stats.moves),
71104- atomic_read(&osb->alloc_stats.bg_extends));
71105+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
71106+ atomic_read_unchecked(&osb->alloc_stats.local_data),
71107+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
71108+ atomic_read_unchecked(&osb->alloc_stats.moves),
71109+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
71110
71111 out += snprintf(buf + out, len - out,
71112 "%10s => State: %u Descriptor: %llu Size: %u bits "
71113@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
71114 spin_lock_init(&osb->osb_xattr_lock);
71115 ocfs2_init_inode_steal_slot(osb);
71116
71117- atomic_set(&osb->alloc_stats.moves, 0);
71118- atomic_set(&osb->alloc_stats.local_data, 0);
71119- atomic_set(&osb->alloc_stats.bitmap_data, 0);
71120- atomic_set(&osb->alloc_stats.bg_allocs, 0);
71121- atomic_set(&osb->alloc_stats.bg_extends, 0);
71122+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
71123+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
71124+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
71125+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
71126+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
71127
71128 /* Copy the blockcheck stats from the superblock probe */
71129 osb->osb_ecc_stats = *stats;
71130diff --git a/fs/open.c b/fs/open.c
71131index 4f01e06..2a8057a 100644
71132--- a/fs/open.c
71133+++ b/fs/open.c
71134@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
71135 error = locks_verify_truncate(inode, NULL, length);
71136 if (!error)
71137 error = security_path_truncate(&path, length, 0);
71138+
71139+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
71140+ error = -EACCES;
71141+
71142 if (!error) {
71143 vfs_dq_init(inode);
71144 error = do_truncate(path.dentry, length, 0, NULL);
71145@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
71146 if (__mnt_is_readonly(path.mnt))
71147 res = -EROFS;
71148
71149+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
71150+ res = -EACCES;
71151+
71152 out_path_release:
71153 path_put(&path);
71154 out:
71155@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
71156 if (error)
71157 goto dput_and_out;
71158
71159+ gr_log_chdir(path.dentry, path.mnt);
71160+
71161 set_fs_pwd(current->fs, &path);
71162
71163 dput_and_out:
71164@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
71165 goto out_putf;
71166
71167 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
71168+
71169+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
71170+ error = -EPERM;
71171+
71172+ if (!error)
71173+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
71174+
71175 if (!error)
71176 set_fs_pwd(current->fs, &file->f_path);
71177 out_putf:
71178@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
71179 if (!capable(CAP_SYS_CHROOT))
71180 goto dput_and_out;
71181
71182+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
71183+ goto dput_and_out;
71184+
71185 set_fs_root(current->fs, &path);
71186+
71187+ gr_handle_chroot_chdir(&path);
71188+
71189 error = 0;
71190 dput_and_out:
71191 path_put(&path);
71192@@ -596,66 +618,57 @@ out:
71193 return error;
71194 }
71195
71196+static int chmod_common(struct path *path, umode_t mode)
71197+{
71198+ struct inode *inode = path->dentry->d_inode;
71199+ struct iattr newattrs;
71200+ int error;
71201+
71202+ error = mnt_want_write(path->mnt);
71203+ if (error)
71204+ return error;
71205+ mutex_lock(&inode->i_mutex);
71206+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
71207+ error = -EACCES;
71208+ goto out_unlock;
71209+ }
71210+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
71211+ error = -EPERM;
71212+ goto out_unlock;
71213+ }
71214+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71215+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71216+ error = notify_change(path->dentry, &newattrs);
71217+out_unlock:
71218+ mutex_unlock(&inode->i_mutex);
71219+ mnt_drop_write(path->mnt);
71220+ return error;
71221+}
71222+
71223 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
71224 {
71225- struct inode * inode;
71226- struct dentry * dentry;
71227 struct file * file;
71228 int err = -EBADF;
71229- struct iattr newattrs;
71230
71231 file = fget(fd);
71232- if (!file)
71233- goto out;
71234-
71235- dentry = file->f_path.dentry;
71236- inode = dentry->d_inode;
71237-
71238- audit_inode(NULL, dentry);
71239-
71240- err = mnt_want_write_file(file);
71241- if (err)
71242- goto out_putf;
71243- mutex_lock(&inode->i_mutex);
71244- if (mode == (mode_t) -1)
71245- mode = inode->i_mode;
71246- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71247- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71248- err = notify_change(dentry, &newattrs);
71249- mutex_unlock(&inode->i_mutex);
71250- mnt_drop_write(file->f_path.mnt);
71251-out_putf:
71252- fput(file);
71253-out:
71254+ if (file) {
71255+ audit_inode(NULL, file->f_path.dentry);
71256+ err = chmod_common(&file->f_path, mode);
71257+ fput(file);
71258+ }
71259 return err;
71260 }
71261
71262 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
71263 {
71264 struct path path;
71265- struct inode *inode;
71266 int error;
71267- struct iattr newattrs;
71268
71269 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
71270- if (error)
71271- goto out;
71272- inode = path.dentry->d_inode;
71273-
71274- error = mnt_want_write(path.mnt);
71275- if (error)
71276- goto dput_and_out;
71277- mutex_lock(&inode->i_mutex);
71278- if (mode == (mode_t) -1)
71279- mode = inode->i_mode;
71280- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71281- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71282- error = notify_change(path.dentry, &newattrs);
71283- mutex_unlock(&inode->i_mutex);
71284- mnt_drop_write(path.mnt);
71285-dput_and_out:
71286- path_put(&path);
71287-out:
71288+ if (!error) {
71289+ error = chmod_common(&path, mode);
71290+ path_put(&path);
71291+ }
71292 return error;
71293 }
71294
71295@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
71296 return sys_fchmodat(AT_FDCWD, filename, mode);
71297 }
71298
71299-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
71300+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
71301 {
71302 struct inode *inode = dentry->d_inode;
71303 int error;
71304 struct iattr newattrs;
71305
71306+ if (!gr_acl_handle_chown(dentry, mnt))
71307+ return -EACCES;
71308+
71309 newattrs.ia_valid = ATTR_CTIME;
71310 if (user != (uid_t) -1) {
71311 newattrs.ia_valid |= ATTR_UID;
71312@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
71313 error = mnt_want_write(path.mnt);
71314 if (error)
71315 goto out_release;
71316- error = chown_common(path.dentry, user, group);
71317+ error = chown_common(path.dentry, user, group, path.mnt);
71318 mnt_drop_write(path.mnt);
71319 out_release:
71320 path_put(&path);
71321@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
71322 error = mnt_want_write(path.mnt);
71323 if (error)
71324 goto out_release;
71325- error = chown_common(path.dentry, user, group);
71326+ error = chown_common(path.dentry, user, group, path.mnt);
71327 mnt_drop_write(path.mnt);
71328 out_release:
71329 path_put(&path);
71330@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
71331 error = mnt_want_write(path.mnt);
71332 if (error)
71333 goto out_release;
71334- error = chown_common(path.dentry, user, group);
71335+ error = chown_common(path.dentry, user, group, path.mnt);
71336 mnt_drop_write(path.mnt);
71337 out_release:
71338 path_put(&path);
71339@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
71340 goto out_fput;
71341 dentry = file->f_path.dentry;
71342 audit_inode(NULL, dentry);
71343- error = chown_common(dentry, user, group);
71344+ error = chown_common(dentry, user, group, file->f_path.mnt);
71345 mnt_drop_write(file->f_path.mnt);
71346 out_fput:
71347 fput(file);
71348@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
71349 if (!IS_ERR(tmp)) {
71350 fd = get_unused_fd_flags(flags);
71351 if (fd >= 0) {
71352- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71353+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71354 if (IS_ERR(f)) {
71355 put_unused_fd(fd);
71356 fd = PTR_ERR(f);
71357diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
71358index 6ab70f4..f4103d1 100644
71359--- a/fs/partitions/efi.c
71360+++ b/fs/partitions/efi.c
71361@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
71362 if (!bdev || !gpt)
71363 return NULL;
71364
71365+ if (!le32_to_cpu(gpt->num_partition_entries))
71366+ return NULL;
71367+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
71368+ if (!pte)
71369+ return NULL;
71370+
71371 count = le32_to_cpu(gpt->num_partition_entries) *
71372 le32_to_cpu(gpt->sizeof_partition_entry);
71373- if (!count)
71374- return NULL;
71375- pte = kzalloc(count, GFP_KERNEL);
71376- if (!pte)
71377- return NULL;
71378-
71379 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
71380 (u8 *) pte,
71381 count) < count) {
71382diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
71383index dd6efdb..3babc6c 100644
71384--- a/fs/partitions/ldm.c
71385+++ b/fs/partitions/ldm.c
71386@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71387 ldm_error ("A VBLK claims to have %d parts.", num);
71388 return false;
71389 }
71390+
71391 if (rec >= num) {
71392 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
71393 return false;
71394@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71395 goto found;
71396 }
71397
71398- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
71399+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
71400 if (!f) {
71401 ldm_crit ("Out of memory.");
71402 return false;
71403diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
71404index 5765198..7f8e9e0 100644
71405--- a/fs/partitions/mac.c
71406+++ b/fs/partitions/mac.c
71407@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
71408 return 0; /* not a MacOS disk */
71409 }
71410 blocks_in_map = be32_to_cpu(part->map_count);
71411- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71412- put_dev_sector(sect);
71413- return 0;
71414- }
71415 printk(" [mac]");
71416+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71417+ put_dev_sector(sect);
71418+ return 0;
71419+ }
71420 for (slot = 1; slot <= blocks_in_map; ++slot) {
71421 int pos = slot * secsize;
71422 put_dev_sector(sect);
71423diff --git a/fs/pipe.c b/fs/pipe.c
71424index d0cc080..8a6f211 100644
71425--- a/fs/pipe.c
71426+++ b/fs/pipe.c
71427@@ -401,9 +401,9 @@ redo:
71428 }
71429 if (bufs) /* More to do? */
71430 continue;
71431- if (!pipe->writers)
71432+ if (!atomic_read(&pipe->writers))
71433 break;
71434- if (!pipe->waiting_writers) {
71435+ if (!atomic_read(&pipe->waiting_writers)) {
71436 /* syscall merging: Usually we must not sleep
71437 * if O_NONBLOCK is set, or if we got some data.
71438 * But if a writer sleeps in kernel space, then
71439@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
71440 mutex_lock(&inode->i_mutex);
71441 pipe = inode->i_pipe;
71442
71443- if (!pipe->readers) {
71444+ if (!atomic_read(&pipe->readers)) {
71445 send_sig(SIGPIPE, current, 0);
71446 ret = -EPIPE;
71447 goto out;
71448@@ -511,7 +511,7 @@ redo1:
71449 for (;;) {
71450 int bufs;
71451
71452- if (!pipe->readers) {
71453+ if (!atomic_read(&pipe->readers)) {
71454 send_sig(SIGPIPE, current, 0);
71455 if (!ret)
71456 ret = -EPIPE;
71457@@ -597,9 +597,9 @@ redo2:
71458 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
71459 do_wakeup = 0;
71460 }
71461- pipe->waiting_writers++;
71462+ atomic_inc(&pipe->waiting_writers);
71463 pipe_wait(pipe);
71464- pipe->waiting_writers--;
71465+ atomic_dec(&pipe->waiting_writers);
71466 }
71467 out:
71468 mutex_unlock(&inode->i_mutex);
71469@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71470 mask = 0;
71471 if (filp->f_mode & FMODE_READ) {
71472 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
71473- if (!pipe->writers && filp->f_version != pipe->w_counter)
71474+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
71475 mask |= POLLHUP;
71476 }
71477
71478@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71479 * Most Unices do not set POLLERR for FIFOs but on Linux they
71480 * behave exactly like pipes for poll().
71481 */
71482- if (!pipe->readers)
71483+ if (!atomic_read(&pipe->readers))
71484 mask |= POLLERR;
71485 }
71486
71487@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
71488
71489 mutex_lock(&inode->i_mutex);
71490 pipe = inode->i_pipe;
71491- pipe->readers -= decr;
71492- pipe->writers -= decw;
71493+ atomic_sub(decr, &pipe->readers);
71494+ atomic_sub(decw, &pipe->writers);
71495
71496- if (!pipe->readers && !pipe->writers) {
71497+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
71498 free_pipe_info(inode);
71499 } else {
71500 wake_up_interruptible_sync(&pipe->wait);
71501@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
71502
71503 if (inode->i_pipe) {
71504 ret = 0;
71505- inode->i_pipe->readers++;
71506+ atomic_inc(&inode->i_pipe->readers);
71507 }
71508
71509 mutex_unlock(&inode->i_mutex);
71510@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
71511
71512 if (inode->i_pipe) {
71513 ret = 0;
71514- inode->i_pipe->writers++;
71515+ atomic_inc(&inode->i_pipe->writers);
71516 }
71517
71518 mutex_unlock(&inode->i_mutex);
71519@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
71520 if (inode->i_pipe) {
71521 ret = 0;
71522 if (filp->f_mode & FMODE_READ)
71523- inode->i_pipe->readers++;
71524+ atomic_inc(&inode->i_pipe->readers);
71525 if (filp->f_mode & FMODE_WRITE)
71526- inode->i_pipe->writers++;
71527+ atomic_inc(&inode->i_pipe->writers);
71528 }
71529
71530 mutex_unlock(&inode->i_mutex);
71531@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
71532 inode->i_pipe = NULL;
71533 }
71534
71535-static struct vfsmount *pipe_mnt __read_mostly;
71536+struct vfsmount *pipe_mnt __read_mostly;
71537 static int pipefs_delete_dentry(struct dentry *dentry)
71538 {
71539 /*
71540@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
71541 goto fail_iput;
71542 inode->i_pipe = pipe;
71543
71544- pipe->readers = pipe->writers = 1;
71545+ atomic_set(&pipe->readers, 1);
71546+ atomic_set(&pipe->writers, 1);
71547 inode->i_fop = &rdwr_pipefifo_fops;
71548
71549 /*
71550diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
71551index 50f8f06..c5755df 100644
71552--- a/fs/proc/Kconfig
71553+++ b/fs/proc/Kconfig
71554@@ -30,12 +30,12 @@ config PROC_FS
71555
71556 config PROC_KCORE
71557 bool "/proc/kcore support" if !ARM
71558- depends on PROC_FS && MMU
71559+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
71560
71561 config PROC_VMCORE
71562 bool "/proc/vmcore support (EXPERIMENTAL)"
71563- depends on PROC_FS && CRASH_DUMP
71564- default y
71565+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
71566+ default n
71567 help
71568 Exports the dump image of crashed kernel in ELF format.
71569
71570@@ -59,8 +59,8 @@ config PROC_SYSCTL
71571 limited in memory.
71572
71573 config PROC_PAGE_MONITOR
71574- default y
71575- depends on PROC_FS && MMU
71576+ default n
71577+ depends on PROC_FS && MMU && !GRKERNSEC
71578 bool "Enable /proc page monitoring" if EMBEDDED
71579 help
71580 Various /proc files exist to monitor process memory utilization:
71581diff --git a/fs/proc/array.c b/fs/proc/array.c
71582index c5ef152..28c94f7 100644
71583--- a/fs/proc/array.c
71584+++ b/fs/proc/array.c
71585@@ -60,6 +60,7 @@
71586 #include <linux/tty.h>
71587 #include <linux/string.h>
71588 #include <linux/mman.h>
71589+#include <linux/grsecurity.h>
71590 #include <linux/proc_fs.h>
71591 #include <linux/ioport.h>
71592 #include <linux/uaccess.h>
71593@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
71594 p->nivcsw);
71595 }
71596
71597+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71598+static inline void task_pax(struct seq_file *m, struct task_struct *p)
71599+{
71600+ if (p->mm)
71601+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
71602+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
71603+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
71604+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
71605+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
71606+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
71607+ else
71608+ seq_printf(m, "PaX:\t-----\n");
71609+}
71610+#endif
71611+
71612 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71613 struct pid *pid, struct task_struct *task)
71614 {
71615@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71616 task_cap(m, task);
71617 cpuset_task_status_allowed(m, task);
71618 task_context_switch_counts(m, task);
71619+
71620+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71621+ task_pax(m, task);
71622+#endif
71623+
71624+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
71625+ task_grsec_rbac(m, task);
71626+#endif
71627+
71628 return 0;
71629 }
71630
71631+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71632+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71633+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71634+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71635+#endif
71636+
71637 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71638 struct pid *pid, struct task_struct *task, int whole)
71639 {
71640@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71641 cputime_t cutime, cstime, utime, stime;
71642 cputime_t cgtime, gtime;
71643 unsigned long rsslim = 0;
71644- char tcomm[sizeof(task->comm)];
71645+ char tcomm[sizeof(task->comm)] = { 0 };
71646 unsigned long flags;
71647
71648+ pax_track_stack();
71649+
71650+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71651+ if (current->exec_id != m->exec_id) {
71652+ gr_log_badprocpid("stat");
71653+ return 0;
71654+ }
71655+#endif
71656+
71657 state = *get_task_state(task);
71658 vsize = eip = esp = 0;
71659 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
71660@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71661 gtime = task_gtime(task);
71662 }
71663
71664+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71665+ if (PAX_RAND_FLAGS(mm)) {
71666+ eip = 0;
71667+ esp = 0;
71668+ wchan = 0;
71669+ }
71670+#endif
71671+#ifdef CONFIG_GRKERNSEC_HIDESYM
71672+ wchan = 0;
71673+ eip =0;
71674+ esp =0;
71675+#endif
71676+
71677 /* scale priority and nice values from timeslices to -20..20 */
71678 /* to make it look like a "normal" Unix priority/nice value */
71679 priority = task_prio(task);
71680@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71681 vsize,
71682 mm ? get_mm_rss(mm) : 0,
71683 rsslim,
71684+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71685+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
71686+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
71687+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
71688+#else
71689 mm ? (permitted ? mm->start_code : 1) : 0,
71690 mm ? (permitted ? mm->end_code : 1) : 0,
71691 (permitted && mm) ? mm->start_stack : 0,
71692+#endif
71693 esp,
71694 eip,
71695 /* The signal information here is obsolete.
71696@@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71697 struct pid *pid, struct task_struct *task)
71698 {
71699 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
71700- struct mm_struct *mm = get_task_mm(task);
71701+ struct mm_struct *mm;
71702
71703+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71704+ if (current->exec_id != m->exec_id) {
71705+ gr_log_badprocpid("statm");
71706+ return 0;
71707+ }
71708+#endif
71709+
71710+ mm = get_task_mm(task);
71711 if (mm) {
71712 size = task_statm(mm, &shared, &text, &data, &resident);
71713 mmput(mm);
71714@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71715
71716 return 0;
71717 }
71718+
71719+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71720+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
71721+{
71722+ u32 curr_ip = 0;
71723+ unsigned long flags;
71724+
71725+ if (lock_task_sighand(task, &flags)) {
71726+ curr_ip = task->signal->curr_ip;
71727+ unlock_task_sighand(task, &flags);
71728+ }
71729+
71730+ return sprintf(buffer, "%pI4\n", &curr_ip);
71731+}
71732+#endif
71733diff --git a/fs/proc/base.c b/fs/proc/base.c
71734index 67f7dc0..a86ad9a 100644
71735--- a/fs/proc/base.c
71736+++ b/fs/proc/base.c
71737@@ -102,6 +102,22 @@ struct pid_entry {
71738 union proc_op op;
71739 };
71740
71741+struct getdents_callback {
71742+ struct linux_dirent __user * current_dir;
71743+ struct linux_dirent __user * previous;
71744+ struct file * file;
71745+ int count;
71746+ int error;
71747+};
71748+
71749+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
71750+ loff_t offset, u64 ino, unsigned int d_type)
71751+{
71752+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
71753+ buf->error = -EINVAL;
71754+ return 0;
71755+}
71756+
71757 #define NOD(NAME, MODE, IOP, FOP, OP) { \
71758 .name = (NAME), \
71759 .len = sizeof(NAME) - 1, \
71760@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
71761 if (task == current)
71762 return 0;
71763
71764+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
71765+ return -EPERM;
71766+
71767 /*
71768 * If current is actively ptrace'ing, and would also be
71769 * permitted to freshly attach with ptrace now, permit it.
71770@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
71771 if (!mm->arg_end)
71772 goto out_mm; /* Shh! No looking before we're done */
71773
71774+ if (gr_acl_handle_procpidmem(task))
71775+ goto out_mm;
71776+
71777 len = mm->arg_end - mm->arg_start;
71778
71779 if (len > PAGE_SIZE)
71780@@ -287,12 +309,28 @@ out:
71781 return res;
71782 }
71783
71784+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71785+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71786+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71787+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71788+#endif
71789+
71790 static int proc_pid_auxv(struct task_struct *task, char *buffer)
71791 {
71792 int res = 0;
71793 struct mm_struct *mm = get_task_mm(task);
71794 if (mm) {
71795 unsigned int nwords = 0;
71796+
71797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71798+ /* allow if we're currently ptracing this task */
71799+ if (PAX_RAND_FLAGS(mm) &&
71800+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
71801+ mmput(mm);
71802+ return 0;
71803+ }
71804+#endif
71805+
71806 do {
71807 nwords += 2;
71808 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
71809@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
71810 }
71811
71812
71813-#ifdef CONFIG_KALLSYMS
71814+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71815 /*
71816 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
71817 * Returns the resolved symbol. If that fails, simply return the address.
71818@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
71819 mutex_unlock(&task->cred_guard_mutex);
71820 }
71821
71822-#ifdef CONFIG_STACKTRACE
71823+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71824
71825 #define MAX_STACK_TRACE_DEPTH 64
71826
71827@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
71828 return count;
71829 }
71830
71831-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71832+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71833 static int proc_pid_syscall(struct task_struct *task, char *buffer)
71834 {
71835 long nr;
71836@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
71837 /************************************************************************/
71838
71839 /* permission checks */
71840-static int proc_fd_access_allowed(struct inode *inode)
71841+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
71842 {
71843 struct task_struct *task;
71844 int allowed = 0;
71845@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
71846 */
71847 task = get_proc_task(inode);
71848 if (task) {
71849- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71850+ if (log)
71851+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
71852+ else
71853+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71854 put_task_struct(task);
71855 }
71856 return allowed;
71857@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
71858 static int mem_open(struct inode* inode, struct file* file)
71859 {
71860 file->private_data = (void*)((long)current->self_exec_id);
71861+
71862+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71863+ file->f_version = current->exec_id;
71864+#endif
71865+
71866 return 0;
71867 }
71868
71869+static int task_dumpable(struct task_struct *task);
71870+
71871 static ssize_t mem_read(struct file * file, char __user * buf,
71872 size_t count, loff_t *ppos)
71873 {
71874@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
71875 int ret = -ESRCH;
71876 struct mm_struct *mm;
71877
71878+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71879+ if (file->f_version != current->exec_id) {
71880+ gr_log_badprocpid("mem");
71881+ return 0;
71882+ }
71883+#endif
71884+
71885 if (!task)
71886 goto out_no_task;
71887
71888@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
71889 if (!task)
71890 goto out_no_task;
71891
71892+ if (gr_acl_handle_procpidmem(task))
71893+ goto out;
71894+
71895 if (!ptrace_may_access(task, PTRACE_MODE_READ))
71896 goto out;
71897
71898@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
71899 path_put(&nd->path);
71900
71901 /* Are we allowed to snoop on the tasks file descriptors? */
71902- if (!proc_fd_access_allowed(inode))
71903+ if (!proc_fd_access_allowed(inode,0))
71904 goto out;
71905
71906 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
71907@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
71908 struct path path;
71909
71910 /* Are we allowed to snoop on the tasks file descriptors? */
71911- if (!proc_fd_access_allowed(inode))
71912- goto out;
71913+ /* logging this is needed for learning on chromium to work properly,
71914+ but we don't want to flood the logs from 'ps' which does a readlink
71915+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
71916+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
71917+ */
71918+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
71919+ if (!proc_fd_access_allowed(inode,0))
71920+ goto out;
71921+ } else {
71922+ if (!proc_fd_access_allowed(inode,1))
71923+ goto out;
71924+ }
71925
71926 error = PROC_I(inode)->op.proc_get_link(inode, &path);
71927 if (error)
71928@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
71929 rcu_read_lock();
71930 cred = __task_cred(task);
71931 inode->i_uid = cred->euid;
71932+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71933+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71934+#else
71935 inode->i_gid = cred->egid;
71936+#endif
71937 rcu_read_unlock();
71938 }
71939 security_task_to_inode(task, inode);
71940@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71941 struct inode *inode = dentry->d_inode;
71942 struct task_struct *task;
71943 const struct cred *cred;
71944+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71945+ const struct cred *tmpcred = current_cred();
71946+#endif
71947
71948 generic_fillattr(inode, stat);
71949
71950@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71951 stat->uid = 0;
71952 stat->gid = 0;
71953 task = pid_task(proc_pid(inode), PIDTYPE_PID);
71954+
71955+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
71956+ rcu_read_unlock();
71957+ return -ENOENT;
71958+ }
71959+
71960 if (task) {
71961+ cred = __task_cred(task);
71962+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71963+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
71964+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71965+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
71966+#endif
71967+ ) {
71968+#endif
71969 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71970+#ifdef CONFIG_GRKERNSEC_PROC_USER
71971+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71972+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71973+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
71974+#endif
71975 task_dumpable(task)) {
71976- cred = __task_cred(task);
71977 stat->uid = cred->euid;
71978+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71979+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
71980+#else
71981 stat->gid = cred->egid;
71982+#endif
71983 }
71984+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71985+ } else {
71986+ rcu_read_unlock();
71987+ return -ENOENT;
71988+ }
71989+#endif
71990 }
71991 rcu_read_unlock();
71992 return 0;
71993@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
71994
71995 if (task) {
71996 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71997+#ifdef CONFIG_GRKERNSEC_PROC_USER
71998+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71999+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72000+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
72001+#endif
72002 task_dumpable(task)) {
72003 rcu_read_lock();
72004 cred = __task_cred(task);
72005 inode->i_uid = cred->euid;
72006+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72007+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72008+#else
72009 inode->i_gid = cred->egid;
72010+#endif
72011 rcu_read_unlock();
72012 } else {
72013 inode->i_uid = 0;
72014@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
72015 int fd = proc_fd(inode);
72016
72017 if (task) {
72018- files = get_files_struct(task);
72019+ if (!gr_acl_handle_procpidmem(task))
72020+ files = get_files_struct(task);
72021 put_task_struct(task);
72022 }
72023 if (files) {
72024@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
72025 static int proc_fd_permission(struct inode *inode, int mask)
72026 {
72027 int rv;
72028+ struct task_struct *task;
72029
72030 rv = generic_permission(inode, mask, NULL);
72031- if (rv == 0)
72032- return 0;
72033+
72034 if (task_pid(current) == proc_pid(inode))
72035 rv = 0;
72036+
72037+ task = get_proc_task(inode);
72038+ if (task == NULL)
72039+ return rv;
72040+
72041+ if (gr_acl_handle_procpidmem(task))
72042+ rv = -EACCES;
72043+
72044+ put_task_struct(task);
72045+
72046 return rv;
72047 }
72048
72049@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
72050 if (!task)
72051 goto out_no_task;
72052
72053+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72054+ goto out;
72055+
72056 /*
72057 * Yes, it does not scale. And it should not. Don't add
72058 * new entries into /proc/<tgid>/ without very good reasons.
72059@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
72060 if (!task)
72061 goto out_no_task;
72062
72063+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72064+ goto out;
72065+
72066 ret = 0;
72067 i = filp->f_pos;
72068 switch (i) {
72069@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
72070 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
72071 void *cookie)
72072 {
72073- char *s = nd_get_link(nd);
72074+ const char *s = nd_get_link(nd);
72075 if (!IS_ERR(s))
72076 __putname(s);
72077 }
72078@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
72079 #ifdef CONFIG_SCHED_DEBUG
72080 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72081 #endif
72082-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72083+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72084 INF("syscall", S_IRUGO, proc_pid_syscall),
72085 #endif
72086 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72087@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
72088 #ifdef CONFIG_SECURITY
72089 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72090 #endif
72091-#ifdef CONFIG_KALLSYMS
72092+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72093 INF("wchan", S_IRUGO, proc_pid_wchan),
72094 #endif
72095-#ifdef CONFIG_STACKTRACE
72096+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72097 ONE("stack", S_IRUGO, proc_pid_stack),
72098 #endif
72099 #ifdef CONFIG_SCHEDSTATS
72100@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
72101 #ifdef CONFIG_TASK_IO_ACCOUNTING
72102 INF("io", S_IRUSR, proc_tgid_io_accounting),
72103 #endif
72104+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72105+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
72106+#endif
72107 };
72108
72109 static int proc_tgid_base_readdir(struct file * filp,
72110@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
72111 if (!inode)
72112 goto out;
72113
72114+#ifdef CONFIG_GRKERNSEC_PROC_USER
72115+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
72116+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72117+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72118+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
72119+#else
72120 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
72121+#endif
72122 inode->i_op = &proc_tgid_base_inode_operations;
72123 inode->i_fop = &proc_tgid_base_operations;
72124 inode->i_flags|=S_IMMUTABLE;
72125@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
72126 if (!task)
72127 goto out;
72128
72129+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72130+ goto out_put_task;
72131+
72132 result = proc_pid_instantiate(dir, dentry, task, NULL);
72133+out_put_task:
72134 put_task_struct(task);
72135 out:
72136 return result;
72137@@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72138 {
72139 unsigned int nr;
72140 struct task_struct *reaper;
72141+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72142+ const struct cred *tmpcred = current_cred();
72143+ const struct cred *itercred;
72144+#endif
72145+ filldir_t __filldir = filldir;
72146 struct tgid_iter iter;
72147 struct pid_namespace *ns;
72148
72149@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72150 for (iter = next_tgid(ns, iter);
72151 iter.task;
72152 iter.tgid += 1, iter = next_tgid(ns, iter)) {
72153+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72154+ rcu_read_lock();
72155+ itercred = __task_cred(iter.task);
72156+#endif
72157+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
72158+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72159+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
72160+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72161+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
72162+#endif
72163+ )
72164+#endif
72165+ )
72166+ __filldir = &gr_fake_filldir;
72167+ else
72168+ __filldir = filldir;
72169+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72170+ rcu_read_unlock();
72171+#endif
72172 filp->f_pos = iter.tgid + TGID_OFFSET;
72173- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
72174+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
72175 put_task_struct(iter.task);
72176 goto out;
72177 }
72178@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
72179 #ifdef CONFIG_SCHED_DEBUG
72180 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72181 #endif
72182-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72183+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72184 INF("syscall", S_IRUGO, proc_pid_syscall),
72185 #endif
72186 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72187@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
72188 #ifdef CONFIG_SECURITY
72189 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72190 #endif
72191-#ifdef CONFIG_KALLSYMS
72192+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72193 INF("wchan", S_IRUGO, proc_pid_wchan),
72194 #endif
72195-#ifdef CONFIG_STACKTRACE
72196+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72197 ONE("stack", S_IRUGO, proc_pid_stack),
72198 #endif
72199 #ifdef CONFIG_SCHEDSTATS
72200diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
72201index 82676e3..5f8518a 100644
72202--- a/fs/proc/cmdline.c
72203+++ b/fs/proc/cmdline.c
72204@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
72205
72206 static int __init proc_cmdline_init(void)
72207 {
72208+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72209+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
72210+#else
72211 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
72212+#endif
72213 return 0;
72214 }
72215 module_init(proc_cmdline_init);
72216diff --git a/fs/proc/devices.c b/fs/proc/devices.c
72217index 59ee7da..469b4b6 100644
72218--- a/fs/proc/devices.c
72219+++ b/fs/proc/devices.c
72220@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
72221
72222 static int __init proc_devices_init(void)
72223 {
72224+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72225+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
72226+#else
72227 proc_create("devices", 0, NULL, &proc_devinfo_operations);
72228+#endif
72229 return 0;
72230 }
72231 module_init(proc_devices_init);
72232diff --git a/fs/proc/inode.c b/fs/proc/inode.c
72233index d78ade3..81767f9 100644
72234--- a/fs/proc/inode.c
72235+++ b/fs/proc/inode.c
72236@@ -18,12 +18,19 @@
72237 #include <linux/module.h>
72238 #include <linux/smp_lock.h>
72239 #include <linux/sysctl.h>
72240+#include <linux/grsecurity.h>
72241
72242 #include <asm/system.h>
72243 #include <asm/uaccess.h>
72244
72245 #include "internal.h"
72246
72247+#ifdef CONFIG_PROC_SYSCTL
72248+extern const struct inode_operations proc_sys_inode_operations;
72249+extern const struct inode_operations proc_sys_dir_operations;
72250+#endif
72251+
72252+
72253 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
72254 {
72255 atomic_inc(&de->count);
72256@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
72257 de_put(de);
72258 if (PROC_I(inode)->sysctl)
72259 sysctl_head_put(PROC_I(inode)->sysctl);
72260+
72261+#ifdef CONFIG_PROC_SYSCTL
72262+ if (inode->i_op == &proc_sys_inode_operations ||
72263+ inode->i_op == &proc_sys_dir_operations)
72264+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
72265+#endif
72266+
72267 clear_inode(inode);
72268 }
72269
72270@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
72271 if (de->mode) {
72272 inode->i_mode = de->mode;
72273 inode->i_uid = de->uid;
72274+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72275+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72276+#else
72277 inode->i_gid = de->gid;
72278+#endif
72279 }
72280 if (de->size)
72281 inode->i_size = de->size;
72282diff --git a/fs/proc/internal.h b/fs/proc/internal.h
72283index 753ca37..26bcf3b 100644
72284--- a/fs/proc/internal.h
72285+++ b/fs/proc/internal.h
72286@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
72287 struct pid *pid, struct task_struct *task);
72288 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
72289 struct pid *pid, struct task_struct *task);
72290+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72291+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
72292+#endif
72293 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
72294
72295 extern const struct file_operations proc_maps_operations;
72296diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
72297index b442dac..aab29cb 100644
72298--- a/fs/proc/kcore.c
72299+++ b/fs/proc/kcore.c
72300@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
72301 off_t offset = 0;
72302 struct kcore_list *m;
72303
72304+ pax_track_stack();
72305+
72306 /* setup ELF header */
72307 elf = (struct elfhdr *) bufp;
72308 bufp += sizeof(struct elfhdr);
72309@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72310 * the addresses in the elf_phdr on our list.
72311 */
72312 start = kc_offset_to_vaddr(*fpos - elf_buflen);
72313- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
72314+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
72315+ if (tsz > buflen)
72316 tsz = buflen;
72317-
72318+
72319 while (buflen) {
72320 struct kcore_list *m;
72321
72322@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72323 kfree(elf_buf);
72324 } else {
72325 if (kern_addr_valid(start)) {
72326- unsigned long n;
72327+ char *elf_buf;
72328+ mm_segment_t oldfs;
72329
72330- n = copy_to_user(buffer, (char *)start, tsz);
72331- /*
72332- * We cannot distingush between fault on source
72333- * and fault on destination. When this happens
72334- * we clear too and hope it will trigger the
72335- * EFAULT again.
72336- */
72337- if (n) {
72338- if (clear_user(buffer + tsz - n,
72339- n))
72340+ elf_buf = kmalloc(tsz, GFP_KERNEL);
72341+ if (!elf_buf)
72342+ return -ENOMEM;
72343+ oldfs = get_fs();
72344+ set_fs(KERNEL_DS);
72345+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
72346+ set_fs(oldfs);
72347+ if (copy_to_user(buffer, elf_buf, tsz)) {
72348+ kfree(elf_buf);
72349 return -EFAULT;
72350+ }
72351 }
72352+ set_fs(oldfs);
72353+ kfree(elf_buf);
72354 } else {
72355 if (clear_user(buffer, tsz))
72356 return -EFAULT;
72357@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72358
72359 static int open_kcore(struct inode *inode, struct file *filp)
72360 {
72361+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72362+ return -EPERM;
72363+#endif
72364 if (!capable(CAP_SYS_RAWIO))
72365 return -EPERM;
72366 if (kcore_need_update)
72367diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
72368index 7ca7834..cfe90a4 100644
72369--- a/fs/proc/kmsg.c
72370+++ b/fs/proc/kmsg.c
72371@@ -12,37 +12,37 @@
72372 #include <linux/poll.h>
72373 #include <linux/proc_fs.h>
72374 #include <linux/fs.h>
72375+#include <linux/syslog.h>
72376
72377 #include <asm/uaccess.h>
72378 #include <asm/io.h>
72379
72380 extern wait_queue_head_t log_wait;
72381
72382-extern int do_syslog(int type, char __user *bug, int count);
72383-
72384 static int kmsg_open(struct inode * inode, struct file * file)
72385 {
72386- return do_syslog(1,NULL,0);
72387+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
72388 }
72389
72390 static int kmsg_release(struct inode * inode, struct file * file)
72391 {
72392- (void) do_syslog(0,NULL,0);
72393+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
72394 return 0;
72395 }
72396
72397 static ssize_t kmsg_read(struct file *file, char __user *buf,
72398 size_t count, loff_t *ppos)
72399 {
72400- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
72401+ if ((file->f_flags & O_NONBLOCK) &&
72402+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72403 return -EAGAIN;
72404- return do_syslog(2, buf, count);
72405+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
72406 }
72407
72408 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
72409 {
72410 poll_wait(file, &log_wait, wait);
72411- if (do_syslog(9, NULL, 0))
72412+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72413 return POLLIN | POLLRDNORM;
72414 return 0;
72415 }
72416diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
72417index a65239c..ad1182a 100644
72418--- a/fs/proc/meminfo.c
72419+++ b/fs/proc/meminfo.c
72420@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72421 unsigned long pages[NR_LRU_LISTS];
72422 int lru;
72423
72424+ pax_track_stack();
72425+
72426 /*
72427 * display in kilobytes.
72428 */
72429@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72430 vmi.used >> 10,
72431 vmi.largest_chunk >> 10
72432 #ifdef CONFIG_MEMORY_FAILURE
72433- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
72434+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
72435 #endif
72436 );
72437
72438diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
72439index 9fe7d7e..cdb62c9 100644
72440--- a/fs/proc/nommu.c
72441+++ b/fs/proc/nommu.c
72442@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
72443 if (len < 1)
72444 len = 1;
72445 seq_printf(m, "%*c", len, ' ');
72446- seq_path(m, &file->f_path, "");
72447+ seq_path(m, &file->f_path, "\n\\");
72448 }
72449
72450 seq_putc(m, '\n');
72451diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
72452index 04d1270..25e1173 100644
72453--- a/fs/proc/proc_net.c
72454+++ b/fs/proc/proc_net.c
72455@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
72456 struct task_struct *task;
72457 struct nsproxy *ns;
72458 struct net *net = NULL;
72459+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72460+ const struct cred *cred = current_cred();
72461+#endif
72462+
72463+#ifdef CONFIG_GRKERNSEC_PROC_USER
72464+ if (cred->fsuid)
72465+ return net;
72466+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72467+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
72468+ return net;
72469+#endif
72470
72471 rcu_read_lock();
72472 task = pid_task(proc_pid(dir), PIDTYPE_PID);
72473diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
72474index f667e8a..55f4d96 100644
72475--- a/fs/proc/proc_sysctl.c
72476+++ b/fs/proc/proc_sysctl.c
72477@@ -7,11 +7,13 @@
72478 #include <linux/security.h>
72479 #include "internal.h"
72480
72481+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
72482+
72483 static const struct dentry_operations proc_sys_dentry_operations;
72484 static const struct file_operations proc_sys_file_operations;
72485-static const struct inode_operations proc_sys_inode_operations;
72486+const struct inode_operations proc_sys_inode_operations;
72487 static const struct file_operations proc_sys_dir_file_operations;
72488-static const struct inode_operations proc_sys_dir_operations;
72489+const struct inode_operations proc_sys_dir_operations;
72490
72491 static struct inode *proc_sys_make_inode(struct super_block *sb,
72492 struct ctl_table_header *head, struct ctl_table *table)
72493@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72494 if (!p)
72495 goto out;
72496
72497+ if (gr_handle_sysctl(p, MAY_EXEC))
72498+ goto out;
72499+
72500 err = ERR_PTR(-ENOMEM);
72501 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
72502 if (h)
72503@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72504
72505 err = NULL;
72506 dentry->d_op = &proc_sys_dentry_operations;
72507+
72508+ gr_handle_proc_create(dentry, inode);
72509+
72510 d_add(dentry, inode);
72511
72512 out:
72513@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
72514 return -ENOMEM;
72515 } else {
72516 child->d_op = &proc_sys_dentry_operations;
72517+
72518+ gr_handle_proc_create(child, inode);
72519+
72520 d_add(child, inode);
72521 }
72522 } else {
72523@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
72524 if (*pos < file->f_pos)
72525 continue;
72526
72527+ if (gr_handle_sysctl(table, 0))
72528+ continue;
72529+
72530 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
72531 if (res)
72532 return res;
72533@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
72534 if (IS_ERR(head))
72535 return PTR_ERR(head);
72536
72537+ if (table && gr_handle_sysctl(table, MAY_EXEC))
72538+ return -ENOENT;
72539+
72540 generic_fillattr(inode, stat);
72541 if (table)
72542 stat->mode = (stat->mode & S_IFMT) | table->mode;
72543@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
72544 };
72545
72546 static const struct file_operations proc_sys_dir_file_operations = {
72547+ .read = generic_read_dir,
72548 .readdir = proc_sys_readdir,
72549 .llseek = generic_file_llseek,
72550 };
72551
72552-static const struct inode_operations proc_sys_inode_operations = {
72553+const struct inode_operations proc_sys_inode_operations = {
72554 .permission = proc_sys_permission,
72555 .setattr = proc_sys_setattr,
72556 .getattr = proc_sys_getattr,
72557 };
72558
72559-static const struct inode_operations proc_sys_dir_operations = {
72560+const struct inode_operations proc_sys_dir_operations = {
72561 .lookup = proc_sys_lookup,
72562 .permission = proc_sys_permission,
72563 .setattr = proc_sys_setattr,
72564diff --git a/fs/proc/root.c b/fs/proc/root.c
72565index b080b79..d957e63 100644
72566--- a/fs/proc/root.c
72567+++ b/fs/proc/root.c
72568@@ -134,7 +134,15 @@ void __init proc_root_init(void)
72569 #ifdef CONFIG_PROC_DEVICETREE
72570 proc_device_tree_init();
72571 #endif
72572+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72573+#ifdef CONFIG_GRKERNSEC_PROC_USER
72574+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
72575+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72576+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
72577+#endif
72578+#else
72579 proc_mkdir("bus", NULL);
72580+#endif
72581 proc_sys_init();
72582 }
72583
72584diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
72585index 3b7b82a..4b420b0 100644
72586--- a/fs/proc/task_mmu.c
72587+++ b/fs/proc/task_mmu.c
72588@@ -8,6 +8,7 @@
72589 #include <linux/mempolicy.h>
72590 #include <linux/swap.h>
72591 #include <linux/swapops.h>
72592+#include <linux/grsecurity.h>
72593
72594 #include <asm/elf.h>
72595 #include <asm/uaccess.h>
72596@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72597 "VmStk:\t%8lu kB\n"
72598 "VmExe:\t%8lu kB\n"
72599 "VmLib:\t%8lu kB\n"
72600- "VmPTE:\t%8lu kB\n",
72601- hiwater_vm << (PAGE_SHIFT-10),
72602+ "VmPTE:\t%8lu kB\n"
72603+
72604+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72605+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
72606+#endif
72607+
72608+ ,hiwater_vm << (PAGE_SHIFT-10),
72609 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
72610 mm->locked_vm << (PAGE_SHIFT-10),
72611 hiwater_rss << (PAGE_SHIFT-10),
72612 total_rss << (PAGE_SHIFT-10),
72613 data << (PAGE_SHIFT-10),
72614 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
72615- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
72616+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
72617+
72618+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72619+ , mm->context.user_cs_base, mm->context.user_cs_limit
72620+#endif
72621+
72622+ );
72623 }
72624
72625 unsigned long task_vsize(struct mm_struct *mm)
72626@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
72627 struct proc_maps_private *priv = m->private;
72628 struct vm_area_struct *vma = v;
72629
72630- vma_stop(priv, vma);
72631+ if (!IS_ERR(vma))
72632+ vma_stop(priv, vma);
72633 if (priv->task)
72634 put_task_struct(priv->task);
72635 }
72636@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
72637 return ret;
72638 }
72639
72640+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72641+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
72642+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
72643+ _mm->pax_flags & MF_PAX_SEGMEXEC))
72644+#endif
72645+
72646 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72647 {
72648 struct mm_struct *mm = vma->vm_mm;
72649@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72650 int flags = vma->vm_flags;
72651 unsigned long ino = 0;
72652 unsigned long long pgoff = 0;
72653- unsigned long start;
72654 dev_t dev = 0;
72655 int len;
72656
72657@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72658 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
72659 }
72660
72661- /* We don't show the stack guard page in /proc/maps */
72662- start = vma->vm_start;
72663- if (vma->vm_flags & VM_GROWSDOWN)
72664- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
72665- start += PAGE_SIZE;
72666-
72667 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
72668- start,
72669+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72670+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
72671+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
72672+#else
72673+ vma->vm_start,
72674 vma->vm_end,
72675+#endif
72676 flags & VM_READ ? 'r' : '-',
72677 flags & VM_WRITE ? 'w' : '-',
72678 flags & VM_EXEC ? 'x' : '-',
72679 flags & VM_MAYSHARE ? 's' : 'p',
72680+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72681+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
72682+#else
72683 pgoff,
72684+#endif
72685 MAJOR(dev), MINOR(dev), ino, &len);
72686
72687 /*
72688@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72689 */
72690 if (file) {
72691 pad_len_spaces(m, len);
72692- seq_path(m, &file->f_path, "\n");
72693+ seq_path(m, &file->f_path, "\n\\");
72694 } else {
72695 const char *name = arch_vma_name(vma);
72696 if (!name) {
72697@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72698 if (vma->vm_start <= mm->brk &&
72699 vma->vm_end >= mm->start_brk) {
72700 name = "[heap]";
72701- } else if (vma->vm_start <= mm->start_stack &&
72702- vma->vm_end >= mm->start_stack) {
72703+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
72704+ (vma->vm_start <= mm->start_stack &&
72705+ vma->vm_end >= mm->start_stack)) {
72706 name = "[stack]";
72707 }
72708 } else {
72709@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
72710 struct proc_maps_private *priv = m->private;
72711 struct task_struct *task = priv->task;
72712
72713+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72714+ if (current->exec_id != m->exec_id) {
72715+ gr_log_badprocpid("maps");
72716+ return 0;
72717+ }
72718+#endif
72719+
72720 show_map_vma(m, vma);
72721
72722 if (m->count < m->size) /* vma is copied successfully */
72723@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
72724 .private = &mss,
72725 };
72726
72727+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72728+ if (current->exec_id != m->exec_id) {
72729+ gr_log_badprocpid("smaps");
72730+ return 0;
72731+ }
72732+#endif
72733 memset(&mss, 0, sizeof mss);
72734- mss.vma = vma;
72735- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72736- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72737+
72738+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72739+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
72740+#endif
72741+ mss.vma = vma;
72742+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72743+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72744+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72745+ }
72746+#endif
72747
72748 show_map_vma(m, vma);
72749
72750@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
72751 "Swap: %8lu kB\n"
72752 "KernelPageSize: %8lu kB\n"
72753 "MMUPageSize: %8lu kB\n",
72754+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72755+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
72756+#else
72757 (vma->vm_end - vma->vm_start) >> 10,
72758+#endif
72759 mss.resident >> 10,
72760 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
72761 mss.shared_clean >> 10,
72762diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
72763index 8f5c05d..c99c76d 100644
72764--- a/fs/proc/task_nommu.c
72765+++ b/fs/proc/task_nommu.c
72766@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72767 else
72768 bytes += kobjsize(mm);
72769
72770- if (current->fs && current->fs->users > 1)
72771+ if (current->fs && atomic_read(&current->fs->users) > 1)
72772 sbytes += kobjsize(current->fs);
72773 else
72774 bytes += kobjsize(current->fs);
72775@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
72776 if (len < 1)
72777 len = 1;
72778 seq_printf(m, "%*c", len, ' ');
72779- seq_path(m, &file->f_path, "");
72780+ seq_path(m, &file->f_path, "\n\\");
72781 }
72782
72783 seq_putc(m, '\n');
72784diff --git a/fs/readdir.c b/fs/readdir.c
72785index 7723401..30059a6 100644
72786--- a/fs/readdir.c
72787+++ b/fs/readdir.c
72788@@ -16,6 +16,7 @@
72789 #include <linux/security.h>
72790 #include <linux/syscalls.h>
72791 #include <linux/unistd.h>
72792+#include <linux/namei.h>
72793
72794 #include <asm/uaccess.h>
72795
72796@@ -67,6 +68,7 @@ struct old_linux_dirent {
72797
72798 struct readdir_callback {
72799 struct old_linux_dirent __user * dirent;
72800+ struct file * file;
72801 int result;
72802 };
72803
72804@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
72805 buf->result = -EOVERFLOW;
72806 return -EOVERFLOW;
72807 }
72808+
72809+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72810+ return 0;
72811+
72812 buf->result++;
72813 dirent = buf->dirent;
72814 if (!access_ok(VERIFY_WRITE, dirent,
72815@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
72816
72817 buf.result = 0;
72818 buf.dirent = dirent;
72819+ buf.file = file;
72820
72821 error = vfs_readdir(file, fillonedir, &buf);
72822 if (buf.result)
72823@@ -142,6 +149,7 @@ struct linux_dirent {
72824 struct getdents_callback {
72825 struct linux_dirent __user * current_dir;
72826 struct linux_dirent __user * previous;
72827+ struct file * file;
72828 int count;
72829 int error;
72830 };
72831@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
72832 buf->error = -EOVERFLOW;
72833 return -EOVERFLOW;
72834 }
72835+
72836+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72837+ return 0;
72838+
72839 dirent = buf->previous;
72840 if (dirent) {
72841 if (__put_user(offset, &dirent->d_off))
72842@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
72843 buf.previous = NULL;
72844 buf.count = count;
72845 buf.error = 0;
72846+ buf.file = file;
72847
72848 error = vfs_readdir(file, filldir, &buf);
72849 if (error >= 0)
72850@@ -228,6 +241,7 @@ out:
72851 struct getdents_callback64 {
72852 struct linux_dirent64 __user * current_dir;
72853 struct linux_dirent64 __user * previous;
72854+ struct file *file;
72855 int count;
72856 int error;
72857 };
72858@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
72859 buf->error = -EINVAL; /* only used if we fail.. */
72860 if (reclen > buf->count)
72861 return -EINVAL;
72862+
72863+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72864+ return 0;
72865+
72866 dirent = buf->previous;
72867 if (dirent) {
72868 if (__put_user(offset, &dirent->d_off))
72869@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72870
72871 buf.current_dir = dirent;
72872 buf.previous = NULL;
72873+ buf.file = file;
72874 buf.count = count;
72875 buf.error = 0;
72876
72877@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72878 error = buf.error;
72879 lastdirent = buf.previous;
72880 if (lastdirent) {
72881- typeof(lastdirent->d_off) d_off = file->f_pos;
72882+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
72883 if (__put_user(d_off, &lastdirent->d_off))
72884 error = -EFAULT;
72885 else
72886diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
72887index d42c30c..4fd8718 100644
72888--- a/fs/reiserfs/dir.c
72889+++ b/fs/reiserfs/dir.c
72890@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
72891 struct reiserfs_dir_entry de;
72892 int ret = 0;
72893
72894+ pax_track_stack();
72895+
72896 reiserfs_write_lock(inode->i_sb);
72897
72898 reiserfs_check_lock_depth(inode->i_sb, "readdir");
72899diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
72900index 128d3f7..8840d44 100644
72901--- a/fs/reiserfs/do_balan.c
72902+++ b/fs/reiserfs/do_balan.c
72903@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
72904 return;
72905 }
72906
72907- atomic_inc(&(fs_generation(tb->tb_sb)));
72908+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
72909 do_balance_starts(tb);
72910
72911 /* balance leaf returns 0 except if combining L R and S into
72912diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
72913index 72cb1cc..d0e3181 100644
72914--- a/fs/reiserfs/item_ops.c
72915+++ b/fs/reiserfs/item_ops.c
72916@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
72917 vi->vi_index, vi->vi_type, vi->vi_ih);
72918 }
72919
72920-static struct item_operations stat_data_ops = {
72921+static const struct item_operations stat_data_ops = {
72922 .bytes_number = sd_bytes_number,
72923 .decrement_key = sd_decrement_key,
72924 .is_left_mergeable = sd_is_left_mergeable,
72925@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
72926 vi->vi_index, vi->vi_type, vi->vi_ih);
72927 }
72928
72929-static struct item_operations direct_ops = {
72930+static const struct item_operations direct_ops = {
72931 .bytes_number = direct_bytes_number,
72932 .decrement_key = direct_decrement_key,
72933 .is_left_mergeable = direct_is_left_mergeable,
72934@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
72935 vi->vi_index, vi->vi_type, vi->vi_ih);
72936 }
72937
72938-static struct item_operations indirect_ops = {
72939+static const struct item_operations indirect_ops = {
72940 .bytes_number = indirect_bytes_number,
72941 .decrement_key = indirect_decrement_key,
72942 .is_left_mergeable = indirect_is_left_mergeable,
72943@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
72944 printk("\n");
72945 }
72946
72947-static struct item_operations direntry_ops = {
72948+static const struct item_operations direntry_ops = {
72949 .bytes_number = direntry_bytes_number,
72950 .decrement_key = direntry_decrement_key,
72951 .is_left_mergeable = direntry_is_left_mergeable,
72952@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
72953 "Invalid item type observed, run fsck ASAP");
72954 }
72955
72956-static struct item_operations errcatch_ops = {
72957+static const struct item_operations errcatch_ops = {
72958 errcatch_bytes_number,
72959 errcatch_decrement_key,
72960 errcatch_is_left_mergeable,
72961@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
72962 #error Item types must use disk-format assigned values.
72963 #endif
72964
72965-struct item_operations *item_ops[TYPE_ANY + 1] = {
72966+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
72967 &stat_data_ops,
72968 &indirect_ops,
72969 &direct_ops,
72970diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
72971index b5fe0aa..e0e25c4 100644
72972--- a/fs/reiserfs/journal.c
72973+++ b/fs/reiserfs/journal.c
72974@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
72975 struct buffer_head *bh;
72976 int i, j;
72977
72978+ pax_track_stack();
72979+
72980 bh = __getblk(dev, block, bufsize);
72981 if (buffer_uptodate(bh))
72982 return (bh);
72983diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
72984index 2715791..b8996db 100644
72985--- a/fs/reiserfs/namei.c
72986+++ b/fs/reiserfs/namei.c
72987@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
72988 unsigned long savelink = 1;
72989 struct timespec ctime;
72990
72991+ pax_track_stack();
72992+
72993 /* three balancings: (1) old name removal, (2) new name insertion
72994 and (3) maybe "save" link insertion
72995 stat data updates: (1) old directory,
72996diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
72997index 9229e55..3d2e3b7 100644
72998--- a/fs/reiserfs/procfs.c
72999+++ b/fs/reiserfs/procfs.c
73000@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
73001 "SMALL_TAILS " : "NO_TAILS ",
73002 replay_only(sb) ? "REPLAY_ONLY " : "",
73003 convert_reiserfs(sb) ? "CONV " : "",
73004- atomic_read(&r->s_generation_counter),
73005+ atomic_read_unchecked(&r->s_generation_counter),
73006 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
73007 SF(s_do_balance), SF(s_unneeded_left_neighbor),
73008 SF(s_good_search_by_key_reada), SF(s_bmaps),
73009@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
73010 struct journal_params *jp = &rs->s_v1.s_journal;
73011 char b[BDEVNAME_SIZE];
73012
73013+ pax_track_stack();
73014+
73015 seq_printf(m, /* on-disk fields */
73016 "jp_journal_1st_block: \t%i\n"
73017 "jp_journal_dev: \t%s[%x]\n"
73018diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
73019index d036ee5..4c7dca1 100644
73020--- a/fs/reiserfs/stree.c
73021+++ b/fs/reiserfs/stree.c
73022@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
73023 int iter = 0;
73024 #endif
73025
73026+ pax_track_stack();
73027+
73028 BUG_ON(!th->t_trans_id);
73029
73030 init_tb_struct(th, &s_del_balance, sb, path,
73031@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
73032 int retval;
73033 int quota_cut_bytes = 0;
73034
73035+ pax_track_stack();
73036+
73037 BUG_ON(!th->t_trans_id);
73038
73039 le_key2cpu_key(&cpu_key, key);
73040@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
73041 int quota_cut_bytes;
73042 loff_t tail_pos = 0;
73043
73044+ pax_track_stack();
73045+
73046 BUG_ON(!th->t_trans_id);
73047
73048 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
73049@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
73050 int retval;
73051 int fs_gen;
73052
73053+ pax_track_stack();
73054+
73055 BUG_ON(!th->t_trans_id);
73056
73057 fs_gen = get_generation(inode->i_sb);
73058@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
73059 int fs_gen = 0;
73060 int quota_bytes = 0;
73061
73062+ pax_track_stack();
73063+
73064 BUG_ON(!th->t_trans_id);
73065
73066 if (inode) { /* Do we count quotas for item? */
73067diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
73068index 7cb1285..c726cd0 100644
73069--- a/fs/reiserfs/super.c
73070+++ b/fs/reiserfs/super.c
73071@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
73072 {.option_name = NULL}
73073 };
73074
73075+ pax_track_stack();
73076+
73077 *blocks = 0;
73078 if (!options || !*options)
73079 /* use default configuration: create tails, journaling on, no
73080diff --git a/fs/select.c b/fs/select.c
73081index fd38ce2..f5381b8 100644
73082--- a/fs/select.c
73083+++ b/fs/select.c
73084@@ -20,6 +20,7 @@
73085 #include <linux/module.h>
73086 #include <linux/slab.h>
73087 #include <linux/poll.h>
73088+#include <linux/security.h>
73089 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
73090 #include <linux/file.h>
73091 #include <linux/fdtable.h>
73092@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
73093 int retval, i, timed_out = 0;
73094 unsigned long slack = 0;
73095
73096+ pax_track_stack();
73097+
73098 rcu_read_lock();
73099 retval = max_select_fd(n, fds);
73100 rcu_read_unlock();
73101@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
73102 /* Allocate small arguments on the stack to save memory and be faster */
73103 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
73104
73105+ pax_track_stack();
73106+
73107 ret = -EINVAL;
73108 if (n < 0)
73109 goto out_nofds;
73110@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
73111 struct poll_list *walk = head;
73112 unsigned long todo = nfds;
73113
73114+ pax_track_stack();
73115+
73116+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
73117 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
73118 return -EINVAL;
73119
73120diff --git a/fs/seq_file.c b/fs/seq_file.c
73121index eae7d9d..b7613c6 100644
73122--- a/fs/seq_file.c
73123+++ b/fs/seq_file.c
73124@@ -9,6 +9,7 @@
73125 #include <linux/module.h>
73126 #include <linux/seq_file.h>
73127 #include <linux/slab.h>
73128+#include <linux/sched.h>
73129
73130 #include <asm/uaccess.h>
73131 #include <asm/page.h>
73132@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
73133 memset(p, 0, sizeof(*p));
73134 mutex_init(&p->lock);
73135 p->op = op;
73136+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73137+ p->exec_id = current->exec_id;
73138+#endif
73139
73140 /*
73141 * Wrappers around seq_open(e.g. swaps_open) need to be
73142@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
73143 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
73144 void *data)
73145 {
73146- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
73147+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
73148 int res = -ENOMEM;
73149
73150 if (op) {
73151diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
73152index 71c29b6..54694dd 100644
73153--- a/fs/smbfs/proc.c
73154+++ b/fs/smbfs/proc.c
73155@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
73156
73157 out:
73158 if (server->local_nls != NULL && server->remote_nls != NULL)
73159- server->ops->convert = convert_cp;
73160+ *(void **)&server->ops->convert = convert_cp;
73161 else
73162- server->ops->convert = convert_memcpy;
73163+ *(void **)&server->ops->convert = convert_memcpy;
73164
73165 smb_unlock_server(server);
73166 return n;
73167@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
73168
73169 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
73170 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
73171- server->ops->getattr = smb_proc_getattr_core;
73172+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
73173 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
73174- server->ops->getattr = smb_proc_getattr_ff;
73175+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
73176 }
73177
73178 /* Decode server capabilities */
73179@@ -3439,7 +3439,7 @@ out:
73180 static void
73181 install_ops(struct smb_ops *dst, struct smb_ops *src)
73182 {
73183- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73184+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73185 }
73186
73187 /* < LANMAN2 */
73188diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
73189index 00b2909..2ace383 100644
73190--- a/fs/smbfs/symlink.c
73191+++ b/fs/smbfs/symlink.c
73192@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
73193
73194 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
73195 {
73196- char *s = nd_get_link(nd);
73197+ const char *s = nd_get_link(nd);
73198 if (!IS_ERR(s))
73199 __putname(s);
73200 }
73201diff --git a/fs/splice.c b/fs/splice.c
73202index bb92b7c5..5aa72b0 100644
73203--- a/fs/splice.c
73204+++ b/fs/splice.c
73205@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73206 pipe_lock(pipe);
73207
73208 for (;;) {
73209- if (!pipe->readers) {
73210+ if (!atomic_read(&pipe->readers)) {
73211 send_sig(SIGPIPE, current, 0);
73212 if (!ret)
73213 ret = -EPIPE;
73214@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73215 do_wakeup = 0;
73216 }
73217
73218- pipe->waiting_writers++;
73219+ atomic_inc(&pipe->waiting_writers);
73220 pipe_wait(pipe);
73221- pipe->waiting_writers--;
73222+ atomic_dec(&pipe->waiting_writers);
73223 }
73224
73225 pipe_unlock(pipe);
73226@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
73227 .spd_release = spd_release_page,
73228 };
73229
73230+ pax_track_stack();
73231+
73232 index = *ppos >> PAGE_CACHE_SHIFT;
73233 loff = *ppos & ~PAGE_CACHE_MASK;
73234 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73235@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
73236 old_fs = get_fs();
73237 set_fs(get_ds());
73238 /* The cast to a user pointer is valid due to the set_fs() */
73239- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
73240+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
73241 set_fs(old_fs);
73242
73243 return res;
73244@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
73245 old_fs = get_fs();
73246 set_fs(get_ds());
73247 /* The cast to a user pointer is valid due to the set_fs() */
73248- res = vfs_write(file, (const char __user *)buf, count, &pos);
73249+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
73250 set_fs(old_fs);
73251
73252 return res;
73253@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73254 .spd_release = spd_release_page,
73255 };
73256
73257+ pax_track_stack();
73258+
73259 index = *ppos >> PAGE_CACHE_SHIFT;
73260 offset = *ppos & ~PAGE_CACHE_MASK;
73261 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73262@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73263 goto err;
73264
73265 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
73266- vec[i].iov_base = (void __user *) page_address(page);
73267+ vec[i].iov_base = (__force void __user *) page_address(page);
73268 vec[i].iov_len = this_len;
73269 pages[i] = page;
73270 spd.nr_pages++;
73271@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
73272 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
73273 {
73274 while (!pipe->nrbufs) {
73275- if (!pipe->writers)
73276+ if (!atomic_read(&pipe->writers))
73277 return 0;
73278
73279- if (!pipe->waiting_writers && sd->num_spliced)
73280+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
73281 return 0;
73282
73283 if (sd->flags & SPLICE_F_NONBLOCK)
73284@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
73285 * out of the pipe right after the splice_to_pipe(). So set
73286 * PIPE_READERS appropriately.
73287 */
73288- pipe->readers = 1;
73289+ atomic_set(&pipe->readers, 1);
73290
73291 current->splice_pipe = pipe;
73292 }
73293@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
73294 .spd_release = spd_release_page,
73295 };
73296
73297+ pax_track_stack();
73298+
73299 pipe = pipe_info(file->f_path.dentry->d_inode);
73300 if (!pipe)
73301 return -EBADF;
73302@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73303 ret = -ERESTARTSYS;
73304 break;
73305 }
73306- if (!pipe->writers)
73307+ if (!atomic_read(&pipe->writers))
73308 break;
73309- if (!pipe->waiting_writers) {
73310+ if (!atomic_read(&pipe->waiting_writers)) {
73311 if (flags & SPLICE_F_NONBLOCK) {
73312 ret = -EAGAIN;
73313 break;
73314@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73315 pipe_lock(pipe);
73316
73317 while (pipe->nrbufs >= PIPE_BUFFERS) {
73318- if (!pipe->readers) {
73319+ if (!atomic_read(&pipe->readers)) {
73320 send_sig(SIGPIPE, current, 0);
73321 ret = -EPIPE;
73322 break;
73323@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73324 ret = -ERESTARTSYS;
73325 break;
73326 }
73327- pipe->waiting_writers++;
73328+ atomic_inc(&pipe->waiting_writers);
73329 pipe_wait(pipe);
73330- pipe->waiting_writers--;
73331+ atomic_dec(&pipe->waiting_writers);
73332 }
73333
73334 pipe_unlock(pipe);
73335@@ -1786,14 +1792,14 @@ retry:
73336 pipe_double_lock(ipipe, opipe);
73337
73338 do {
73339- if (!opipe->readers) {
73340+ if (!atomic_read(&opipe->readers)) {
73341 send_sig(SIGPIPE, current, 0);
73342 if (!ret)
73343 ret = -EPIPE;
73344 break;
73345 }
73346
73347- if (!ipipe->nrbufs && !ipipe->writers)
73348+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
73349 break;
73350
73351 /*
73352@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73353 pipe_double_lock(ipipe, opipe);
73354
73355 do {
73356- if (!opipe->readers) {
73357+ if (!atomic_read(&opipe->readers)) {
73358 send_sig(SIGPIPE, current, 0);
73359 if (!ret)
73360 ret = -EPIPE;
73361@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73362 * return EAGAIN if we have the potential of some data in the
73363 * future, otherwise just return 0
73364 */
73365- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
73366+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
73367 ret = -EAGAIN;
73368
73369 pipe_unlock(ipipe);
73370diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
73371index 60c702b..dddc2b5 100644
73372--- a/fs/sysfs/bin.c
73373+++ b/fs/sysfs/bin.c
73374@@ -67,6 +67,8 @@ fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
73375 }
73376
73377 static ssize_t
73378+read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
73379+static ssize_t
73380 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
73381 {
73382 struct bin_buffer *bb = file->private_data;
73383diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
73384index e020183..18d64b4 100644
73385--- a/fs/sysfs/dir.c
73386+++ b/fs/sysfs/dir.c
73387@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
73388 struct sysfs_dirent *sd;
73389 int rc;
73390
73391+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
73392+ const char *parent_name = parent_sd->s_name;
73393+
73394+ mode = S_IFDIR | S_IRWXU;
73395+
73396+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
73397+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
73398+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
73399+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
73400+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
73401+#endif
73402+
73403 /* allocate */
73404 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
73405 if (!sd)
73406diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
73407index 7118a38..70af853 100644
73408--- a/fs/sysfs/file.c
73409+++ b/fs/sysfs/file.c
73410@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
73411
73412 struct sysfs_open_dirent {
73413 atomic_t refcnt;
73414- atomic_t event;
73415+ atomic_unchecked_t event;
73416 wait_queue_head_t poll;
73417 struct list_head buffers; /* goes through sysfs_buffer.list */
73418 };
73419@@ -53,7 +53,7 @@ struct sysfs_buffer {
73420 size_t count;
73421 loff_t pos;
73422 char * page;
73423- struct sysfs_ops * ops;
73424+ const struct sysfs_ops * ops;
73425 struct mutex mutex;
73426 int needs_read_fill;
73427 int event;
73428@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73429 {
73430 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73431 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73432- struct sysfs_ops * ops = buffer->ops;
73433+ const struct sysfs_ops * ops = buffer->ops;
73434 int ret = 0;
73435 ssize_t count;
73436
73437@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73438 if (!sysfs_get_active_two(attr_sd))
73439 return -ENODEV;
73440
73441- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
73442+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
73443 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
73444
73445 sysfs_put_active_two(attr_sd);
73446@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
73447 {
73448 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73449 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73450- struct sysfs_ops * ops = buffer->ops;
73451+ const struct sysfs_ops * ops = buffer->ops;
73452 int rc;
73453
73454 /* need attr_sd for attr and ops, its parent for kobj */
73455@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
73456 return -ENOMEM;
73457
73458 atomic_set(&new_od->refcnt, 0);
73459- atomic_set(&new_od->event, 1);
73460+ atomic_set_unchecked(&new_od->event, 1);
73461 init_waitqueue_head(&new_od->poll);
73462 INIT_LIST_HEAD(&new_od->buffers);
73463 goto retry;
73464@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
73465 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
73466 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73467 struct sysfs_buffer *buffer;
73468- struct sysfs_ops *ops;
73469+ const struct sysfs_ops *ops;
73470 int error = -EACCES;
73471 char *p;
73472
73473@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
73474
73475 sysfs_put_active_two(attr_sd);
73476
73477- if (buffer->event != atomic_read(&od->event))
73478+ if (buffer->event != atomic_read_unchecked(&od->event))
73479 goto trigger;
73480
73481 return DEFAULT_POLLMASK;
73482@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
73483
73484 od = sd->s_attr.open;
73485 if (od) {
73486- atomic_inc(&od->event);
73487+ atomic_inc_unchecked(&od->event);
73488 wake_up_interruptible(&od->poll);
73489 }
73490
73491diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
73492index c5081ad..342ea86 100644
73493--- a/fs/sysfs/symlink.c
73494+++ b/fs/sysfs/symlink.c
73495@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
73496
73497 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
73498 {
73499- char *page = nd_get_link(nd);
73500+ const char *page = nd_get_link(nd);
73501 if (!IS_ERR(page))
73502 free_page((unsigned long)page);
73503 }
73504diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
73505index 1e06853..b06d325 100644
73506--- a/fs/udf/balloc.c
73507+++ b/fs/udf/balloc.c
73508@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
73509
73510 mutex_lock(&sbi->s_alloc_mutex);
73511 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73512- if (bloc->logicalBlockNum < 0 ||
73513- (bloc->logicalBlockNum + count) >
73514- partmap->s_partition_len) {
73515+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73516 udf_debug("%d < %d || %d + %d > %d\n",
73517 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
73518 count, partmap->s_partition_len);
73519@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
73520
73521 mutex_lock(&sbi->s_alloc_mutex);
73522 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73523- if (bloc->logicalBlockNum < 0 ||
73524- (bloc->logicalBlockNum + count) >
73525- partmap->s_partition_len) {
73526+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73527 udf_debug("%d < %d || %d + %d > %d\n",
73528 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
73529 partmap->s_partition_len);
73530diff --git a/fs/udf/inode.c b/fs/udf/inode.c
73531index 6d24c2c..fff470f 100644
73532--- a/fs/udf/inode.c
73533+++ b/fs/udf/inode.c
73534@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
73535 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
73536 int lastblock = 0;
73537
73538+ pax_track_stack();
73539+
73540 prev_epos.offset = udf_file_entry_alloc_offset(inode);
73541 prev_epos.block = iinfo->i_location;
73542 prev_epos.bh = NULL;
73543diff --git a/fs/udf/misc.c b/fs/udf/misc.c
73544index 9215700..bf1f68e 100644
73545--- a/fs/udf/misc.c
73546+++ b/fs/udf/misc.c
73547@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
73548
73549 u8 udf_tag_checksum(const struct tag *t)
73550 {
73551- u8 *data = (u8 *)t;
73552+ const u8 *data = (const u8 *)t;
73553 u8 checksum = 0;
73554 int i;
73555 for (i = 0; i < sizeof(struct tag); ++i)
73556diff --git a/fs/utimes.c b/fs/utimes.c
73557index e4c75db..b4df0e0 100644
73558--- a/fs/utimes.c
73559+++ b/fs/utimes.c
73560@@ -1,6 +1,7 @@
73561 #include <linux/compiler.h>
73562 #include <linux/file.h>
73563 #include <linux/fs.h>
73564+#include <linux/security.h>
73565 #include <linux/linkage.h>
73566 #include <linux/mount.h>
73567 #include <linux/namei.h>
73568@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
73569 goto mnt_drop_write_and_out;
73570 }
73571 }
73572+
73573+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
73574+ error = -EACCES;
73575+ goto mnt_drop_write_and_out;
73576+ }
73577+
73578 mutex_lock(&inode->i_mutex);
73579 error = notify_change(path->dentry, &newattrs);
73580 mutex_unlock(&inode->i_mutex);
73581diff --git a/fs/xattr.c b/fs/xattr.c
73582index 6d4f6d3..cda3958 100644
73583--- a/fs/xattr.c
73584+++ b/fs/xattr.c
73585@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
73586 * Extended attribute SET operations
73587 */
73588 static long
73589-setxattr(struct dentry *d, const char __user *name, const void __user *value,
73590+setxattr(struct path *path, const char __user *name, const void __user *value,
73591 size_t size, int flags)
73592 {
73593 int error;
73594@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
73595 return PTR_ERR(kvalue);
73596 }
73597
73598- error = vfs_setxattr(d, kname, kvalue, size, flags);
73599+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
73600+ error = -EACCES;
73601+ goto out;
73602+ }
73603+
73604+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
73605+out:
73606 kfree(kvalue);
73607 return error;
73608 }
73609@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
73610 return error;
73611 error = mnt_want_write(path.mnt);
73612 if (!error) {
73613- error = setxattr(path.dentry, name, value, size, flags);
73614+ error = setxattr(&path, name, value, size, flags);
73615 mnt_drop_write(path.mnt);
73616 }
73617 path_put(&path);
73618@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
73619 return error;
73620 error = mnt_want_write(path.mnt);
73621 if (!error) {
73622- error = setxattr(path.dentry, name, value, size, flags);
73623+ error = setxattr(&path, name, value, size, flags);
73624 mnt_drop_write(path.mnt);
73625 }
73626 path_put(&path);
73627@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
73628 const void __user *,value, size_t, size, int, flags)
73629 {
73630 struct file *f;
73631- struct dentry *dentry;
73632 int error = -EBADF;
73633
73634 f = fget(fd);
73635 if (!f)
73636 return error;
73637- dentry = f->f_path.dentry;
73638- audit_inode(NULL, dentry);
73639+ audit_inode(NULL, f->f_path.dentry);
73640 error = mnt_want_write_file(f);
73641 if (!error) {
73642- error = setxattr(dentry, name, value, size, flags);
73643+ error = setxattr(&f->f_path, name, value, size, flags);
73644 mnt_drop_write(f->f_path.mnt);
73645 }
73646 fput(f);
73647diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
73648index c6ad7c7..f2847a7 100644
73649--- a/fs/xattr_acl.c
73650+++ b/fs/xattr_acl.c
73651@@ -17,8 +17,8 @@
73652 struct posix_acl *
73653 posix_acl_from_xattr(const void *value, size_t size)
73654 {
73655- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73656- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73657+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73658+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73659 int count;
73660 struct posix_acl *acl;
73661 struct posix_acl_entry *acl_e;
73662diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
73663index 942362f..88f96f5 100644
73664--- a/fs/xfs/linux-2.6/xfs_ioctl.c
73665+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
73666@@ -134,7 +134,7 @@ xfs_find_handle(
73667 }
73668
73669 error = -EFAULT;
73670- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
73671+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
73672 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
73673 goto out_put;
73674
73675@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
73676 if (IS_ERR(dentry))
73677 return PTR_ERR(dentry);
73678
73679- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
73680+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
73681 if (!kbuf)
73682 goto out_dput;
73683
73684@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
73685 xfs_mount_t *mp,
73686 void __user *arg)
73687 {
73688- xfs_fsop_geom_t fsgeo;
73689+ xfs_fsop_geom_t fsgeo;
73690 int error;
73691
73692 error = xfs_fs_geometry(mp, &fsgeo, 3);
73693diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
73694index bad485a..479bd32 100644
73695--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
73696+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
73697@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
73698 xfs_fsop_geom_t fsgeo;
73699 int error;
73700
73701+ memset(&fsgeo, 0, sizeof(fsgeo));
73702 error = xfs_fs_geometry(mp, &fsgeo, 3);
73703 if (error)
73704 return -error;
73705diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
73706index 1f3b4b8..6102f6d 100644
73707--- a/fs/xfs/linux-2.6/xfs_iops.c
73708+++ b/fs/xfs/linux-2.6/xfs_iops.c
73709@@ -468,7 +468,7 @@ xfs_vn_put_link(
73710 struct nameidata *nd,
73711 void *p)
73712 {
73713- char *s = nd_get_link(nd);
73714+ const char *s = nd_get_link(nd);
73715
73716 if (!IS_ERR(s))
73717 kfree(s);
73718diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
73719index 8971fb0..5fc1eb2 100644
73720--- a/fs/xfs/xfs_bmap.c
73721+++ b/fs/xfs/xfs_bmap.c
73722@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
73723 int nmap,
73724 int ret_nmap);
73725 #else
73726-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
73727+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
73728 #endif /* DEBUG */
73729
73730 #if defined(XFS_RW_TRACE)
73731diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
73732index e89734e..5e84d8d 100644
73733--- a/fs/xfs/xfs_dir2_sf.c
73734+++ b/fs/xfs/xfs_dir2_sf.c
73735@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
73736 }
73737
73738 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
73739- if (filldir(dirent, sfep->name, sfep->namelen,
73740+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
73741+ char name[sfep->namelen];
73742+ memcpy(name, sfep->name, sfep->namelen);
73743+ if (filldir(dirent, name, sfep->namelen,
73744+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
73745+ *offset = off & 0x7fffffff;
73746+ return 0;
73747+ }
73748+ } else if (filldir(dirent, sfep->name, sfep->namelen,
73749 off & 0x7fffffff, ino, DT_UNKNOWN)) {
73750 *offset = off & 0x7fffffff;
73751 return 0;
73752diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
73753index 8f32f50..b6a41e8 100644
73754--- a/fs/xfs/xfs_vnodeops.c
73755+++ b/fs/xfs/xfs_vnodeops.c
73756@@ -564,13 +564,18 @@ xfs_readlink(
73757
73758 xfs_ilock(ip, XFS_ILOCK_SHARED);
73759
73760- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
73761- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
73762-
73763 pathlen = ip->i_d.di_size;
73764 if (!pathlen)
73765 goto out;
73766
73767+ if (pathlen > MAXPATHLEN) {
73768+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
73769+ __func__, (unsigned long long)ip->i_ino, pathlen);
73770+ ASSERT(0);
73771+ error = XFS_ERROR(EFSCORRUPTED);
73772+ goto out;
73773+ }
73774+
73775 if (ip->i_df.if_flags & XFS_IFINLINE) {
73776 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
73777 link[pathlen] = '\0';
73778diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
73779new file mode 100644
73780index 0000000..50819f8
73781--- /dev/null
73782+++ b/grsecurity/Kconfig
73783@@ -0,0 +1,1077 @@
73784+#
73785+# grsecurity configuration
73786+#
73787+
73788+menu "Grsecurity"
73789+
73790+config GRKERNSEC
73791+ bool "Grsecurity"
73792+ select CRYPTO
73793+ select CRYPTO_SHA256
73794+ help
73795+ If you say Y here, you will be able to configure many features
73796+ that will enhance the security of your system. It is highly
73797+ recommended that you say Y here and read through the help
73798+ for each option so that you fully understand the features and
73799+ can evaluate their usefulness for your machine.
73800+
73801+choice
73802+ prompt "Security Level"
73803+ depends on GRKERNSEC
73804+ default GRKERNSEC_CUSTOM
73805+
73806+config GRKERNSEC_LOW
73807+ bool "Low"
73808+ select GRKERNSEC_LINK
73809+ select GRKERNSEC_FIFO
73810+ select GRKERNSEC_RANDNET
73811+ select GRKERNSEC_DMESG
73812+ select GRKERNSEC_CHROOT
73813+ select GRKERNSEC_CHROOT_CHDIR
73814+
73815+ help
73816+ If you choose this option, several of the grsecurity options will
73817+ be enabled that will give you greater protection against a number
73818+ of attacks, while assuring that none of your software will have any
73819+ conflicts with the additional security measures. If you run a lot
73820+ of unusual software, or you are having problems with the higher
73821+ security levels, you should say Y here. With this option, the
73822+ following features are enabled:
73823+
73824+ - Linking restrictions
73825+ - FIFO restrictions
73826+ - Restricted dmesg
73827+ - Enforced chdir("/") on chroot
73828+ - Runtime module disabling
73829+
73830+config GRKERNSEC_MEDIUM
73831+ bool "Medium"
73832+ select PAX
73833+ select PAX_EI_PAX
73834+ select PAX_PT_PAX_FLAGS
73835+ select PAX_HAVE_ACL_FLAGS
73836+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73837+ select GRKERNSEC_CHROOT
73838+ select GRKERNSEC_CHROOT_SYSCTL
73839+ select GRKERNSEC_LINK
73840+ select GRKERNSEC_FIFO
73841+ select GRKERNSEC_DMESG
73842+ select GRKERNSEC_RANDNET
73843+ select GRKERNSEC_FORKFAIL
73844+ select GRKERNSEC_TIME
73845+ select GRKERNSEC_SIGNAL
73846+ select GRKERNSEC_CHROOT
73847+ select GRKERNSEC_CHROOT_UNIX
73848+ select GRKERNSEC_CHROOT_MOUNT
73849+ select GRKERNSEC_CHROOT_PIVOT
73850+ select GRKERNSEC_CHROOT_DOUBLE
73851+ select GRKERNSEC_CHROOT_CHDIR
73852+ select GRKERNSEC_CHROOT_MKNOD
73853+ select GRKERNSEC_PROC
73854+ select GRKERNSEC_PROC_USERGROUP
73855+ select PAX_RANDUSTACK
73856+ select PAX_ASLR
73857+ select PAX_RANDMMAP
73858+ select PAX_REFCOUNT if (X86 || SPARC64)
73859+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73860+
73861+ help
73862+ If you say Y here, several features in addition to those included
73863+ in the low additional security level will be enabled. These
73864+ features provide even more security to your system, though in rare
73865+ cases they may be incompatible with very old or poorly written
73866+ software. If you enable this option, make sure that your auth
73867+ service (identd) is running as gid 1001. With this option,
73868+ the following features (in addition to those provided in the
73869+ low additional security level) will be enabled:
73870+
73871+ - Failed fork logging
73872+ - Time change logging
73873+ - Signal logging
73874+ - Deny mounts in chroot
73875+ - Deny double chrooting
73876+ - Deny sysctl writes in chroot
73877+ - Deny mknod in chroot
73878+ - Deny access to abstract AF_UNIX sockets out of chroot
73879+ - Deny pivot_root in chroot
73880+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
73881+ - /proc restrictions with special GID set to 10 (usually wheel)
73882+ - Address Space Layout Randomization (ASLR)
73883+ - Prevent exploitation of most refcount overflows
73884+ - Bounds checking of copying between the kernel and userland
73885+
73886+config GRKERNSEC_HIGH
73887+ bool "High"
73888+ select GRKERNSEC_LINK
73889+ select GRKERNSEC_FIFO
73890+ select GRKERNSEC_DMESG
73891+ select GRKERNSEC_FORKFAIL
73892+ select GRKERNSEC_TIME
73893+ select GRKERNSEC_SIGNAL
73894+ select GRKERNSEC_CHROOT
73895+ select GRKERNSEC_CHROOT_SHMAT
73896+ select GRKERNSEC_CHROOT_UNIX
73897+ select GRKERNSEC_CHROOT_MOUNT
73898+ select GRKERNSEC_CHROOT_FCHDIR
73899+ select GRKERNSEC_CHROOT_PIVOT
73900+ select GRKERNSEC_CHROOT_DOUBLE
73901+ select GRKERNSEC_CHROOT_CHDIR
73902+ select GRKERNSEC_CHROOT_MKNOD
73903+ select GRKERNSEC_CHROOT_CAPS
73904+ select GRKERNSEC_CHROOT_SYSCTL
73905+ select GRKERNSEC_CHROOT_FINDTASK
73906+ select GRKERNSEC_SYSFS_RESTRICT
73907+ select GRKERNSEC_PROC
73908+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73909+ select GRKERNSEC_HIDESYM
73910+ select GRKERNSEC_BRUTE
73911+ select GRKERNSEC_PROC_USERGROUP
73912+ select GRKERNSEC_KMEM
73913+ select GRKERNSEC_RESLOG
73914+ select GRKERNSEC_RANDNET
73915+ select GRKERNSEC_PROC_ADD
73916+ select GRKERNSEC_CHROOT_CHMOD
73917+ select GRKERNSEC_CHROOT_NICE
73918+ select GRKERNSEC_SETXID
73919+ select GRKERNSEC_AUDIT_MOUNT
73920+ select GRKERNSEC_MODHARDEN if (MODULES)
73921+ select GRKERNSEC_HARDEN_PTRACE
73922+ select GRKERNSEC_PTRACE_READEXEC
73923+ select GRKERNSEC_VM86 if (X86_32)
73924+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
73925+ select PAX
73926+ select PAX_RANDUSTACK
73927+ select PAX_ASLR
73928+ select PAX_RANDMMAP
73929+ select PAX_NOEXEC
73930+ select PAX_MPROTECT
73931+ select PAX_EI_PAX
73932+ select PAX_PT_PAX_FLAGS
73933+ select PAX_HAVE_ACL_FLAGS
73934+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
73935+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
73936+ select PAX_RANDKSTACK if (X86_TSC && X86)
73937+ select PAX_SEGMEXEC if (X86_32)
73938+ select PAX_PAGEEXEC
73939+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
73940+ select PAX_EMUTRAMP if (PARISC)
73941+ select PAX_EMUSIGRT if (PARISC)
73942+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
73943+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
73944+ select PAX_REFCOUNT if (X86 || SPARC64)
73945+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73946+ help
73947+ If you say Y here, many of the features of grsecurity will be
73948+ enabled, which will protect you against many kinds of attacks
73949+ against your system. The heightened security comes at a cost
73950+ of an increased chance of incompatibilities with rare software
73951+ on your machine. Since this security level enables PaX, you should
73952+ view <http://pax.grsecurity.net> and read about the PaX
73953+ project. While you are there, download chpax and run it on
73954+ binaries that cause problems with PaX. Also remember that
73955+ since the /proc restrictions are enabled, you must run your
73956+ identd as gid 1001. This security level enables the following
73957+ features in addition to those listed in the low and medium
73958+ security levels:
73959+
73960+ - Additional /proc restrictions
73961+ - Chmod restrictions in chroot
73962+ - No signals, ptrace, or viewing of processes outside of chroot
73963+ - Capability restrictions in chroot
73964+ - Deny fchdir out of chroot
73965+ - Priority restrictions in chroot
73966+ - Segmentation-based implementation of PaX
73967+ - Mprotect restrictions
73968+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
73969+ - Kernel stack randomization
73970+ - Mount/unmount/remount logging
73971+ - Kernel symbol hiding
73972+ - Hardening of module auto-loading
73973+ - Ptrace restrictions
73974+ - Restricted vm86 mode
73975+ - Restricted sysfs/debugfs
73976+ - Active kernel exploit response
73977+
73978+config GRKERNSEC_CUSTOM
73979+ bool "Custom"
73980+ help
73981+ If you say Y here, you will be able to configure every grsecurity
73982+ option, which allows you to enable many more features that aren't
73983+ covered in the basic security levels. These additional features
73984+ include TPE, socket restrictions, and the sysctl system for
73985+ grsecurity. It is advised that you read through the help for
73986+ each option to determine its usefulness in your situation.
73987+
73988+endchoice
73989+
73990+menu "Memory Protections"
73991+depends on GRKERNSEC
73992+
73993+config GRKERNSEC_KMEM
73994+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
73995+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
73996+ help
73997+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
73998+ be written to or read from to modify or leak the contents of the running
73999+ kernel. /dev/port will also not be allowed to be opened. If you have module
74000+ support disabled, enabling this will close up four ways that are
74001+ currently used to insert malicious code into the running kernel.
74002+ Even with all these features enabled, we still highly recommend that
74003+ you use the RBAC system, as it is still possible for an attacker to
74004+ modify the running kernel through privileged I/O granted by ioperm/iopl.
74005+ If you are not using XFree86, you may be able to stop this additional
74006+ case by enabling the 'Disable privileged I/O' option. Though nothing
74007+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
74008+ but only to video memory, which is the only writing we allow in this
74009+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
74010+ not be allowed to mprotect it with PROT_WRITE later.
74011+ It is highly recommended that you say Y here if you meet all the
74012+ conditions above.
74013+
74014+config GRKERNSEC_VM86
74015+ bool "Restrict VM86 mode"
74016+ depends on X86_32
74017+
74018+ help
74019+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
74020+ make use of a special execution mode on 32bit x86 processors called
74021+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
74022+ video cards and will still work with this option enabled. The purpose
74023+ of the option is to prevent exploitation of emulation errors in
74024+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
74025+ Nearly all users should be able to enable this option.
74026+
74027+config GRKERNSEC_IO
74028+ bool "Disable privileged I/O"
74029+ depends on X86
74030+ select RTC_CLASS
74031+ select RTC_INTF_DEV
74032+ select RTC_DRV_CMOS
74033+
74034+ help
74035+ If you say Y here, all ioperm and iopl calls will return an error.
74036+ Ioperm and iopl can be used to modify the running kernel.
74037+ Unfortunately, some programs need this access to operate properly,
74038+ the most notable of which are XFree86 and hwclock. hwclock can be
74039+ remedied by having RTC support in the kernel, so real-time
74040+ clock support is enabled if this option is enabled, to ensure
74041+ that hwclock operates correctly. XFree86 still will not
74042+ operate correctly with this option enabled, so DO NOT CHOOSE Y
74043+ IF YOU USE XFree86. If you use XFree86 and you still want to
74044+ protect your kernel against modification, use the RBAC system.
74045+
74046+config GRKERNSEC_PROC_MEMMAP
74047+ bool "Harden ASLR against information leaks and entropy reduction"
74048+ default y if (PAX_NOEXEC || PAX_ASLR)
74049+ depends on PAX_NOEXEC || PAX_ASLR
74050+ help
74051+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
74052+ give no information about the addresses of its mappings if
74053+ PaX features that rely on random addresses are enabled on the task.
74054+ In addition to sanitizing this information and disabling other
74055+ dangerous sources of information, this option causes reads of sensitive
74056+ /proc/<pid> entries where the file descriptor was opened in a different
74057+ task than the one performing the read to be denied. Such attempts are logged.
74058+ This option also limits argv/env strings for suid/sgid binaries
74059+ to 512KB to prevent a complete exhaustion of the stack entropy provided
74060+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
74061+ binaries to prevent alternative mmap layouts from being abused.
74062+
74063+ If you use PaX it is essential that you say Y here as it closes up
74064+ several holes that make full ASLR useless locally.
74065+
74066+config GRKERNSEC_BRUTE
74067+ bool "Deter exploit bruteforcing"
74068+ help
74069+ If you say Y here, attempts to bruteforce exploits against forking
74070+ daemons such as apache or sshd, as well as against suid/sgid binaries
74071+ will be deterred. When a child of a forking daemon is killed by PaX
74072+ or crashes due to an illegal instruction or other suspicious signal,
74073+ the parent process will be delayed 30 seconds upon every subsequent
74074+ fork until the administrator is able to assess the situation and
74075+ restart the daemon.
74076+ In the suid/sgid case, the attempt is logged, the user has all their
74077+ processes terminated, and they are prevented from executing any further
74078+ processes for 15 minutes.
74079+ It is recommended that you also enable signal logging in the auditing
74080+ section so that logs are generated when a process triggers a suspicious
74081+ signal.
74082+ If the sysctl option is enabled, a sysctl option with name
74083+ "deter_bruteforce" is created.
74084+
74085+config GRKERNSEC_MODHARDEN
74086+ bool "Harden module auto-loading"
74087+ depends on MODULES
74088+ help
74089+ If you say Y here, module auto-loading in response to use of some
74090+ feature implemented by an unloaded module will be restricted to
74091+ root users. Enabling this option helps defend against attacks
74092+ by unprivileged users who abuse the auto-loading behavior to
74093+ cause a vulnerable module to load that is then exploited.
74094+
74095+ If this option prevents a legitimate use of auto-loading for a
74096+ non-root user, the administrator can execute modprobe manually
74097+ with the exact name of the module mentioned in the alert log.
74098+ Alternatively, the administrator can add the module to the list
74099+ of modules loaded at boot by modifying init scripts.
74100+
74101+ Modification of init scripts will most likely be needed on
74102+ Ubuntu servers with encrypted home directory support enabled,
74103+ as the first non-root user logging in will cause the ecb(aes),
74104+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
74105+
74106+config GRKERNSEC_HIDESYM
74107+ bool "Hide kernel symbols"
74108+ help
74109+ If you say Y here, getting information on loaded modules, and
74110+ displaying all kernel symbols through a syscall will be restricted
74111+ to users with CAP_SYS_MODULE. For software compatibility reasons,
74112+ /proc/kallsyms will be restricted to the root user. The RBAC
74113+ system can hide that entry even from root.
74114+
74115+ This option also prevents leaking of kernel addresses through
74116+ several /proc entries.
74117+
74118+ Note that this option is only effective provided the following
74119+ conditions are met:
74120+ 1) The kernel using grsecurity is not precompiled by some distribution
74121+ 2) You have also enabled GRKERNSEC_DMESG
74122+ 3) You are using the RBAC system and hiding other files such as your
74123+ kernel image and System.map. Alternatively, enabling this option
74124+ causes the permissions on /boot, /lib/modules, and the kernel
74125+ source directory to change at compile time to prevent
74126+ reading by non-root users.
74127+ If the above conditions are met, this option will aid in providing a
74128+ useful protection against local kernel exploitation of overflows
74129+ and arbitrary read/write vulnerabilities.
74130+
74131+config GRKERNSEC_KERN_LOCKOUT
74132+ bool "Active kernel exploit response"
74133+ depends on X86 || ARM || PPC || SPARC
74134+ help
74135+ If you say Y here, when a PaX alert is triggered due to suspicious
74136+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
74137+ or an OOPs occurs due to bad memory accesses, instead of just
74138+ terminating the offending process (and potentially allowing
74139+ a subsequent exploit from the same user), we will take one of two
74140+ actions:
74141+ If the user was root, we will panic the system
74142+ If the user was non-root, we will log the attempt, terminate
74143+ all processes owned by the user, then prevent them from creating
74144+ any new processes until the system is restarted
74145+ This deters repeated kernel exploitation/bruteforcing attempts
74146+ and is useful for later forensics.
74147+
74148+endmenu
74149+menu "Role Based Access Control Options"
74150+depends on GRKERNSEC
74151+
74152+config GRKERNSEC_RBAC_DEBUG
74153+ bool
74154+
74155+config GRKERNSEC_NO_RBAC
74156+ bool "Disable RBAC system"
74157+ help
74158+ If you say Y here, the /dev/grsec device will be removed from the kernel,
74159+ preventing the RBAC system from being enabled. You should only say Y
74160+ here if you have no intention of using the RBAC system, so as to prevent
74161+ an attacker with root access from misusing the RBAC system to hide files
74162+ and processes when loadable module support and /dev/[k]mem have been
74163+ locked down.
74164+
74165+config GRKERNSEC_ACL_HIDEKERN
74166+ bool "Hide kernel processes"
74167+ help
74168+ If you say Y here, all kernel threads will be hidden to all
74169+ processes but those whose subject has the "view hidden processes"
74170+ flag.
74171+
74172+config GRKERNSEC_ACL_MAXTRIES
74173+ int "Maximum tries before password lockout"
74174+ default 3
74175+ help
74176+ This option enforces the maximum number of times a user can attempt
74177+ to authorize themselves with the grsecurity RBAC system before being
74178+ denied the ability to attempt authorization again for a specified time.
74179+ The lower the number, the harder it will be to brute-force a password.
74180+
74181+config GRKERNSEC_ACL_TIMEOUT
74182+ int "Time to wait after max password tries, in seconds"
74183+ default 30
74184+ help
74185+ This option specifies the time the user must wait after attempting to
74186+ authorize to the RBAC system with the maximum number of invalid
74187+ passwords. The higher the number, the harder it will be to brute-force
74188+ a password.
74189+
74190+endmenu
74191+menu "Filesystem Protections"
74192+depends on GRKERNSEC
74193+
74194+config GRKERNSEC_PROC
74195+ bool "Proc restrictions"
74196+ help
74197+ If you say Y here, the permissions of the /proc filesystem
74198+ will be altered to enhance system security and privacy. You MUST
74199+ choose either a user only restriction or a user and group restriction.
74200+ Depending upon the option you choose, you can either restrict users to
74201+ see only the processes they themselves run, or choose a group that can
74202+ view all processes and files normally restricted to root if you choose
74203+ the "restrict to user only" option. NOTE: If you're running identd or
74204+ ntpd as a non-root user, you will have to run it as the group you
74205+ specify here.
74206+
74207+config GRKERNSEC_PROC_USER
74208+ bool "Restrict /proc to user only"
74209+ depends on GRKERNSEC_PROC
74210+ help
74211+ If you say Y here, non-root users will only be able to view their own
74212+ processes, and restricts them from viewing network-related information,
74213+ and viewing kernel symbol and module information.
74214+
74215+config GRKERNSEC_PROC_USERGROUP
74216+ bool "Allow special group"
74217+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
74218+ help
74219+ If you say Y here, you will be able to select a group that will be
74220+ able to view all processes and network-related information. If you've
74221+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
74222+ remain hidden. This option is useful if you want to run identd as
74223+ a non-root user.
74224+
74225+config GRKERNSEC_PROC_GID
74226+ int "GID for special group"
74227+ depends on GRKERNSEC_PROC_USERGROUP
74228+ default 1001
74229+
74230+config GRKERNSEC_PROC_ADD
74231+ bool "Additional restrictions"
74232+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
74233+ help
74234+ If you say Y here, additional restrictions will be placed on
74235+ /proc that keep normal users from viewing device information and
74236+ slabinfo information that could be useful for exploits.
74237+
74238+config GRKERNSEC_LINK
74239+ bool "Linking restrictions"
74240+ help
74241+ If you say Y here, /tmp race exploits will be prevented, since users
74242+ will no longer be able to follow symlinks owned by other users in
74243+ world-writable +t directories (e.g. /tmp), unless the owner of the
74244+ symlink is the owner of the directory. Users will also not be
74245+ able to hardlink to files they do not own. If the sysctl option is
74246+ enabled, a sysctl option with name "linking_restrictions" is created.
74247+
74248+config GRKERNSEC_FIFO
74249+ bool "FIFO restrictions"
74250+ help
74251+ If you say Y here, users will not be able to write to FIFOs they don't
74252+ own in world-writable +t directories (e.g. /tmp), unless the owner of
74253+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
74254+ option is enabled, a sysctl option with name "fifo_restrictions" is
74255+ created.
74256+
74257+config GRKERNSEC_SYSFS_RESTRICT
74258+ bool "Sysfs/debugfs restriction"
74259+ depends on SYSFS
74260+ help
74261+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
74262+ any filesystem normally mounted under it (e.g. debugfs) will be
74263+ mostly accessible only by root. These filesystems generally provide access
74264+ to hardware and debug information that isn't appropriate for unprivileged
74265+ users of the system. Sysfs and debugfs have also become a large source
74266+ of new vulnerabilities, ranging from infoleaks to local compromise.
74267+ There has been very little oversight with an eye toward security involved
74268+ in adding new exporters of information to these filesystems, so their
74269+ use is discouraged.
74270+ For reasons of compatibility, a few directories have been whitelisted
74271+ for access by non-root users:
74272+ /sys/fs/selinux
74273+ /sys/fs/fuse
74274+ /sys/devices/system/cpu
74275+
74276+config GRKERNSEC_ROFS
74277+ bool "Runtime read-only mount protection"
74278+ help
74279+ If you say Y here, a sysctl option with name "romount_protect" will
74280+ be created. By setting this option to 1 at runtime, filesystems
74281+ will be protected in the following ways:
74282+ * No new writable mounts will be allowed
74283+ * Existing read-only mounts won't be able to be remounted read/write
74284+ * Write operations will be denied on all block devices
74285+ This option acts independently of grsec_lock: once it is set to 1,
74286+ it cannot be turned off. Therefore, please be mindful of the resulting
74287+ behavior if this option is enabled in an init script on a read-only
74288+ filesystem. This feature is mainly intended for secure embedded systems.
74289+
74290+config GRKERNSEC_CHROOT
74291+ bool "Chroot jail restrictions"
74292+ help
74293+ If you say Y here, you will be able to choose several options that will
74294+ make breaking out of a chrooted jail much more difficult. If you
74295+ encounter no software incompatibilities with the following options, it
74296+ is recommended that you enable each one.
74297+
74298+config GRKERNSEC_CHROOT_MOUNT
74299+ bool "Deny mounts"
74300+ depends on GRKERNSEC_CHROOT
74301+ help
74302+ If you say Y here, processes inside a chroot will not be able to
74303+ mount or remount filesystems. If the sysctl option is enabled, a
74304+ sysctl option with name "chroot_deny_mount" is created.
74305+
74306+config GRKERNSEC_CHROOT_DOUBLE
74307+ bool "Deny double-chroots"
74308+ depends on GRKERNSEC_CHROOT
74309+ help
74310+ If you say Y here, processes inside a chroot will not be able to chroot
74311+ again outside the chroot. This is a widely used method of breaking
74312+ out of a chroot jail and should not be allowed. If the sysctl
74313+ option is enabled, a sysctl option with name
74314+ "chroot_deny_chroot" is created.
74315+
74316+config GRKERNSEC_CHROOT_PIVOT
74317+ bool "Deny pivot_root in chroot"
74318+ depends on GRKERNSEC_CHROOT
74319+ help
74320+ If you say Y here, processes inside a chroot will not be able to use
74321+ a function called pivot_root() that was introduced in Linux 2.3.41. It
74322+ works similar to chroot in that it changes the root filesystem. This
74323+ function could be misused in a chrooted process to attempt to break out
74324+ of the chroot, and therefore should not be allowed. If the sysctl
74325+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
74326+ created.
74327+
74328+config GRKERNSEC_CHROOT_CHDIR
74329+ bool "Enforce chdir(\"/\") on all chroots"
74330+ depends on GRKERNSEC_CHROOT
74331+ help
74332+ If you say Y here, the current working directory of all newly-chrooted
74333+ applications will be set to the root directory of the chroot.
74334+ The man page on chroot(2) states:
74335+ Note that this call does not change the current working
74336+ directory, so that `.' can be outside the tree rooted at
74337+ `/'. In particular, the super-user can escape from a
74338+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
74339+
74340+ It is recommended that you say Y here, since it's not known to break
74341+ any software. If the sysctl option is enabled, a sysctl option with
74342+ name "chroot_enforce_chdir" is created.
74343+
74344+config GRKERNSEC_CHROOT_CHMOD
74345+ bool "Deny (f)chmod +s"
74346+ depends on GRKERNSEC_CHROOT
74347+ help
74348+ If you say Y here, processes inside a chroot will not be able to chmod
74349+ or fchmod files to make them have suid or sgid bits. This protects
74350+ against another published method of breaking a chroot. If the sysctl
74351+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
74352+ created.
74353+
74354+config GRKERNSEC_CHROOT_FCHDIR
74355+ bool "Deny fchdir out of chroot"
74356+ depends on GRKERNSEC_CHROOT
74357+ help
74358+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
74359+ to a file descriptor of the chrooting process that points to a directory
74360+ outside the filesystem will be stopped. If the sysctl option
74361+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
74362+
74363+config GRKERNSEC_CHROOT_MKNOD
74364+ bool "Deny mknod"
74365+ depends on GRKERNSEC_CHROOT
74366+ help
74367+ If you say Y here, processes inside a chroot will not be allowed to
74368+ mknod. The problem with using mknod inside a chroot is that it
74369+ would allow an attacker to create a device entry that is the same
74370+ as one on the physical root of your system, which could be
74371+ anything from the console device to a device for your harddrive (which
74372+ they could then use to wipe the drive or steal data). It is recommended
74373+ that you say Y here, unless you run into software incompatibilities.
74374+ If the sysctl option is enabled, a sysctl option with name
74375+ "chroot_deny_mknod" is created.
74376+
74377+config GRKERNSEC_CHROOT_SHMAT
74378+ bool "Deny shmat() out of chroot"
74379+ depends on GRKERNSEC_CHROOT
74380+ help
74381+ If you say Y here, processes inside a chroot will not be able to attach
74382+ to shared memory segments that were created outside of the chroot jail.
74383+ It is recommended that you say Y here. If the sysctl option is enabled,
74384+ a sysctl option with name "chroot_deny_shmat" is created.
74385+
74386+config GRKERNSEC_CHROOT_UNIX
74387+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
74388+ depends on GRKERNSEC_CHROOT
74389+ help
74390+ If you say Y here, processes inside a chroot will not be able to
74391+ connect to abstract (meaning not belonging to a filesystem) Unix
74392+ domain sockets that were bound outside of a chroot. It is recommended
74393+ that you say Y here. If the sysctl option is enabled, a sysctl option
74394+ with name "chroot_deny_unix" is created.
74395+
74396+config GRKERNSEC_CHROOT_FINDTASK
74397+ bool "Protect outside processes"
74398+ depends on GRKERNSEC_CHROOT
74399+ help
74400+ If you say Y here, processes inside a chroot will not be able to
74401+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
74402+ getsid, or view any process outside of the chroot. If the sysctl
74403+ option is enabled, a sysctl option with name "chroot_findtask" is
74404+ created.
74405+
74406+config GRKERNSEC_CHROOT_NICE
74407+ bool "Restrict priority changes"
74408+ depends on GRKERNSEC_CHROOT
74409+ help
74410+ If you say Y here, processes inside a chroot will not be able to raise
74411+ the priority of processes in the chroot, or alter the priority of
74412+ processes outside the chroot. This provides more security than simply
74413+ removing CAP_SYS_NICE from the process' capability set. If the
74414+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
74415+ is created.
74416+
74417+config GRKERNSEC_CHROOT_SYSCTL
74418+ bool "Deny sysctl writes"
74419+ depends on GRKERNSEC_CHROOT
74420+ help
74421+ If you say Y here, an attacker in a chroot will not be able to
74422+ write to sysctl entries, either by sysctl(2) or through a /proc
74423+ interface. It is strongly recommended that you say Y here. If the
74424+ sysctl option is enabled, a sysctl option with name
74425+ "chroot_deny_sysctl" is created.
74426+
74427+config GRKERNSEC_CHROOT_CAPS
74428+ bool "Capability restrictions"
74429+ depends on GRKERNSEC_CHROOT
74430+ help
74431+ If you say Y here, the capabilities on all processes within a
74432+ chroot jail will be lowered to stop module insertion, raw i/o,
74433+ system and net admin tasks, rebooting the system, modifying immutable
74434+ files, modifying IPC owned by another, and changing the system time.
74435+ This is left an option because it can break some apps. Disable this
74436+ if your chrooted apps are having problems performing those kinds of
74437+ tasks. If the sysctl option is enabled, a sysctl option with
74438+ name "chroot_caps" is created.
74439+
74440+endmenu
74441+menu "Kernel Auditing"
74442+depends on GRKERNSEC
74443+
74444+config GRKERNSEC_AUDIT_GROUP
74445+ bool "Single group for auditing"
74446+ help
74447+ If you say Y here, the exec, chdir, and (un)mount logging features
74448+ will only operate on a group you specify. This option is recommended
74449+ if you only want to watch certain users instead of having a large
74450+ amount of logs from the entire system. If the sysctl option is enabled,
74451+ a sysctl option with name "audit_group" is created.
74452+
74453+config GRKERNSEC_AUDIT_GID
74454+ int "GID for auditing"
74455+ depends on GRKERNSEC_AUDIT_GROUP
74456+ default 1007
74457+
74458+config GRKERNSEC_EXECLOG
74459+ bool "Exec logging"
74460+ help
74461+ If you say Y here, all execve() calls will be logged (since the
74462+ other exec*() calls are frontends to execve(), all execution
74463+ will be logged). Useful for shell-servers that like to keep track
74464+ of their users. If the sysctl option is enabled, a sysctl option with
74465+ name "exec_logging" is created.
74466+ WARNING: This option when enabled will produce a LOT of logs, especially
74467+ on an active system.
74468+
74469+config GRKERNSEC_RESLOG
74470+ bool "Resource logging"
74471+ help
74472+ If you say Y here, all attempts to overstep resource limits will
74473+ be logged with the resource name, the requested size, and the current
74474+ limit. It is highly recommended that you say Y here. If the sysctl
74475+ option is enabled, a sysctl option with name "resource_logging" is
74476+ created. If the RBAC system is enabled, the sysctl value is ignored.
74477+
74478+config GRKERNSEC_CHROOT_EXECLOG
74479+ bool "Log execs within chroot"
74480+ help
74481+ If you say Y here, all executions inside a chroot jail will be logged
74482+ to syslog. This can cause a large amount of logs if certain
74483+ applications (eg. djb's daemontools) are installed on the system, and
74484+ is therefore left as an option. If the sysctl option is enabled, a
74485+ sysctl option with name "chroot_execlog" is created.
74486+
74487+config GRKERNSEC_AUDIT_PTRACE
74488+ bool "Ptrace logging"
74489+ help
74490+ If you say Y here, all attempts to attach to a process via ptrace
74491+ will be logged. If the sysctl option is enabled, a sysctl option
74492+ with name "audit_ptrace" is created.
74493+
74494+config GRKERNSEC_AUDIT_CHDIR
74495+ bool "Chdir logging"
74496+ help
74497+ If you say Y here, all chdir() calls will be logged. If the sysctl
74498+ option is enabled, a sysctl option with name "audit_chdir" is created.
74499+
74500+config GRKERNSEC_AUDIT_MOUNT
74501+ bool "(Un)Mount logging"
74502+ help
74503+ If you say Y here, all mounts and unmounts will be logged. If the
74504+ sysctl option is enabled, a sysctl option with name "audit_mount" is
74505+ created.
74506+
74507+config GRKERNSEC_SIGNAL
74508+ bool "Signal logging"
74509+ help
74510+ If you say Y here, certain important signals will be logged, such as
74511+ SIGSEGV, which will as a result inform you of when an error in a program
74512+ occurred, which in some cases could mean a possible exploit attempt.
74513+ If the sysctl option is enabled, a sysctl option with name
74514+ "signal_logging" is created.
74515+
74516+config GRKERNSEC_FORKFAIL
74517+ bool "Fork failure logging"
74518+ help
74519+ If you say Y here, all failed fork() attempts will be logged.
74520+ This could suggest a fork bomb, or someone attempting to overstep
74521+ their process limit. If the sysctl option is enabled, a sysctl option
74522+ with name "forkfail_logging" is created.
74523+
74524+config GRKERNSEC_TIME
74525+ bool "Time change logging"
74526+ help
74527+ If you say Y here, any changes of the system clock will be logged.
74528+ If the sysctl option is enabled, a sysctl option with name
74529+ "timechange_logging" is created.
74530+
74531+config GRKERNSEC_PROC_IPADDR
74532+ bool "/proc/<pid>/ipaddr support"
74533+ help
74534+ If you say Y here, a new entry will be added to each /proc/<pid>
74535+ directory that contains the IP address of the person using the task.
74536+ The IP is carried across local TCP and AF_UNIX stream sockets.
74537+ This information can be useful for IDS/IPSes to perform remote response
74538+ to a local attack. The entry is readable by only the owner of the
74539+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
74540+ the RBAC system), and thus does not create privacy concerns.
74541+
74542+config GRKERNSEC_RWXMAP_LOG
74543+ bool 'Denied RWX mmap/mprotect logging'
74544+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
74545+ help
74546+ If you say Y here, calls to mmap() and mprotect() with explicit
74547+ usage of PROT_WRITE and PROT_EXEC together will be logged when
74548+ denied by the PAX_MPROTECT feature. If the sysctl option is
74549+ enabled, a sysctl option with name "rwxmap_logging" is created.
74550+
74551+config GRKERNSEC_AUDIT_TEXTREL
74552+ bool 'ELF text relocations logging (READ HELP)'
74553+ depends on PAX_MPROTECT
74554+ help
74555+ If you say Y here, text relocations will be logged with the filename
74556+ of the offending library or binary. The purpose of the feature is
74557+ to help Linux distribution developers get rid of libraries and
74558+ binaries that need text relocations which hinder the future progress
74559+ of PaX. Only Linux distribution developers should say Y here, and
74560+ never on a production machine, as this option creates an information
74561+ leak that could aid an attacker in defeating the randomization of
74562+ a single memory region. If the sysctl option is enabled, a sysctl
74563+ option with name "audit_textrel" is created.
74564+
74565+endmenu
74566+
74567+menu "Executable Protections"
74568+depends on GRKERNSEC
74569+
74570+config GRKERNSEC_DMESG
74571+ bool "Dmesg(8) restriction"
74572+ help
74573+ If you say Y here, non-root users will not be able to use dmesg(8)
74574+ to view up to the last 4kb of messages in the kernel's log buffer.
74575+ The kernel's log buffer often contains kernel addresses and other
74576+ identifying information useful to an attacker in fingerprinting a
74577+ system for a targeted exploit.
74578+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
74579+ created.
74580+
74581+config GRKERNSEC_HARDEN_PTRACE
74582+ bool "Deter ptrace-based process snooping"
74583+ help
74584+ If you say Y here, TTY sniffers and other malicious monitoring
74585+ programs implemented through ptrace will be defeated. If you
74586+ have been using the RBAC system, this option has already been
74587+ enabled for several years for all users, with the ability to make
74588+ fine-grained exceptions.
74589+
74590+ This option only affects the ability of non-root users to ptrace
74591+ processes that are not a descendant of the ptracing process.
74592+ This means that strace ./binary and gdb ./binary will still work,
74593+ but attaching to arbitrary processes will not. If the sysctl
74594+ option is enabled, a sysctl option with name "harden_ptrace" is
74595+ created.
74596+
74597+config GRKERNSEC_PTRACE_READEXEC
74598+ bool "Require read access to ptrace sensitive binaries"
74599+ help
74600+ If you say Y here, unprivileged users will not be able to ptrace unreadable
74601+ binaries. This option is useful in environments that
74602+ remove the read bits (e.g. file mode 4711) from suid binaries to
74603+ prevent infoleaking of their contents. This option adds
74604+ consistency to the use of that file mode, as the binary could normally
74605+ be read out when run without privileges while ptracing.
74606+
74607+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
74608+ is created.
74609+
74610+config GRKERNSEC_SETXID
74611+ bool "Enforce consistent multithreaded privileges"
74612+ help
74613+ If you say Y here, a change from a root uid to a non-root uid
74614+ in a multithreaded application will cause the resulting uids,
74615+ gids, supplementary groups, and capabilities in that thread
74616+ to be propagated to the other threads of the process. In most
74617+ cases this is unnecessary, as glibc will emulate this behavior
74618+ on behalf of the application. Other libcs do not act in the
74619+ same way, allowing the other threads of the process to continue
74620+ running with root privileges. If the sysctl option is enabled,
74621+ a sysctl option with name "consistent_setxid" is created.
74622+
74623+config GRKERNSEC_TPE
74624+ bool "Trusted Path Execution (TPE)"
74625+ help
74626+ If you say Y here, you will be able to choose a gid to add to the
74627+ supplementary groups of users you want to mark as "untrusted."
74628+ These users will not be able to execute any files that are not in
74629+ root-owned directories writable only by root. If the sysctl option
74630+ is enabled, a sysctl option with name "tpe" is created.
74631+
74632+config GRKERNSEC_TPE_ALL
74633+ bool "Partially restrict all non-root users"
74634+ depends on GRKERNSEC_TPE
74635+ help
74636+ If you say Y here, all non-root users will be covered under
74637+ a weaker TPE restriction. This is separate from, and in addition to,
74638+ the main TPE options that you have selected elsewhere. Thus, if a
74639+ "trusted" GID is chosen, this restriction applies to even that GID.
74640+ Under this restriction, all non-root users will only be allowed to
74641+ execute files in directories they own that are not group or
74642+ world-writable, or in directories owned by root and writable only by
74643+ root. If the sysctl option is enabled, a sysctl option with name
74644+ "tpe_restrict_all" is created.
74645+
74646+config GRKERNSEC_TPE_INVERT
74647+ bool "Invert GID option"
74648+ depends on GRKERNSEC_TPE
74649+ help
74650+ If you say Y here, the group you specify in the TPE configuration will
74651+ decide what group TPE restrictions will be *disabled* for. This
74652+ option is useful if you want TPE restrictions to be applied to most
74653+ users on the system. If the sysctl option is enabled, a sysctl option
74654+ with name "tpe_invert" is created. Unlike other sysctl options, this
74655+ entry will default to on for backward-compatibility.
74656+
74657+config GRKERNSEC_TPE_GID
74658+ int "GID for untrusted users"
74659+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
74660+ default 1005
74661+ help
74662+ Setting this GID determines what group TPE restrictions will be
74663+ *enabled* for. If the sysctl option is enabled, a sysctl option
74664+ with name "tpe_gid" is created.
74665+
74666+config GRKERNSEC_TPE_GID
74667+ int "GID for trusted users"
74668+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
74669+ default 1005
74670+ help
74671+ Setting this GID determines what group TPE restrictions will be
74672+ *disabled* for. If the sysctl option is enabled, a sysctl option
74673+ with name "tpe_gid" is created.
74674+
74675+endmenu
74676+menu "Network Protections"
74677+depends on GRKERNSEC
74678+
74679+config GRKERNSEC_RANDNET
74680+ bool "Larger entropy pools"
74681+ help
74682+ If you say Y here, the entropy pools used for many features of Linux
74683+ and grsecurity will be doubled in size. Since several grsecurity
74684+ features use additional randomness, it is recommended that you say Y
74685+ here. Saying Y here has a similar effect as modifying
74686+ /proc/sys/kernel/random/poolsize.
74687+
74688+config GRKERNSEC_BLACKHOLE
74689+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
74690+ depends on NET
74691+ help
74692+ If you say Y here, neither TCP resets nor ICMP
74693+ destination-unreachable packets will be sent in response to packets
74694+ sent to ports for which no associated listening process exists.
74695+ This feature supports both IPV4 and IPV6 and exempts the
74696+ loopback interface from blackholing. Enabling this feature
74697+ makes a host more resilient to DoS attacks and reduces network
74698+ visibility against scanners.
74699+
74700+ The blackhole feature as-implemented is equivalent to the FreeBSD
74701+ blackhole feature, as it prevents RST responses to all packets, not
74702+ just SYNs. Under most application behavior this causes no
74703+ problems, but applications (like haproxy) may not close certain
74704+ connections in a way that cleanly terminates them on the remote
74705+ end, leaving the remote host in LAST_ACK state. Because of this
74706+ side-effect and to prevent intentional LAST_ACK DoSes, this
74707+ feature also adds automatic mitigation against such attacks.
74708+ The mitigation drastically reduces the amount of time a socket
74709+ can spend in LAST_ACK state. If you're using haproxy and not
74710+ all servers it connects to have this option enabled, consider
74711+ disabling this feature on the haproxy host.
74712+
74713+ If the sysctl option is enabled, two sysctl options with names
74714+ "ip_blackhole" and "lastack_retries" will be created.
74715+ While "ip_blackhole" takes the standard zero/non-zero on/off
74716+ toggle, "lastack_retries" uses the same kinds of values as
74717+ "tcp_retries1" and "tcp_retries2". The default value of 4
74718+ prevents a socket from lasting more than 45 seconds in LAST_ACK
74719+ state.
74720+
74721+config GRKERNSEC_SOCKET
74722+ bool "Socket restrictions"
74723+ depends on NET
74724+ help
74725+ If you say Y here, you will be able to choose from several options.
74726+ If you assign a GID on your system and add it to the supplementary
74727+ groups of users you want to restrict socket access to, this patch
74728+ will perform up to three things, based on the option(s) you choose.
74729+
74730+config GRKERNSEC_SOCKET_ALL
74731+ bool "Deny any sockets to group"
74732+ depends on GRKERNSEC_SOCKET
74733+ help
74734+ If you say Y here, you will be able to choose a GID whose users will
74735+ be unable to connect to other hosts from your machine or run server
74736+ applications from your machine. If the sysctl option is enabled, a
74737+ sysctl option with name "socket_all" is created.
74738+
74739+config GRKERNSEC_SOCKET_ALL_GID
74740+ int "GID to deny all sockets for"
74741+ depends on GRKERNSEC_SOCKET_ALL
74742+ default 1004
74743+ help
74744+ Here you can choose the GID to disable socket access for. Remember to
74745+ add the users you want socket access disabled for to the GID
74746+ specified here. If the sysctl option is enabled, a sysctl option
74747+ with name "socket_all_gid" is created.
74748+
74749+config GRKERNSEC_SOCKET_CLIENT
74750+ bool "Deny client sockets to group"
74751+ depends on GRKERNSEC_SOCKET
74752+ help
74753+ If you say Y here, you will be able to choose a GID whose users will
74754+ be unable to connect to other hosts from your machine, but will be
74755+ able to run servers. If this option is enabled, all users in the group
74756+ you specify will have to use passive mode when initiating ftp transfers
74757+ from the shell on your machine. If the sysctl option is enabled, a
74758+ sysctl option with name "socket_client" is created.
74759+
74760+config GRKERNSEC_SOCKET_CLIENT_GID
74761+ int "GID to deny client sockets for"
74762+ depends on GRKERNSEC_SOCKET_CLIENT
74763+ default 1003
74764+ help
74765+ Here you can choose the GID to disable client socket access for.
74766+ Remember to add the users you want client socket access disabled for to
74767+ the GID specified here. If the sysctl option is enabled, a sysctl
74768+ option with name "socket_client_gid" is created.
74769+
74770+config GRKERNSEC_SOCKET_SERVER
74771+ bool "Deny server sockets to group"
74772+ depends on GRKERNSEC_SOCKET
74773+ help
74774+ If you say Y here, you will be able to choose a GID whose users will
74775+ be unable to run server applications from your machine. If the sysctl
74776+ option is enabled, a sysctl option with name "socket_server" is created.
74777+
74778+config GRKERNSEC_SOCKET_SERVER_GID
74779+ int "GID to deny server sockets for"
74780+ depends on GRKERNSEC_SOCKET_SERVER
74781+ default 1002
74782+ help
74783+ Here you can choose the GID to disable server socket access for.
74784+ Remember to add the users you want server socket access disabled for to
74785+ the GID specified here. If the sysctl option is enabled, a sysctl
74786+ option with name "socket_server_gid" is created.
74787+
74788+endmenu
74789+menu "Sysctl support"
74790+depends on GRKERNSEC && SYSCTL
74791+
74792+config GRKERNSEC_SYSCTL
74793+ bool "Sysctl support"
74794+ help
74795+ If you say Y here, you will be able to change the options that
74796+ grsecurity runs with at bootup, without having to recompile your
74797+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
74798+ to enable (1) or disable (0) various features. All the sysctl entries
74799+ are mutable until the "grsec_lock" entry is set to a non-zero value.
74800+ All features enabled in the kernel configuration are disabled at boot
74801+ if you do not say Y to the "Turn on features by default" option.
74802+ All options should be set at startup, and the grsec_lock entry should
74803+ be set to a non-zero value after all the options are set.
74804+ *THIS IS EXTREMELY IMPORTANT*
74805+
74806+config GRKERNSEC_SYSCTL_DISTRO
74807+ bool "Extra sysctl support for distro makers (READ HELP)"
74808+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
74809+ help
74810+ If you say Y here, additional sysctl options will be created
74811+ for features that affect processes running as root. Therefore,
74812+ it is critical when using this option that the grsec_lock entry be
74813+ enabled after boot. Only distros with prebuilt kernel packages
74814+ with this option enabled that can ensure grsec_lock is enabled
74815+ after boot should use this option.
74816+ *Failure to set grsec_lock after boot makes all grsec features
74817+ this option covers useless*
74818+
74819+ Currently this option creates the following sysctl entries:
74820+ "Disable Privileged I/O": "disable_priv_io"
74821+
74822+config GRKERNSEC_SYSCTL_ON
74823+ bool "Turn on features by default"
74824+ depends on GRKERNSEC_SYSCTL
74825+ help
74826+ If you say Y here, instead of having all features enabled in the
74827+ kernel configuration disabled at boot time, the features will be
74828+ enabled at boot time. It is recommended you say Y here unless
74829+ there is some reason you would want all sysctl-tunable features to
74830+ be disabled by default. As mentioned elsewhere, it is important
74831+ to enable the grsec_lock entry once you have finished modifying
74832+ the sysctl entries.
74833+
74834+endmenu
74835+menu "Logging Options"
74836+depends on GRKERNSEC
74837+
74838+config GRKERNSEC_FLOODTIME
74839+ int "Seconds in between log messages (minimum)"
74840+ default 10
74841+ help
74842+ This option allows you to enforce the number of seconds between
74843+ grsecurity log messages. The default should be suitable for most
74844+ people, however, if you choose to change it, choose a value small enough
74845+ to allow informative logs to be produced, but large enough to
74846+ prevent flooding.
74847+
74848+config GRKERNSEC_FLOODBURST
74849+ int "Number of messages in a burst (maximum)"
74850+ default 6
74851+ help
74852+ This option allows you to choose the maximum number of messages allowed
74853+ within the flood time interval you chose in a separate option. The
74854+ default should be suitable for most people, however if you find that
74855+ many of your logs are being interpreted as flooding, you may want to
74856+ raise this value.
74857+
74858+endmenu
74859+
74860+endmenu
74861diff --git a/grsecurity/Makefile b/grsecurity/Makefile
74862new file mode 100644
74863index 0000000..1b9afa9
74864--- /dev/null
74865+++ b/grsecurity/Makefile
74866@@ -0,0 +1,38 @@
74867+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
74868+# during 2001-2009 it has been completely redesigned by Brad Spengler
74869+# into an RBAC system
74870+#
74871+# All code in this directory and various hooks inserted throughout the kernel
74872+# are copyright Brad Spengler - Open Source Security, Inc., and released
74873+# under the GPL v2 or higher
74874+
74875+KBUILD_CFLAGS += -Werror
74876+
74877+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
74878+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
74879+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
74880+
74881+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
74882+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
74883+ gracl_learn.o grsec_log.o
74884+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
74885+
74886+ifdef CONFIG_NET
74887+obj-y += grsec_sock.o
74888+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
74889+endif
74890+
74891+ifndef CONFIG_GRKERNSEC
74892+obj-y += grsec_disabled.o
74893+endif
74894+
74895+ifdef CONFIG_GRKERNSEC_HIDESYM
74896+extra-y := grsec_hidesym.o
74897+$(obj)/grsec_hidesym.o:
74898+ @-chmod -f 500 /boot
74899+ @-chmod -f 500 /lib/modules
74900+ @-chmod -f 500 /lib64/modules
74901+ @-chmod -f 500 /lib32/modules
74902+ @-chmod -f 700 .
74903+ @echo ' grsec: protected kernel image paths'
74904+endif
74905diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
74906new file mode 100644
74907index 0000000..d881a39
74908--- /dev/null
74909+++ b/grsecurity/gracl.c
74910@@ -0,0 +1,4156 @@
74911+#include <linux/kernel.h>
74912+#include <linux/module.h>
74913+#include <linux/sched.h>
74914+#include <linux/mm.h>
74915+#include <linux/file.h>
74916+#include <linux/fs.h>
74917+#include <linux/namei.h>
74918+#include <linux/mount.h>
74919+#include <linux/tty.h>
74920+#include <linux/proc_fs.h>
74921+#include <linux/smp_lock.h>
74922+#include <linux/slab.h>
74923+#include <linux/vmalloc.h>
74924+#include <linux/types.h>
74925+#include <linux/sysctl.h>
74926+#include <linux/netdevice.h>
74927+#include <linux/ptrace.h>
74928+#include <linux/gracl.h>
74929+#include <linux/gralloc.h>
74930+#include <linux/security.h>
74931+#include <linux/grinternal.h>
74932+#include <linux/pid_namespace.h>
74933+#include <linux/fdtable.h>
74934+#include <linux/percpu.h>
74935+
74936+#include <asm/uaccess.h>
74937+#include <asm/errno.h>
74938+#include <asm/mman.h>
74939+
74940+static struct acl_role_db acl_role_set;
74941+static struct name_db name_set;
74942+static struct inodev_db inodev_set;
74943+
74944+/* for keeping track of userspace pointers used for subjects, so we
74945+ can share references in the kernel as well
74946+*/
74947+
74948+static struct dentry *real_root;
74949+static struct vfsmount *real_root_mnt;
74950+
74951+static struct acl_subj_map_db subj_map_set;
74952+
74953+static struct acl_role_label *default_role;
74954+
74955+static struct acl_role_label *role_list;
74956+
74957+static u16 acl_sp_role_value;
74958+
74959+extern char *gr_shared_page[4];
74960+static DEFINE_MUTEX(gr_dev_mutex);
74961+DEFINE_RWLOCK(gr_inode_lock);
74962+
74963+struct gr_arg *gr_usermode;
74964+
74965+static unsigned int gr_status __read_only = GR_STATUS_INIT;
74966+
74967+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74968+extern void gr_clear_learn_entries(void);
74969+
74970+#ifdef CONFIG_GRKERNSEC_RESLOG
74971+extern void gr_log_resource(const struct task_struct *task,
74972+ const int res, const unsigned long wanted, const int gt);
74973+#endif
74974+
74975+unsigned char *gr_system_salt;
74976+unsigned char *gr_system_sum;
74977+
74978+static struct sprole_pw **acl_special_roles = NULL;
74979+static __u16 num_sprole_pws = 0;
74980+
74981+static struct acl_role_label *kernel_role = NULL;
74982+
74983+static unsigned int gr_auth_attempts = 0;
74984+static unsigned long gr_auth_expires = 0UL;
74985+
74986+#ifdef CONFIG_NET
74987+extern struct vfsmount *sock_mnt;
74988+#endif
74989+extern struct vfsmount *pipe_mnt;
74990+extern struct vfsmount *shm_mnt;
74991+#ifdef CONFIG_HUGETLBFS
74992+extern struct vfsmount *hugetlbfs_vfsmount;
74993+#endif
74994+
74995+static struct acl_object_label *fakefs_obj_rw;
74996+static struct acl_object_label *fakefs_obj_rwx;
74997+
74998+extern int gr_init_uidset(void);
74999+extern void gr_free_uidset(void);
75000+extern void gr_remove_uid(uid_t uid);
75001+extern int gr_find_uid(uid_t uid);
75002+
75003+__inline__ int
75004+gr_acl_is_enabled(void)
75005+{
75006+ return (gr_status & GR_READY);
75007+}
75008+
75009+#ifdef CONFIG_BTRFS_FS
75010+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
75011+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
75012+#endif
75013+
75014+static inline dev_t __get_dev(const struct dentry *dentry)
75015+{
75016+#ifdef CONFIG_BTRFS_FS
75017+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
75018+ return get_btrfs_dev_from_inode(dentry->d_inode);
75019+ else
75020+#endif
75021+ return dentry->d_inode->i_sb->s_dev;
75022+}
75023+
75024+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
75025+{
75026+ return __get_dev(dentry);
75027+}
75028+
75029+static char gr_task_roletype_to_char(struct task_struct *task)
75030+{
75031+ switch (task->role->roletype &
75032+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
75033+ GR_ROLE_SPECIAL)) {
75034+ case GR_ROLE_DEFAULT:
75035+ return 'D';
75036+ case GR_ROLE_USER:
75037+ return 'U';
75038+ case GR_ROLE_GROUP:
75039+ return 'G';
75040+ case GR_ROLE_SPECIAL:
75041+ return 'S';
75042+ }
75043+
75044+ return 'X';
75045+}
75046+
75047+char gr_roletype_to_char(void)
75048+{
75049+ return gr_task_roletype_to_char(current);
75050+}
75051+
75052+__inline__ int
75053+gr_acl_tpe_check(void)
75054+{
75055+ if (unlikely(!(gr_status & GR_READY)))
75056+ return 0;
75057+ if (current->role->roletype & GR_ROLE_TPE)
75058+ return 1;
75059+ else
75060+ return 0;
75061+}
75062+
75063+int
75064+gr_handle_rawio(const struct inode *inode)
75065+{
75066+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
75067+ if (inode && S_ISBLK(inode->i_mode) &&
75068+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
75069+ !capable(CAP_SYS_RAWIO))
75070+ return 1;
75071+#endif
75072+ return 0;
75073+}
75074+
75075+static int
75076+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
75077+{
75078+ if (likely(lena != lenb))
75079+ return 0;
75080+
75081+ return !memcmp(a, b, lena);
75082+}
75083+
75084+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
75085+{
75086+ *buflen -= namelen;
75087+ if (*buflen < 0)
75088+ return -ENAMETOOLONG;
75089+ *buffer -= namelen;
75090+ memcpy(*buffer, str, namelen);
75091+ return 0;
75092+}
75093+
75094+/* this must be called with vfsmount_lock and dcache_lock held */
75095+
75096+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75097+ struct dentry *root, struct vfsmount *rootmnt,
75098+ char *buffer, int buflen)
75099+{
75100+ char * end = buffer+buflen;
75101+ char * retval;
75102+ int namelen;
75103+
75104+ *--end = '\0';
75105+ buflen--;
75106+
75107+ if (buflen < 1)
75108+ goto Elong;
75109+ /* Get '/' right */
75110+ retval = end-1;
75111+ *retval = '/';
75112+
75113+ for (;;) {
75114+ struct dentry * parent;
75115+
75116+ if (dentry == root && vfsmnt == rootmnt)
75117+ break;
75118+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
75119+ /* Global root? */
75120+ if (vfsmnt->mnt_parent == vfsmnt)
75121+ goto global_root;
75122+ dentry = vfsmnt->mnt_mountpoint;
75123+ vfsmnt = vfsmnt->mnt_parent;
75124+ continue;
75125+ }
75126+ parent = dentry->d_parent;
75127+ prefetch(parent);
75128+ namelen = dentry->d_name.len;
75129+ buflen -= namelen + 1;
75130+ if (buflen < 0)
75131+ goto Elong;
75132+ end -= namelen;
75133+ memcpy(end, dentry->d_name.name, namelen);
75134+ *--end = '/';
75135+ retval = end;
75136+ dentry = parent;
75137+ }
75138+
75139+out:
75140+ return retval;
75141+
75142+global_root:
75143+ namelen = dentry->d_name.len;
75144+ buflen -= namelen;
75145+ if (buflen < 0)
75146+ goto Elong;
75147+ retval -= namelen-1; /* hit the slash */
75148+ memcpy(retval, dentry->d_name.name, namelen);
75149+ goto out;
75150+Elong:
75151+ retval = ERR_PTR(-ENAMETOOLONG);
75152+ goto out;
75153+}
75154+
75155+static char *
75156+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75157+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
75158+{
75159+ char *retval;
75160+
75161+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
75162+ if (unlikely(IS_ERR(retval)))
75163+ retval = strcpy(buf, "<path too long>");
75164+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
75165+ retval[1] = '\0';
75166+
75167+ return retval;
75168+}
75169+
75170+static char *
75171+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75172+ char *buf, int buflen)
75173+{
75174+ char *res;
75175+
75176+ /* we can use real_root, real_root_mnt, because this is only called
75177+ by the RBAC system */
75178+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
75179+
75180+ return res;
75181+}
75182+
75183+static char *
75184+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75185+ char *buf, int buflen)
75186+{
75187+ char *res;
75188+ struct dentry *root;
75189+ struct vfsmount *rootmnt;
75190+ struct task_struct *reaper = &init_task;
75191+
75192+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
75193+ read_lock(&reaper->fs->lock);
75194+ root = dget(reaper->fs->root.dentry);
75195+ rootmnt = mntget(reaper->fs->root.mnt);
75196+ read_unlock(&reaper->fs->lock);
75197+
75198+ spin_lock(&dcache_lock);
75199+ spin_lock(&vfsmount_lock);
75200+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
75201+ spin_unlock(&vfsmount_lock);
75202+ spin_unlock(&dcache_lock);
75203+
75204+ dput(root);
75205+ mntput(rootmnt);
75206+ return res;
75207+}
75208+
75209+static char *
75210+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75211+{
75212+ char *ret;
75213+ spin_lock(&dcache_lock);
75214+ spin_lock(&vfsmount_lock);
75215+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75216+ PAGE_SIZE);
75217+ spin_unlock(&vfsmount_lock);
75218+ spin_unlock(&dcache_lock);
75219+ return ret;
75220+}
75221+
75222+static char *
75223+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75224+{
75225+ char *ret;
75226+ char *buf;
75227+ int buflen;
75228+
75229+ spin_lock(&dcache_lock);
75230+ spin_lock(&vfsmount_lock);
75231+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
75232+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
75233+ buflen = (int)(ret - buf);
75234+ if (buflen >= 5)
75235+ prepend(&ret, &buflen, "/proc", 5);
75236+ else
75237+ ret = strcpy(buf, "<path too long>");
75238+ spin_unlock(&vfsmount_lock);
75239+ spin_unlock(&dcache_lock);
75240+ return ret;
75241+}
75242+
75243+char *
75244+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
75245+{
75246+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75247+ PAGE_SIZE);
75248+}
75249+
75250+char *
75251+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
75252+{
75253+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
75254+ PAGE_SIZE);
75255+}
75256+
75257+char *
75258+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
75259+{
75260+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
75261+ PAGE_SIZE);
75262+}
75263+
75264+char *
75265+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
75266+{
75267+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
75268+ PAGE_SIZE);
75269+}
75270+
75271+char *
75272+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
75273+{
75274+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
75275+ PAGE_SIZE);
75276+}
75277+
75278+__inline__ __u32
75279+to_gr_audit(const __u32 reqmode)
75280+{
75281+ /* masks off auditable permission flags, then shifts them to create
75282+ auditing flags, and adds the special case of append auditing if
75283+ we're requesting write */
75284+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
75285+}
75286+
75287+struct acl_subject_label *
75288+lookup_subject_map(const struct acl_subject_label *userp)
75289+{
75290+ unsigned int index = shash(userp, subj_map_set.s_size);
75291+ struct subject_map *match;
75292+
75293+ match = subj_map_set.s_hash[index];
75294+
75295+ while (match && match->user != userp)
75296+ match = match->next;
75297+
75298+ if (match != NULL)
75299+ return match->kernel;
75300+ else
75301+ return NULL;
75302+}
75303+
75304+static void
75305+insert_subj_map_entry(struct subject_map *subjmap)
75306+{
75307+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
75308+ struct subject_map **curr;
75309+
75310+ subjmap->prev = NULL;
75311+
75312+ curr = &subj_map_set.s_hash[index];
75313+ if (*curr != NULL)
75314+ (*curr)->prev = subjmap;
75315+
75316+ subjmap->next = *curr;
75317+ *curr = subjmap;
75318+
75319+ return;
75320+}
75321+
75322+static struct acl_role_label *
75323+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
75324+ const gid_t gid)
75325+{
75326+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
75327+ struct acl_role_label *match;
75328+ struct role_allowed_ip *ipp;
75329+ unsigned int x;
75330+ u32 curr_ip = task->signal->curr_ip;
75331+
75332+ task->signal->saved_ip = curr_ip;
75333+
75334+ match = acl_role_set.r_hash[index];
75335+
75336+ while (match) {
75337+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
75338+ for (x = 0; x < match->domain_child_num; x++) {
75339+ if (match->domain_children[x] == uid)
75340+ goto found;
75341+ }
75342+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
75343+ break;
75344+ match = match->next;
75345+ }
75346+found:
75347+ if (match == NULL) {
75348+ try_group:
75349+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
75350+ match = acl_role_set.r_hash[index];
75351+
75352+ while (match) {
75353+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
75354+ for (x = 0; x < match->domain_child_num; x++) {
75355+ if (match->domain_children[x] == gid)
75356+ goto found2;
75357+ }
75358+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
75359+ break;
75360+ match = match->next;
75361+ }
75362+found2:
75363+ if (match == NULL)
75364+ match = default_role;
75365+ if (match->allowed_ips == NULL)
75366+ return match;
75367+ else {
75368+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75369+ if (likely
75370+ ((ntohl(curr_ip) & ipp->netmask) ==
75371+ (ntohl(ipp->addr) & ipp->netmask)))
75372+ return match;
75373+ }
75374+ match = default_role;
75375+ }
75376+ } else if (match->allowed_ips == NULL) {
75377+ return match;
75378+ } else {
75379+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75380+ if (likely
75381+ ((ntohl(curr_ip) & ipp->netmask) ==
75382+ (ntohl(ipp->addr) & ipp->netmask)))
75383+ return match;
75384+ }
75385+ goto try_group;
75386+ }
75387+
75388+ return match;
75389+}
75390+
75391+struct acl_subject_label *
75392+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
75393+ const struct acl_role_label *role)
75394+{
75395+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
75396+ struct acl_subject_label *match;
75397+
75398+ match = role->subj_hash[index];
75399+
75400+ while (match && (match->inode != ino || match->device != dev ||
75401+ (match->mode & GR_DELETED))) {
75402+ match = match->next;
75403+ }
75404+
75405+ if (match && !(match->mode & GR_DELETED))
75406+ return match;
75407+ else
75408+ return NULL;
75409+}
75410+
75411+struct acl_subject_label *
75412+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
75413+ const struct acl_role_label *role)
75414+{
75415+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
75416+ struct acl_subject_label *match;
75417+
75418+ match = role->subj_hash[index];
75419+
75420+ while (match && (match->inode != ino || match->device != dev ||
75421+ !(match->mode & GR_DELETED))) {
75422+ match = match->next;
75423+ }
75424+
75425+ if (match && (match->mode & GR_DELETED))
75426+ return match;
75427+ else
75428+ return NULL;
75429+}
75430+
75431+static struct acl_object_label *
75432+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
75433+ const struct acl_subject_label *subj)
75434+{
75435+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75436+ struct acl_object_label *match;
75437+
75438+ match = subj->obj_hash[index];
75439+
75440+ while (match && (match->inode != ino || match->device != dev ||
75441+ (match->mode & GR_DELETED))) {
75442+ match = match->next;
75443+ }
75444+
75445+ if (match && !(match->mode & GR_DELETED))
75446+ return match;
75447+ else
75448+ return NULL;
75449+}
75450+
75451+static struct acl_object_label *
75452+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
75453+ const struct acl_subject_label *subj)
75454+{
75455+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75456+ struct acl_object_label *match;
75457+
75458+ match = subj->obj_hash[index];
75459+
75460+ while (match && (match->inode != ino || match->device != dev ||
75461+ !(match->mode & GR_DELETED))) {
75462+ match = match->next;
75463+ }
75464+
75465+ if (match && (match->mode & GR_DELETED))
75466+ return match;
75467+
75468+ match = subj->obj_hash[index];
75469+
75470+ while (match && (match->inode != ino || match->device != dev ||
75471+ (match->mode & GR_DELETED))) {
75472+ match = match->next;
75473+ }
75474+
75475+ if (match && !(match->mode & GR_DELETED))
75476+ return match;
75477+ else
75478+ return NULL;
75479+}
75480+
75481+static struct name_entry *
75482+lookup_name_entry(const char *name)
75483+{
75484+ unsigned int len = strlen(name);
75485+ unsigned int key = full_name_hash(name, len);
75486+ unsigned int index = key % name_set.n_size;
75487+ struct name_entry *match;
75488+
75489+ match = name_set.n_hash[index];
75490+
75491+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
75492+ match = match->next;
75493+
75494+ return match;
75495+}
75496+
75497+static struct name_entry *
75498+lookup_name_entry_create(const char *name)
75499+{
75500+ unsigned int len = strlen(name);
75501+ unsigned int key = full_name_hash(name, len);
75502+ unsigned int index = key % name_set.n_size;
75503+ struct name_entry *match;
75504+
75505+ match = name_set.n_hash[index];
75506+
75507+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75508+ !match->deleted))
75509+ match = match->next;
75510+
75511+ if (match && match->deleted)
75512+ return match;
75513+
75514+ match = name_set.n_hash[index];
75515+
75516+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75517+ match->deleted))
75518+ match = match->next;
75519+
75520+ if (match && !match->deleted)
75521+ return match;
75522+ else
75523+ return NULL;
75524+}
75525+
75526+static struct inodev_entry *
75527+lookup_inodev_entry(const ino_t ino, const dev_t dev)
75528+{
75529+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
75530+ struct inodev_entry *match;
75531+
75532+ match = inodev_set.i_hash[index];
75533+
75534+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
75535+ match = match->next;
75536+
75537+ return match;
75538+}
75539+
75540+static void
75541+insert_inodev_entry(struct inodev_entry *entry)
75542+{
75543+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
75544+ inodev_set.i_size);
75545+ struct inodev_entry **curr;
75546+
75547+ entry->prev = NULL;
75548+
75549+ curr = &inodev_set.i_hash[index];
75550+ if (*curr != NULL)
75551+ (*curr)->prev = entry;
75552+
75553+ entry->next = *curr;
75554+ *curr = entry;
75555+
75556+ return;
75557+}
75558+
75559+static void
75560+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75561+{
75562+ unsigned int index =
75563+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
75564+ struct acl_role_label **curr;
75565+ struct acl_role_label *tmp, *tmp2;
75566+
75567+ curr = &acl_role_set.r_hash[index];
75568+
75569+ /* simple case, slot is empty, just set it to our role */
75570+ if (*curr == NULL) {
75571+ *curr = role;
75572+ } else {
75573+ /* example:
75574+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75575+ 2 -> 3
75576+ */
75577+ /* first check to see if we can already be reached via this slot */
75578+ tmp = *curr;
75579+ while (tmp && tmp != role)
75580+ tmp = tmp->next;
75581+ if (tmp == role) {
75582+ /* we don't need to add ourselves to this slot's chain */
75583+ return;
75584+ }
75585+ /* we need to add ourselves to this chain, two cases */
75586+ if (role->next == NULL) {
75587+ /* simple case, append the current chain to our role */
75588+ role->next = *curr;
75589+ *curr = role;
75590+ } else {
75591+ /* 1 -> 2 -> 3 -> 4
75592+ 2 -> 3 -> 4
75593+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75594+ */
75595+ /* trickier case: walk our role's chain until we find
75596+ the role for the start of the current slot's chain */
75597+ tmp = role;
75598+ tmp2 = *curr;
75599+ while (tmp->next && tmp->next != tmp2)
75600+ tmp = tmp->next;
75601+ if (tmp->next == tmp2) {
75602+ /* from example above, we found 3, so just
75603+ replace this slot's chain with ours */
75604+ *curr = role;
75605+ } else {
75606+ /* we didn't find a subset of our role's chain
75607+ in the current slot's chain, so append their
75608+ chain to ours, and set us as the first role in
75609+ the slot's chain
75610+
75611+ we could fold this case with the case above,
75612+ but making it explicit for clarity
75613+ */
75614+ tmp->next = tmp2;
75615+ *curr = role;
75616+ }
75617+ }
75618+ }
75619+
75620+ return;
75621+}
75622+
75623+static void
75624+insert_acl_role_label(struct acl_role_label *role)
75625+{
75626+ int i;
75627+
75628+ if (role_list == NULL) {
75629+ role_list = role;
75630+ role->prev = NULL;
75631+ } else {
75632+ role->prev = role_list;
75633+ role_list = role;
75634+ }
75635+
75636+ /* used for hash chains */
75637+ role->next = NULL;
75638+
75639+ if (role->roletype & GR_ROLE_DOMAIN) {
75640+ for (i = 0; i < role->domain_child_num; i++)
75641+ __insert_acl_role_label(role, role->domain_children[i]);
75642+ } else
75643+ __insert_acl_role_label(role, role->uidgid);
75644+}
75645+
75646+static int
75647+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75648+{
75649+ struct name_entry **curr, *nentry;
75650+ struct inodev_entry *ientry;
75651+ unsigned int len = strlen(name);
75652+ unsigned int key = full_name_hash(name, len);
75653+ unsigned int index = key % name_set.n_size;
75654+
75655+ curr = &name_set.n_hash[index];
75656+
75657+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75658+ curr = &((*curr)->next);
75659+
75660+ if (*curr != NULL)
75661+ return 1;
75662+
75663+ nentry = acl_alloc(sizeof (struct name_entry));
75664+ if (nentry == NULL)
75665+ return 0;
75666+ ientry = acl_alloc(sizeof (struct inodev_entry));
75667+ if (ientry == NULL)
75668+ return 0;
75669+ ientry->nentry = nentry;
75670+
75671+ nentry->key = key;
75672+ nentry->name = name;
75673+ nentry->inode = inode;
75674+ nentry->device = device;
75675+ nentry->len = len;
75676+ nentry->deleted = deleted;
75677+
75678+ nentry->prev = NULL;
75679+ curr = &name_set.n_hash[index];
75680+ if (*curr != NULL)
75681+ (*curr)->prev = nentry;
75682+ nentry->next = *curr;
75683+ *curr = nentry;
75684+
75685+ /* insert us into the table searchable by inode/dev */
75686+ insert_inodev_entry(ientry);
75687+
75688+ return 1;
75689+}
75690+
75691+static void
75692+insert_acl_obj_label(struct acl_object_label *obj,
75693+ struct acl_subject_label *subj)
75694+{
75695+ unsigned int index =
75696+ fhash(obj->inode, obj->device, subj->obj_hash_size);
75697+ struct acl_object_label **curr;
75698+
75699+
75700+ obj->prev = NULL;
75701+
75702+ curr = &subj->obj_hash[index];
75703+ if (*curr != NULL)
75704+ (*curr)->prev = obj;
75705+
75706+ obj->next = *curr;
75707+ *curr = obj;
75708+
75709+ return;
75710+}
75711+
75712+static void
75713+insert_acl_subj_label(struct acl_subject_label *obj,
75714+ struct acl_role_label *role)
75715+{
75716+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
75717+ struct acl_subject_label **curr;
75718+
75719+ obj->prev = NULL;
75720+
75721+ curr = &role->subj_hash[index];
75722+ if (*curr != NULL)
75723+ (*curr)->prev = obj;
75724+
75725+ obj->next = *curr;
75726+ *curr = obj;
75727+
75728+ return;
75729+}
75730+
75731+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75732+
75733+static void *
75734+create_table(__u32 * len, int elementsize)
75735+{
75736+ unsigned int table_sizes[] = {
75737+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75738+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75739+ 4194301, 8388593, 16777213, 33554393, 67108859
75740+ };
75741+ void *newtable = NULL;
75742+ unsigned int pwr = 0;
75743+
75744+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75745+ table_sizes[pwr] <= *len)
75746+ pwr++;
75747+
75748+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75749+ return newtable;
75750+
75751+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75752+ newtable =
75753+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75754+ else
75755+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75756+
75757+ *len = table_sizes[pwr];
75758+
75759+ return newtable;
75760+}
75761+
75762+static int
75763+init_variables(const struct gr_arg *arg)
75764+{
75765+ struct task_struct *reaper = &init_task;
75766+ unsigned int stacksize;
75767+
75768+ subj_map_set.s_size = arg->role_db.num_subjects;
75769+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75770+ name_set.n_size = arg->role_db.num_objects;
75771+ inodev_set.i_size = arg->role_db.num_objects;
75772+
75773+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
75774+ !name_set.n_size || !inodev_set.i_size)
75775+ return 1;
75776+
75777+ if (!gr_init_uidset())
75778+ return 1;
75779+
75780+ /* set up the stack that holds allocation info */
75781+
75782+ stacksize = arg->role_db.num_pointers + 5;
75783+
75784+ if (!acl_alloc_stack_init(stacksize))
75785+ return 1;
75786+
75787+ /* grab reference for the real root dentry and vfsmount */
75788+ read_lock(&reaper->fs->lock);
75789+ real_root = dget(reaper->fs->root.dentry);
75790+ real_root_mnt = mntget(reaper->fs->root.mnt);
75791+ read_unlock(&reaper->fs->lock);
75792+
75793+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75794+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
75795+#endif
75796+
75797+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
75798+ if (fakefs_obj_rw == NULL)
75799+ return 1;
75800+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75801+
75802+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
75803+ if (fakefs_obj_rwx == NULL)
75804+ return 1;
75805+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75806+
75807+ subj_map_set.s_hash =
75808+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
75809+ acl_role_set.r_hash =
75810+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
75811+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
75812+ inodev_set.i_hash =
75813+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
75814+
75815+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
75816+ !name_set.n_hash || !inodev_set.i_hash)
75817+ return 1;
75818+
75819+ memset(subj_map_set.s_hash, 0,
75820+ sizeof(struct subject_map *) * subj_map_set.s_size);
75821+ memset(acl_role_set.r_hash, 0,
75822+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
75823+ memset(name_set.n_hash, 0,
75824+ sizeof (struct name_entry *) * name_set.n_size);
75825+ memset(inodev_set.i_hash, 0,
75826+ sizeof (struct inodev_entry *) * inodev_set.i_size);
75827+
75828+ return 0;
75829+}
75830+
75831+/* free information not needed after startup
75832+ currently contains user->kernel pointer mappings for subjects
75833+*/
75834+
75835+static void
75836+free_init_variables(void)
75837+{
75838+ __u32 i;
75839+
75840+ if (subj_map_set.s_hash) {
75841+ for (i = 0; i < subj_map_set.s_size; i++) {
75842+ if (subj_map_set.s_hash[i]) {
75843+ kfree(subj_map_set.s_hash[i]);
75844+ subj_map_set.s_hash[i] = NULL;
75845+ }
75846+ }
75847+
75848+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
75849+ PAGE_SIZE)
75850+ kfree(subj_map_set.s_hash);
75851+ else
75852+ vfree(subj_map_set.s_hash);
75853+ }
75854+
75855+ return;
75856+}
75857+
75858+static void
75859+free_variables(void)
75860+{
75861+ struct acl_subject_label *s;
75862+ struct acl_role_label *r;
75863+ struct task_struct *task, *task2;
75864+ unsigned int x;
75865+
75866+ gr_clear_learn_entries();
75867+
75868+ read_lock(&tasklist_lock);
75869+ do_each_thread(task2, task) {
75870+ task->acl_sp_role = 0;
75871+ task->acl_role_id = 0;
75872+ task->acl = NULL;
75873+ task->role = NULL;
75874+ } while_each_thread(task2, task);
75875+ read_unlock(&tasklist_lock);
75876+
75877+ /* release the reference to the real root dentry and vfsmount */
75878+ if (real_root)
75879+ dput(real_root);
75880+ real_root = NULL;
75881+ if (real_root_mnt)
75882+ mntput(real_root_mnt);
75883+ real_root_mnt = NULL;
75884+
75885+ /* free all object hash tables */
75886+
75887+ FOR_EACH_ROLE_START(r)
75888+ if (r->subj_hash == NULL)
75889+ goto next_role;
75890+ FOR_EACH_SUBJECT_START(r, s, x)
75891+ if (s->obj_hash == NULL)
75892+ break;
75893+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75894+ kfree(s->obj_hash);
75895+ else
75896+ vfree(s->obj_hash);
75897+ FOR_EACH_SUBJECT_END(s, x)
75898+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75899+ if (s->obj_hash == NULL)
75900+ break;
75901+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75902+ kfree(s->obj_hash);
75903+ else
75904+ vfree(s->obj_hash);
75905+ FOR_EACH_NESTED_SUBJECT_END(s)
75906+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75907+ kfree(r->subj_hash);
75908+ else
75909+ vfree(r->subj_hash);
75910+ r->subj_hash = NULL;
75911+next_role:
75912+ FOR_EACH_ROLE_END(r)
75913+
75914+ acl_free_all();
75915+
75916+ if (acl_role_set.r_hash) {
75917+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75918+ PAGE_SIZE)
75919+ kfree(acl_role_set.r_hash);
75920+ else
75921+ vfree(acl_role_set.r_hash);
75922+ }
75923+ if (name_set.n_hash) {
75924+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
75925+ PAGE_SIZE)
75926+ kfree(name_set.n_hash);
75927+ else
75928+ vfree(name_set.n_hash);
75929+ }
75930+
75931+ if (inodev_set.i_hash) {
75932+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75933+ PAGE_SIZE)
75934+ kfree(inodev_set.i_hash);
75935+ else
75936+ vfree(inodev_set.i_hash);
75937+ }
75938+
75939+ gr_free_uidset();
75940+
75941+ memset(&name_set, 0, sizeof (struct name_db));
75942+ memset(&inodev_set, 0, sizeof (struct inodev_db));
75943+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
75944+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
75945+
75946+ default_role = NULL;
75947+ kernel_role = NULL;
75948+ role_list = NULL;
75949+
75950+ return;
75951+}
75952+
75953+static __u32
75954+count_user_objs(struct acl_object_label *userp)
75955+{
75956+ struct acl_object_label o_tmp;
75957+ __u32 num = 0;
75958+
75959+ while (userp) {
75960+ if (copy_from_user(&o_tmp, userp,
75961+ sizeof (struct acl_object_label)))
75962+ break;
75963+
75964+ userp = o_tmp.prev;
75965+ num++;
75966+ }
75967+
75968+ return num;
75969+}
75970+
75971+static struct acl_subject_label *
75972+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
75973+
75974+static int
75975+copy_user_glob(struct acl_object_label *obj)
75976+{
75977+ struct acl_object_label *g_tmp, **guser;
75978+ unsigned int len;
75979+ char *tmp;
75980+
75981+ if (obj->globbed == NULL)
75982+ return 0;
75983+
75984+ guser = &obj->globbed;
75985+ while (*guser) {
75986+ g_tmp = (struct acl_object_label *)
75987+ acl_alloc(sizeof (struct acl_object_label));
75988+ if (g_tmp == NULL)
75989+ return -ENOMEM;
75990+
75991+ if (copy_from_user(g_tmp, *guser,
75992+ sizeof (struct acl_object_label)))
75993+ return -EFAULT;
75994+
75995+ len = strnlen_user(g_tmp->filename, PATH_MAX);
75996+
75997+ if (!len || len >= PATH_MAX)
75998+ return -EINVAL;
75999+
76000+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76001+ return -ENOMEM;
76002+
76003+ if (copy_from_user(tmp, g_tmp->filename, len))
76004+ return -EFAULT;
76005+ tmp[len-1] = '\0';
76006+ g_tmp->filename = tmp;
76007+
76008+ *guser = g_tmp;
76009+ guser = &(g_tmp->next);
76010+ }
76011+
76012+ return 0;
76013+}
76014+
76015+static int
76016+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
76017+ struct acl_role_label *role)
76018+{
76019+ struct acl_object_label *o_tmp;
76020+ unsigned int len;
76021+ int ret;
76022+ char *tmp;
76023+
76024+ while (userp) {
76025+ if ((o_tmp = (struct acl_object_label *)
76026+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
76027+ return -ENOMEM;
76028+
76029+ if (copy_from_user(o_tmp, userp,
76030+ sizeof (struct acl_object_label)))
76031+ return -EFAULT;
76032+
76033+ userp = o_tmp->prev;
76034+
76035+ len = strnlen_user(o_tmp->filename, PATH_MAX);
76036+
76037+ if (!len || len >= PATH_MAX)
76038+ return -EINVAL;
76039+
76040+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76041+ return -ENOMEM;
76042+
76043+ if (copy_from_user(tmp, o_tmp->filename, len))
76044+ return -EFAULT;
76045+ tmp[len-1] = '\0';
76046+ o_tmp->filename = tmp;
76047+
76048+ insert_acl_obj_label(o_tmp, subj);
76049+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
76050+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
76051+ return -ENOMEM;
76052+
76053+ ret = copy_user_glob(o_tmp);
76054+ if (ret)
76055+ return ret;
76056+
76057+ if (o_tmp->nested) {
76058+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
76059+ if (IS_ERR(o_tmp->nested))
76060+ return PTR_ERR(o_tmp->nested);
76061+
76062+ /* insert into nested subject list */
76063+ o_tmp->nested->next = role->hash->first;
76064+ role->hash->first = o_tmp->nested;
76065+ }
76066+ }
76067+
76068+ return 0;
76069+}
76070+
76071+static __u32
76072+count_user_subjs(struct acl_subject_label *userp)
76073+{
76074+ struct acl_subject_label s_tmp;
76075+ __u32 num = 0;
76076+
76077+ while (userp) {
76078+ if (copy_from_user(&s_tmp, userp,
76079+ sizeof (struct acl_subject_label)))
76080+ break;
76081+
76082+ userp = s_tmp.prev;
76083+ /* do not count nested subjects against this count, since
76084+ they are not included in the hash table, but are
76085+ attached to objects. We have already counted
76086+ the subjects in userspace for the allocation
76087+ stack
76088+ */
76089+ if (!(s_tmp.mode & GR_NESTED))
76090+ num++;
76091+ }
76092+
76093+ return num;
76094+}
76095+
76096+static int
76097+copy_user_allowedips(struct acl_role_label *rolep)
76098+{
76099+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
76100+
76101+ ruserip = rolep->allowed_ips;
76102+
76103+ while (ruserip) {
76104+ rlast = rtmp;
76105+
76106+ if ((rtmp = (struct role_allowed_ip *)
76107+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
76108+ return -ENOMEM;
76109+
76110+ if (copy_from_user(rtmp, ruserip,
76111+ sizeof (struct role_allowed_ip)))
76112+ return -EFAULT;
76113+
76114+ ruserip = rtmp->prev;
76115+
76116+ if (!rlast) {
76117+ rtmp->prev = NULL;
76118+ rolep->allowed_ips = rtmp;
76119+ } else {
76120+ rlast->next = rtmp;
76121+ rtmp->prev = rlast;
76122+ }
76123+
76124+ if (!ruserip)
76125+ rtmp->next = NULL;
76126+ }
76127+
76128+ return 0;
76129+}
76130+
76131+static int
76132+copy_user_transitions(struct acl_role_label *rolep)
76133+{
76134+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
76135+
76136+ unsigned int len;
76137+ char *tmp;
76138+
76139+ rusertp = rolep->transitions;
76140+
76141+ while (rusertp) {
76142+ rlast = rtmp;
76143+
76144+ if ((rtmp = (struct role_transition *)
76145+ acl_alloc(sizeof (struct role_transition))) == NULL)
76146+ return -ENOMEM;
76147+
76148+ if (copy_from_user(rtmp, rusertp,
76149+ sizeof (struct role_transition)))
76150+ return -EFAULT;
76151+
76152+ rusertp = rtmp->prev;
76153+
76154+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
76155+
76156+ if (!len || len >= GR_SPROLE_LEN)
76157+ return -EINVAL;
76158+
76159+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76160+ return -ENOMEM;
76161+
76162+ if (copy_from_user(tmp, rtmp->rolename, len))
76163+ return -EFAULT;
76164+ tmp[len-1] = '\0';
76165+ rtmp->rolename = tmp;
76166+
76167+ if (!rlast) {
76168+ rtmp->prev = NULL;
76169+ rolep->transitions = rtmp;
76170+ } else {
76171+ rlast->next = rtmp;
76172+ rtmp->prev = rlast;
76173+ }
76174+
76175+ if (!rusertp)
76176+ rtmp->next = NULL;
76177+ }
76178+
76179+ return 0;
76180+}
76181+
76182+static struct acl_subject_label *
76183+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
76184+{
76185+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
76186+ unsigned int len;
76187+ char *tmp;
76188+ __u32 num_objs;
76189+ struct acl_ip_label **i_tmp, *i_utmp2;
76190+ struct gr_hash_struct ghash;
76191+ struct subject_map *subjmap;
76192+ unsigned int i_num;
76193+ int err;
76194+
76195+ s_tmp = lookup_subject_map(userp);
76196+
76197+ /* we've already copied this subject into the kernel, just return
76198+ the reference to it, and don't copy it over again
76199+ */
76200+ if (s_tmp)
76201+ return(s_tmp);
76202+
76203+ if ((s_tmp = (struct acl_subject_label *)
76204+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
76205+ return ERR_PTR(-ENOMEM);
76206+
76207+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
76208+ if (subjmap == NULL)
76209+ return ERR_PTR(-ENOMEM);
76210+
76211+ subjmap->user = userp;
76212+ subjmap->kernel = s_tmp;
76213+ insert_subj_map_entry(subjmap);
76214+
76215+ if (copy_from_user(s_tmp, userp,
76216+ sizeof (struct acl_subject_label)))
76217+ return ERR_PTR(-EFAULT);
76218+
76219+ len = strnlen_user(s_tmp->filename, PATH_MAX);
76220+
76221+ if (!len || len >= PATH_MAX)
76222+ return ERR_PTR(-EINVAL);
76223+
76224+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76225+ return ERR_PTR(-ENOMEM);
76226+
76227+ if (copy_from_user(tmp, s_tmp->filename, len))
76228+ return ERR_PTR(-EFAULT);
76229+ tmp[len-1] = '\0';
76230+ s_tmp->filename = tmp;
76231+
76232+ if (!strcmp(s_tmp->filename, "/"))
76233+ role->root_label = s_tmp;
76234+
76235+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
76236+ return ERR_PTR(-EFAULT);
76237+
76238+ /* copy user and group transition tables */
76239+
76240+ if (s_tmp->user_trans_num) {
76241+ uid_t *uidlist;
76242+
76243+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76244+ if (uidlist == NULL)
76245+ return ERR_PTR(-ENOMEM);
76246+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76247+ return ERR_PTR(-EFAULT);
76248+
76249+ s_tmp->user_transitions = uidlist;
76250+ }
76251+
76252+ if (s_tmp->group_trans_num) {
76253+ gid_t *gidlist;
76254+
76255+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76256+ if (gidlist == NULL)
76257+ return ERR_PTR(-ENOMEM);
76258+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76259+ return ERR_PTR(-EFAULT);
76260+
76261+ s_tmp->group_transitions = gidlist;
76262+ }
76263+
76264+ /* set up object hash table */
76265+ num_objs = count_user_objs(ghash.first);
76266+
76267+ s_tmp->obj_hash_size = num_objs;
76268+ s_tmp->obj_hash =
76269+ (struct acl_object_label **)
76270+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76271+
76272+ if (!s_tmp->obj_hash)
76273+ return ERR_PTR(-ENOMEM);
76274+
76275+ memset(s_tmp->obj_hash, 0,
76276+ s_tmp->obj_hash_size *
76277+ sizeof (struct acl_object_label *));
76278+
76279+ /* add in objects */
76280+ err = copy_user_objs(ghash.first, s_tmp, role);
76281+
76282+ if (err)
76283+ return ERR_PTR(err);
76284+
76285+ /* set pointer for parent subject */
76286+ if (s_tmp->parent_subject) {
76287+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
76288+
76289+ if (IS_ERR(s_tmp2))
76290+ return s_tmp2;
76291+
76292+ s_tmp->parent_subject = s_tmp2;
76293+ }
76294+
76295+ /* add in ip acls */
76296+
76297+ if (!s_tmp->ip_num) {
76298+ s_tmp->ips = NULL;
76299+ goto insert;
76300+ }
76301+
76302+ i_tmp =
76303+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76304+ sizeof (struct acl_ip_label *));
76305+
76306+ if (!i_tmp)
76307+ return ERR_PTR(-ENOMEM);
76308+
76309+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76310+ *(i_tmp + i_num) =
76311+ (struct acl_ip_label *)
76312+ acl_alloc(sizeof (struct acl_ip_label));
76313+ if (!*(i_tmp + i_num))
76314+ return ERR_PTR(-ENOMEM);
76315+
76316+ if (copy_from_user
76317+ (&i_utmp2, s_tmp->ips + i_num,
76318+ sizeof (struct acl_ip_label *)))
76319+ return ERR_PTR(-EFAULT);
76320+
76321+ if (copy_from_user
76322+ (*(i_tmp + i_num), i_utmp2,
76323+ sizeof (struct acl_ip_label)))
76324+ return ERR_PTR(-EFAULT);
76325+
76326+ if ((*(i_tmp + i_num))->iface == NULL)
76327+ continue;
76328+
76329+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
76330+ if (!len || len >= IFNAMSIZ)
76331+ return ERR_PTR(-EINVAL);
76332+ tmp = acl_alloc(len);
76333+ if (tmp == NULL)
76334+ return ERR_PTR(-ENOMEM);
76335+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
76336+ return ERR_PTR(-EFAULT);
76337+ (*(i_tmp + i_num))->iface = tmp;
76338+ }
76339+
76340+ s_tmp->ips = i_tmp;
76341+
76342+insert:
76343+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76344+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76345+ return ERR_PTR(-ENOMEM);
76346+
76347+ return s_tmp;
76348+}
76349+
76350+static int
76351+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76352+{
76353+ struct acl_subject_label s_pre;
76354+ struct acl_subject_label * ret;
76355+ int err;
76356+
76357+ while (userp) {
76358+ if (copy_from_user(&s_pre, userp,
76359+ sizeof (struct acl_subject_label)))
76360+ return -EFAULT;
76361+
76362+ /* do not add nested subjects here, add
76363+ while parsing objects
76364+ */
76365+
76366+ if (s_pre.mode & GR_NESTED) {
76367+ userp = s_pre.prev;
76368+ continue;
76369+ }
76370+
76371+ ret = do_copy_user_subj(userp, role);
76372+
76373+ err = PTR_ERR(ret);
76374+ if (IS_ERR(ret))
76375+ return err;
76376+
76377+ insert_acl_subj_label(ret, role);
76378+
76379+ userp = s_pre.prev;
76380+ }
76381+
76382+ return 0;
76383+}
76384+
76385+static int
76386+copy_user_acl(struct gr_arg *arg)
76387+{
76388+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76389+ struct sprole_pw *sptmp;
76390+ struct gr_hash_struct *ghash;
76391+ uid_t *domainlist;
76392+ unsigned int r_num;
76393+ unsigned int len;
76394+ char *tmp;
76395+ int err = 0;
76396+ __u16 i;
76397+ __u32 num_subjs;
76398+
76399+ /* we need a default and kernel role */
76400+ if (arg->role_db.num_roles < 2)
76401+ return -EINVAL;
76402+
76403+ /* copy special role authentication info from userspace */
76404+
76405+ num_sprole_pws = arg->num_sprole_pws;
76406+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
76407+
76408+ if (!acl_special_roles && num_sprole_pws)
76409+ return -ENOMEM;
76410+
76411+ for (i = 0; i < num_sprole_pws; i++) {
76412+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76413+ if (!sptmp)
76414+ return -ENOMEM;
76415+ if (copy_from_user(sptmp, arg->sprole_pws + i,
76416+ sizeof (struct sprole_pw)))
76417+ return -EFAULT;
76418+
76419+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
76420+
76421+ if (!len || len >= GR_SPROLE_LEN)
76422+ return -EINVAL;
76423+
76424+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76425+ return -ENOMEM;
76426+
76427+ if (copy_from_user(tmp, sptmp->rolename, len))
76428+ return -EFAULT;
76429+
76430+ tmp[len-1] = '\0';
76431+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76432+ printk(KERN_ALERT "Copying special role %s\n", tmp);
76433+#endif
76434+ sptmp->rolename = tmp;
76435+ acl_special_roles[i] = sptmp;
76436+ }
76437+
76438+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76439+
76440+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76441+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76442+
76443+ if (!r_tmp)
76444+ return -ENOMEM;
76445+
76446+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
76447+ sizeof (struct acl_role_label *)))
76448+ return -EFAULT;
76449+
76450+ if (copy_from_user(r_tmp, r_utmp2,
76451+ sizeof (struct acl_role_label)))
76452+ return -EFAULT;
76453+
76454+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
76455+
76456+ if (!len || len >= PATH_MAX)
76457+ return -EINVAL;
76458+
76459+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76460+ return -ENOMEM;
76461+
76462+ if (copy_from_user(tmp, r_tmp->rolename, len))
76463+ return -EFAULT;
76464+
76465+ tmp[len-1] = '\0';
76466+ r_tmp->rolename = tmp;
76467+
76468+ if (!strcmp(r_tmp->rolename, "default")
76469+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76470+ default_role = r_tmp;
76471+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76472+ kernel_role = r_tmp;
76473+ }
76474+
76475+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
76476+ return -ENOMEM;
76477+
76478+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
76479+ return -EFAULT;
76480+
76481+ r_tmp->hash = ghash;
76482+
76483+ num_subjs = count_user_subjs(r_tmp->hash->first);
76484+
76485+ r_tmp->subj_hash_size = num_subjs;
76486+ r_tmp->subj_hash =
76487+ (struct acl_subject_label **)
76488+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76489+
76490+ if (!r_tmp->subj_hash)
76491+ return -ENOMEM;
76492+
76493+ err = copy_user_allowedips(r_tmp);
76494+ if (err)
76495+ return err;
76496+
76497+ /* copy domain info */
76498+ if (r_tmp->domain_children != NULL) {
76499+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76500+ if (domainlist == NULL)
76501+ return -ENOMEM;
76502+
76503+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
76504+ return -EFAULT;
76505+
76506+ r_tmp->domain_children = domainlist;
76507+ }
76508+
76509+ err = copy_user_transitions(r_tmp);
76510+ if (err)
76511+ return err;
76512+
76513+ memset(r_tmp->subj_hash, 0,
76514+ r_tmp->subj_hash_size *
76515+ sizeof (struct acl_subject_label *));
76516+
76517+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
76518+
76519+ if (err)
76520+ return err;
76521+
76522+ /* set nested subject list to null */
76523+ r_tmp->hash->first = NULL;
76524+
76525+ insert_acl_role_label(r_tmp);
76526+ }
76527+
76528+ if (default_role == NULL || kernel_role == NULL)
76529+ return -EINVAL;
76530+
76531+ return err;
76532+}
76533+
76534+static int
76535+gracl_init(struct gr_arg *args)
76536+{
76537+ int error = 0;
76538+
76539+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76540+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76541+
76542+ if (init_variables(args)) {
76543+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76544+ error = -ENOMEM;
76545+ free_variables();
76546+ goto out;
76547+ }
76548+
76549+ error = copy_user_acl(args);
76550+ free_init_variables();
76551+ if (error) {
76552+ free_variables();
76553+ goto out;
76554+ }
76555+
76556+ if ((error = gr_set_acls(0))) {
76557+ free_variables();
76558+ goto out;
76559+ }
76560+
76561+ pax_open_kernel();
76562+ gr_status |= GR_READY;
76563+ pax_close_kernel();
76564+
76565+ out:
76566+ return error;
76567+}
76568+
76569+/* derived from glibc fnmatch() 0: match, 1: no match*/
76570+
76571+static int
76572+glob_match(const char *p, const char *n)
76573+{
76574+ char c;
76575+
76576+ while ((c = *p++) != '\0') {
76577+ switch (c) {
76578+ case '?':
76579+ if (*n == '\0')
76580+ return 1;
76581+ else if (*n == '/')
76582+ return 1;
76583+ break;
76584+ case '\\':
76585+ if (*n != c)
76586+ return 1;
76587+ break;
76588+ case '*':
76589+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
76590+ if (*n == '/')
76591+ return 1;
76592+ else if (c == '?') {
76593+ if (*n == '\0')
76594+ return 1;
76595+ else
76596+ ++n;
76597+ }
76598+ }
76599+ if (c == '\0') {
76600+ return 0;
76601+ } else {
76602+ const char *endp;
76603+
76604+ if ((endp = strchr(n, '/')) == NULL)
76605+ endp = n + strlen(n);
76606+
76607+ if (c == '[') {
76608+ for (--p; n < endp; ++n)
76609+ if (!glob_match(p, n))
76610+ return 0;
76611+ } else if (c == '/') {
76612+ while (*n != '\0' && *n != '/')
76613+ ++n;
76614+ if (*n == '/' && !glob_match(p, n + 1))
76615+ return 0;
76616+ } else {
76617+ for (--p; n < endp; ++n)
76618+ if (*n == c && !glob_match(p, n))
76619+ return 0;
76620+ }
76621+
76622+ return 1;
76623+ }
76624+ case '[':
76625+ {
76626+ int not;
76627+ char cold;
76628+
76629+ if (*n == '\0' || *n == '/')
76630+ return 1;
76631+
76632+ not = (*p == '!' || *p == '^');
76633+ if (not)
76634+ ++p;
76635+
76636+ c = *p++;
76637+ for (;;) {
76638+ unsigned char fn = (unsigned char)*n;
76639+
76640+ if (c == '\0')
76641+ return 1;
76642+ else {
76643+ if (c == fn)
76644+ goto matched;
76645+ cold = c;
76646+ c = *p++;
76647+
76648+ if (c == '-' && *p != ']') {
76649+ unsigned char cend = *p++;
76650+
76651+ if (cend == '\0')
76652+ return 1;
76653+
76654+ if (cold <= fn && fn <= cend)
76655+ goto matched;
76656+
76657+ c = *p++;
76658+ }
76659+ }
76660+
76661+ if (c == ']')
76662+ break;
76663+ }
76664+ if (!not)
76665+ return 1;
76666+ break;
76667+ matched:
76668+ while (c != ']') {
76669+ if (c == '\0')
76670+ return 1;
76671+
76672+ c = *p++;
76673+ }
76674+ if (not)
76675+ return 1;
76676+ }
76677+ break;
76678+ default:
76679+ if (c != *n)
76680+ return 1;
76681+ }
76682+
76683+ ++n;
76684+ }
76685+
76686+ if (*n == '\0')
76687+ return 0;
76688+
76689+ if (*n == '/')
76690+ return 0;
76691+
76692+ return 1;
76693+}
76694+
76695+static struct acl_object_label *
76696+chk_glob_label(struct acl_object_label *globbed,
76697+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
76698+{
76699+ struct acl_object_label *tmp;
76700+
76701+ if (*path == NULL)
76702+ *path = gr_to_filename_nolock(dentry, mnt);
76703+
76704+ tmp = globbed;
76705+
76706+ while (tmp) {
76707+ if (!glob_match(tmp->filename, *path))
76708+ return tmp;
76709+ tmp = tmp->next;
76710+ }
76711+
76712+ return NULL;
76713+}
76714+
76715+static struct acl_object_label *
76716+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76717+ const ino_t curr_ino, const dev_t curr_dev,
76718+ const struct acl_subject_label *subj, char **path, const int checkglob)
76719+{
76720+ struct acl_subject_label *tmpsubj;
76721+ struct acl_object_label *retval;
76722+ struct acl_object_label *retval2;
76723+
76724+ tmpsubj = (struct acl_subject_label *) subj;
76725+ read_lock(&gr_inode_lock);
76726+ do {
76727+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
76728+ if (retval) {
76729+ if (checkglob && retval->globbed) {
76730+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
76731+ if (retval2)
76732+ retval = retval2;
76733+ }
76734+ break;
76735+ }
76736+ } while ((tmpsubj = tmpsubj->parent_subject));
76737+ read_unlock(&gr_inode_lock);
76738+
76739+ return retval;
76740+}
76741+
76742+static __inline__ struct acl_object_label *
76743+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76744+ const struct dentry *curr_dentry,
76745+ const struct acl_subject_label *subj, char **path, const int checkglob)
76746+{
76747+ int newglob = checkglob;
76748+
76749+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
76750+ as we don't want a / * rule to match instead of the / object
76751+ don't do this for create lookups that call this function though, since they're looking up
76752+ on the parent and thus need globbing checks on all paths
76753+ */
76754+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
76755+ newglob = GR_NO_GLOB;
76756+
76757+ return __full_lookup(orig_dentry, orig_mnt,
76758+ curr_dentry->d_inode->i_ino,
76759+ __get_dev(curr_dentry), subj, path, newglob);
76760+}
76761+
76762+static struct acl_object_label *
76763+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76764+ const struct acl_subject_label *subj, char *path, const int checkglob)
76765+{
76766+ struct dentry *dentry = (struct dentry *) l_dentry;
76767+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76768+ struct acl_object_label *retval;
76769+
76770+ spin_lock(&dcache_lock);
76771+ spin_lock(&vfsmount_lock);
76772+
76773+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
76774+#ifdef CONFIG_NET
76775+ mnt == sock_mnt ||
76776+#endif
76777+#ifdef CONFIG_HUGETLBFS
76778+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
76779+#endif
76780+ /* ignore Eric Biederman */
76781+ IS_PRIVATE(l_dentry->d_inode))) {
76782+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
76783+ goto out;
76784+ }
76785+
76786+ for (;;) {
76787+ if (dentry == real_root && mnt == real_root_mnt)
76788+ break;
76789+
76790+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76791+ if (mnt->mnt_parent == mnt)
76792+ break;
76793+
76794+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76795+ if (retval != NULL)
76796+ goto out;
76797+
76798+ dentry = mnt->mnt_mountpoint;
76799+ mnt = mnt->mnt_parent;
76800+ continue;
76801+ }
76802+
76803+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76804+ if (retval != NULL)
76805+ goto out;
76806+
76807+ dentry = dentry->d_parent;
76808+ }
76809+
76810+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76811+
76812+ if (retval == NULL)
76813+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
76814+out:
76815+ spin_unlock(&vfsmount_lock);
76816+ spin_unlock(&dcache_lock);
76817+
76818+ BUG_ON(retval == NULL);
76819+
76820+ return retval;
76821+}
76822+
76823+static __inline__ struct acl_object_label *
76824+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76825+ const struct acl_subject_label *subj)
76826+{
76827+ char *path = NULL;
76828+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
76829+}
76830+
76831+static __inline__ struct acl_object_label *
76832+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76833+ const struct acl_subject_label *subj)
76834+{
76835+ char *path = NULL;
76836+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
76837+}
76838+
76839+static __inline__ struct acl_object_label *
76840+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76841+ const struct acl_subject_label *subj, char *path)
76842+{
76843+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
76844+}
76845+
76846+static struct acl_subject_label *
76847+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76848+ const struct acl_role_label *role)
76849+{
76850+ struct dentry *dentry = (struct dentry *) l_dentry;
76851+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76852+ struct acl_subject_label *retval;
76853+
76854+ spin_lock(&dcache_lock);
76855+ spin_lock(&vfsmount_lock);
76856+
76857+ for (;;) {
76858+ if (dentry == real_root && mnt == real_root_mnt)
76859+ break;
76860+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76861+ if (mnt->mnt_parent == mnt)
76862+ break;
76863+
76864+ read_lock(&gr_inode_lock);
76865+ retval =
76866+ lookup_acl_subj_label(dentry->d_inode->i_ino,
76867+ __get_dev(dentry), role);
76868+ read_unlock(&gr_inode_lock);
76869+ if (retval != NULL)
76870+ goto out;
76871+
76872+ dentry = mnt->mnt_mountpoint;
76873+ mnt = mnt->mnt_parent;
76874+ continue;
76875+ }
76876+
76877+ read_lock(&gr_inode_lock);
76878+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76879+ __get_dev(dentry), role);
76880+ read_unlock(&gr_inode_lock);
76881+ if (retval != NULL)
76882+ goto out;
76883+
76884+ dentry = dentry->d_parent;
76885+ }
76886+
76887+ read_lock(&gr_inode_lock);
76888+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76889+ __get_dev(dentry), role);
76890+ read_unlock(&gr_inode_lock);
76891+
76892+ if (unlikely(retval == NULL)) {
76893+ read_lock(&gr_inode_lock);
76894+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
76895+ __get_dev(real_root), role);
76896+ read_unlock(&gr_inode_lock);
76897+ }
76898+out:
76899+ spin_unlock(&vfsmount_lock);
76900+ spin_unlock(&dcache_lock);
76901+
76902+ BUG_ON(retval == NULL);
76903+
76904+ return retval;
76905+}
76906+
76907+static void
76908+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
76909+{
76910+ struct task_struct *task = current;
76911+ const struct cred *cred = current_cred();
76912+
76913+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76914+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76915+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76916+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
76917+
76918+ return;
76919+}
76920+
76921+static void
76922+gr_log_learn_sysctl(const char *path, const __u32 mode)
76923+{
76924+ struct task_struct *task = current;
76925+ const struct cred *cred = current_cred();
76926+
76927+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76928+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76929+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76930+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
76931+
76932+ return;
76933+}
76934+
76935+static void
76936+gr_log_learn_id_change(const char type, const unsigned int real,
76937+ const unsigned int effective, const unsigned int fs)
76938+{
76939+ struct task_struct *task = current;
76940+ const struct cred *cred = current_cred();
76941+
76942+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
76943+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76944+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76945+ type, real, effective, fs, &task->signal->saved_ip);
76946+
76947+ return;
76948+}
76949+
76950+__u32
76951+gr_search_file(const struct dentry * dentry, const __u32 mode,
76952+ const struct vfsmount * mnt)
76953+{
76954+ __u32 retval = mode;
76955+ struct acl_subject_label *curracl;
76956+ struct acl_object_label *currobj;
76957+
76958+ if (unlikely(!(gr_status & GR_READY)))
76959+ return (mode & ~GR_AUDITS);
76960+
76961+ curracl = current->acl;
76962+
76963+ currobj = chk_obj_label(dentry, mnt, curracl);
76964+ retval = currobj->mode & mode;
76965+
76966+ /* if we're opening a specified transfer file for writing
76967+ (e.g. /dev/initctl), then transfer our role to init
76968+ */
76969+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
76970+ current->role->roletype & GR_ROLE_PERSIST)) {
76971+ struct task_struct *task = init_pid_ns.child_reaper;
76972+
76973+ if (task->role != current->role) {
76974+ task->acl_sp_role = 0;
76975+ task->acl_role_id = current->acl_role_id;
76976+ task->role = current->role;
76977+ rcu_read_lock();
76978+ read_lock(&grsec_exec_file_lock);
76979+ gr_apply_subject_to_task(task);
76980+ read_unlock(&grsec_exec_file_lock);
76981+ rcu_read_unlock();
76982+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
76983+ }
76984+ }
76985+
76986+ if (unlikely
76987+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
76988+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
76989+ __u32 new_mode = mode;
76990+
76991+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
76992+
76993+ retval = new_mode;
76994+
76995+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
76996+ new_mode |= GR_INHERIT;
76997+
76998+ if (!(mode & GR_NOLEARN))
76999+ gr_log_learn(dentry, mnt, new_mode);
77000+ }
77001+
77002+ return retval;
77003+}
77004+
77005+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
77006+ const struct dentry *parent,
77007+ const struct vfsmount *mnt)
77008+{
77009+ struct name_entry *match;
77010+ struct acl_object_label *matchpo;
77011+ struct acl_subject_label *curracl;
77012+ char *path;
77013+
77014+ if (unlikely(!(gr_status & GR_READY)))
77015+ return NULL;
77016+
77017+ preempt_disable();
77018+ path = gr_to_filename_rbac(new_dentry, mnt);
77019+ match = lookup_name_entry_create(path);
77020+
77021+ curracl = current->acl;
77022+
77023+ if (match) {
77024+ read_lock(&gr_inode_lock);
77025+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
77026+ read_unlock(&gr_inode_lock);
77027+
77028+ if (matchpo) {
77029+ preempt_enable();
77030+ return matchpo;
77031+ }
77032+ }
77033+
77034+ // lookup parent
77035+
77036+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
77037+
77038+ preempt_enable();
77039+ return matchpo;
77040+}
77041+
77042+__u32
77043+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
77044+ const struct vfsmount * mnt, const __u32 mode)
77045+{
77046+ struct acl_object_label *matchpo;
77047+ __u32 retval;
77048+
77049+ if (unlikely(!(gr_status & GR_READY)))
77050+ return (mode & ~GR_AUDITS);
77051+
77052+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
77053+
77054+ retval = matchpo->mode & mode;
77055+
77056+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
77057+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77058+ __u32 new_mode = mode;
77059+
77060+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
77061+
77062+ gr_log_learn(new_dentry, mnt, new_mode);
77063+ return new_mode;
77064+ }
77065+
77066+ return retval;
77067+}
77068+
77069+__u32
77070+gr_check_link(const struct dentry * new_dentry,
77071+ const struct dentry * parent_dentry,
77072+ const struct vfsmount * parent_mnt,
77073+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
77074+{
77075+ struct acl_object_label *obj;
77076+ __u32 oldmode, newmode;
77077+ __u32 needmode;
77078+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
77079+ GR_DELETE | GR_INHERIT;
77080+
77081+ if (unlikely(!(gr_status & GR_READY)))
77082+ return (GR_CREATE | GR_LINK);
77083+
77084+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
77085+ oldmode = obj->mode;
77086+
77087+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
77088+ newmode = obj->mode;
77089+
77090+ needmode = newmode & checkmodes;
77091+
77092+ // old name for hardlink must have at least the permissions of the new name
77093+ if ((oldmode & needmode) != needmode)
77094+ goto bad;
77095+
77096+ // if old name had restrictions/auditing, make sure the new name does as well
77097+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
77098+
77099+ // don't allow hardlinking of suid/sgid files without permission
77100+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77101+ needmode |= GR_SETID;
77102+
77103+ if ((newmode & needmode) != needmode)
77104+ goto bad;
77105+
77106+ // enforce minimum permissions
77107+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
77108+ return newmode;
77109+bad:
77110+ needmode = oldmode;
77111+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77112+ needmode |= GR_SETID;
77113+
77114+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
77115+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
77116+ return (GR_CREATE | GR_LINK);
77117+ } else if (newmode & GR_SUPPRESS)
77118+ return GR_SUPPRESS;
77119+ else
77120+ return 0;
77121+}
77122+
77123+int
77124+gr_check_hidden_task(const struct task_struct *task)
77125+{
77126+ if (unlikely(!(gr_status & GR_READY)))
77127+ return 0;
77128+
77129+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
77130+ return 1;
77131+
77132+ return 0;
77133+}
77134+
77135+int
77136+gr_check_protected_task(const struct task_struct *task)
77137+{
77138+ if (unlikely(!(gr_status & GR_READY) || !task))
77139+ return 0;
77140+
77141+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77142+ task->acl != current->acl)
77143+ return 1;
77144+
77145+ return 0;
77146+}
77147+
77148+int
77149+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77150+{
77151+ struct task_struct *p;
77152+ int ret = 0;
77153+
77154+ if (unlikely(!(gr_status & GR_READY) || !pid))
77155+ return ret;
77156+
77157+ read_lock(&tasklist_lock);
77158+ do_each_pid_task(pid, type, p) {
77159+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77160+ p->acl != current->acl) {
77161+ ret = 1;
77162+ goto out;
77163+ }
77164+ } while_each_pid_task(pid, type, p);
77165+out:
77166+ read_unlock(&tasklist_lock);
77167+
77168+ return ret;
77169+}
77170+
77171+void
77172+gr_copy_label(struct task_struct *tsk)
77173+{
77174+ /* plain copying of fields is already done by dup_task_struct */
77175+ tsk->signal->used_accept = 0;
77176+ tsk->acl_sp_role = 0;
77177+ //tsk->acl_role_id = current->acl_role_id;
77178+ //tsk->acl = current->acl;
77179+ //tsk->role = current->role;
77180+ tsk->signal->curr_ip = current->signal->curr_ip;
77181+ tsk->signal->saved_ip = current->signal->saved_ip;
77182+ if (current->exec_file)
77183+ get_file(current->exec_file);
77184+ //tsk->exec_file = current->exec_file;
77185+ //tsk->is_writable = current->is_writable;
77186+ if (unlikely(current->signal->used_accept)) {
77187+ current->signal->curr_ip = 0;
77188+ current->signal->saved_ip = 0;
77189+ }
77190+
77191+ return;
77192+}
77193+
77194+static void
77195+gr_set_proc_res(struct task_struct *task)
77196+{
77197+ struct acl_subject_label *proc;
77198+ unsigned short i;
77199+
77200+ proc = task->acl;
77201+
77202+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
77203+ return;
77204+
77205+ for (i = 0; i < RLIM_NLIMITS; i++) {
77206+ if (!(proc->resmask & (1 << i)))
77207+ continue;
77208+
77209+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
77210+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
77211+ }
77212+
77213+ return;
77214+}
77215+
77216+extern int __gr_process_user_ban(struct user_struct *user);
77217+
77218+int
77219+gr_check_user_change(int real, int effective, int fs)
77220+{
77221+ unsigned int i;
77222+ __u16 num;
77223+ uid_t *uidlist;
77224+ int curuid;
77225+ int realok = 0;
77226+ int effectiveok = 0;
77227+ int fsok = 0;
77228+
77229+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
77230+ struct user_struct *user;
77231+
77232+ if (real == -1)
77233+ goto skipit;
77234+
77235+ user = find_user(real);
77236+ if (user == NULL)
77237+ goto skipit;
77238+
77239+ if (__gr_process_user_ban(user)) {
77240+ /* for find_user */
77241+ free_uid(user);
77242+ return 1;
77243+ }
77244+
77245+ /* for find_user */
77246+ free_uid(user);
77247+
77248+skipit:
77249+#endif
77250+
77251+ if (unlikely(!(gr_status & GR_READY)))
77252+ return 0;
77253+
77254+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77255+ gr_log_learn_id_change('u', real, effective, fs);
77256+
77257+ num = current->acl->user_trans_num;
77258+ uidlist = current->acl->user_transitions;
77259+
77260+ if (uidlist == NULL)
77261+ return 0;
77262+
77263+ if (real == -1)
77264+ realok = 1;
77265+ if (effective == -1)
77266+ effectiveok = 1;
77267+ if (fs == -1)
77268+ fsok = 1;
77269+
77270+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
77271+ for (i = 0; i < num; i++) {
77272+ curuid = (int)uidlist[i];
77273+ if (real == curuid)
77274+ realok = 1;
77275+ if (effective == curuid)
77276+ effectiveok = 1;
77277+ if (fs == curuid)
77278+ fsok = 1;
77279+ }
77280+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
77281+ for (i = 0; i < num; i++) {
77282+ curuid = (int)uidlist[i];
77283+ if (real == curuid)
77284+ break;
77285+ if (effective == curuid)
77286+ break;
77287+ if (fs == curuid)
77288+ break;
77289+ }
77290+ /* not in deny list */
77291+ if (i == num) {
77292+ realok = 1;
77293+ effectiveok = 1;
77294+ fsok = 1;
77295+ }
77296+ }
77297+
77298+ if (realok && effectiveok && fsok)
77299+ return 0;
77300+ else {
77301+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77302+ return 1;
77303+ }
77304+}
77305+
77306+int
77307+gr_check_group_change(int real, int effective, int fs)
77308+{
77309+ unsigned int i;
77310+ __u16 num;
77311+ gid_t *gidlist;
77312+ int curgid;
77313+ int realok = 0;
77314+ int effectiveok = 0;
77315+ int fsok = 0;
77316+
77317+ if (unlikely(!(gr_status & GR_READY)))
77318+ return 0;
77319+
77320+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77321+ gr_log_learn_id_change('g', real, effective, fs);
77322+
77323+ num = current->acl->group_trans_num;
77324+ gidlist = current->acl->group_transitions;
77325+
77326+ if (gidlist == NULL)
77327+ return 0;
77328+
77329+ if (real == -1)
77330+ realok = 1;
77331+ if (effective == -1)
77332+ effectiveok = 1;
77333+ if (fs == -1)
77334+ fsok = 1;
77335+
77336+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
77337+ for (i = 0; i < num; i++) {
77338+ curgid = (int)gidlist[i];
77339+ if (real == curgid)
77340+ realok = 1;
77341+ if (effective == curgid)
77342+ effectiveok = 1;
77343+ if (fs == curgid)
77344+ fsok = 1;
77345+ }
77346+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
77347+ for (i = 0; i < num; i++) {
77348+ curgid = (int)gidlist[i];
77349+ if (real == curgid)
77350+ break;
77351+ if (effective == curgid)
77352+ break;
77353+ if (fs == curgid)
77354+ break;
77355+ }
77356+ /* not in deny list */
77357+ if (i == num) {
77358+ realok = 1;
77359+ effectiveok = 1;
77360+ fsok = 1;
77361+ }
77362+ }
77363+
77364+ if (realok && effectiveok && fsok)
77365+ return 0;
77366+ else {
77367+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77368+ return 1;
77369+ }
77370+}
77371+
77372+extern int gr_acl_is_capable(const int cap);
77373+
77374+void
77375+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
77376+{
77377+ struct acl_role_label *role = task->role;
77378+ struct acl_subject_label *subj = NULL;
77379+ struct acl_object_label *obj;
77380+ struct file *filp;
77381+
77382+ if (unlikely(!(gr_status & GR_READY)))
77383+ return;
77384+
77385+ filp = task->exec_file;
77386+
77387+ /* kernel process, we'll give them the kernel role */
77388+ if (unlikely(!filp)) {
77389+ task->role = kernel_role;
77390+ task->acl = kernel_role->root_label;
77391+ return;
77392+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
77393+ role = lookup_acl_role_label(task, uid, gid);
77394+
77395+ /* don't change the role if we're not a privileged process */
77396+ if (role && task->role != role &&
77397+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
77398+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
77399+ return;
77400+
77401+ /* perform subject lookup in possibly new role
77402+ we can use this result below in the case where role == task->role
77403+ */
77404+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
77405+
77406+ /* if we changed uid/gid, but result in the same role
77407+ and are using inheritance, don't lose the inherited subject
77408+ if current subject is other than what normal lookup
77409+ would result in, we arrived via inheritance, don't
77410+ lose subject
77411+ */
77412+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
77413+ (subj == task->acl)))
77414+ task->acl = subj;
77415+
77416+ task->role = role;
77417+
77418+ task->is_writable = 0;
77419+
77420+ /* ignore additional mmap checks for processes that are writable
77421+ by the default ACL */
77422+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77423+ if (unlikely(obj->mode & GR_WRITE))
77424+ task->is_writable = 1;
77425+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
77426+ if (unlikely(obj->mode & GR_WRITE))
77427+ task->is_writable = 1;
77428+
77429+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77430+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77431+#endif
77432+
77433+ gr_set_proc_res(task);
77434+
77435+ return;
77436+}
77437+
77438+int
77439+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77440+ const int unsafe_flags)
77441+{
77442+ struct task_struct *task = current;
77443+ struct acl_subject_label *newacl;
77444+ struct acl_object_label *obj;
77445+ __u32 retmode;
77446+
77447+ if (unlikely(!(gr_status & GR_READY)))
77448+ return 0;
77449+
77450+ newacl = chk_subj_label(dentry, mnt, task->role);
77451+
77452+ task_lock(task);
77453+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
77454+ !(task->role->roletype & GR_ROLE_GOD) &&
77455+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
77456+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77457+ task_unlock(task);
77458+ if (unsafe_flags & LSM_UNSAFE_SHARE)
77459+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
77460+ else
77461+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
77462+ return -EACCES;
77463+ }
77464+ task_unlock(task);
77465+
77466+ obj = chk_obj_label(dentry, mnt, task->acl);
77467+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
77468+
77469+ if (!(task->acl->mode & GR_INHERITLEARN) &&
77470+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
77471+ if (obj->nested)
77472+ task->acl = obj->nested;
77473+ else
77474+ task->acl = newacl;
77475+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
77476+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
77477+
77478+ task->is_writable = 0;
77479+
77480+ /* ignore additional mmap checks for processes that are writable
77481+ by the default ACL */
77482+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
77483+ if (unlikely(obj->mode & GR_WRITE))
77484+ task->is_writable = 1;
77485+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
77486+ if (unlikely(obj->mode & GR_WRITE))
77487+ task->is_writable = 1;
77488+
77489+ gr_set_proc_res(task);
77490+
77491+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77492+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77493+#endif
77494+ return 0;
77495+}
77496+
77497+/* always called with valid inodev ptr */
77498+static void
77499+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
77500+{
77501+ struct acl_object_label *matchpo;
77502+ struct acl_subject_label *matchps;
77503+ struct acl_subject_label *subj;
77504+ struct acl_role_label *role;
77505+ unsigned int x;
77506+
77507+ FOR_EACH_ROLE_START(role)
77508+ FOR_EACH_SUBJECT_START(role, subj, x)
77509+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
77510+ matchpo->mode |= GR_DELETED;
77511+ FOR_EACH_SUBJECT_END(subj,x)
77512+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77513+ if (subj->inode == ino && subj->device == dev)
77514+ subj->mode |= GR_DELETED;
77515+ FOR_EACH_NESTED_SUBJECT_END(subj)
77516+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
77517+ matchps->mode |= GR_DELETED;
77518+ FOR_EACH_ROLE_END(role)
77519+
77520+ inodev->nentry->deleted = 1;
77521+
77522+ return;
77523+}
77524+
77525+void
77526+gr_handle_delete(const ino_t ino, const dev_t dev)
77527+{
77528+ struct inodev_entry *inodev;
77529+
77530+ if (unlikely(!(gr_status & GR_READY)))
77531+ return;
77532+
77533+ write_lock(&gr_inode_lock);
77534+ inodev = lookup_inodev_entry(ino, dev);
77535+ if (inodev != NULL)
77536+ do_handle_delete(inodev, ino, dev);
77537+ write_unlock(&gr_inode_lock);
77538+
77539+ return;
77540+}
77541+
77542+static void
77543+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
77544+ const ino_t newinode, const dev_t newdevice,
77545+ struct acl_subject_label *subj)
77546+{
77547+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
77548+ struct acl_object_label *match;
77549+
77550+ match = subj->obj_hash[index];
77551+
77552+ while (match && (match->inode != oldinode ||
77553+ match->device != olddevice ||
77554+ !(match->mode & GR_DELETED)))
77555+ match = match->next;
77556+
77557+ if (match && (match->inode == oldinode)
77558+ && (match->device == olddevice)
77559+ && (match->mode & GR_DELETED)) {
77560+ if (match->prev == NULL) {
77561+ subj->obj_hash[index] = match->next;
77562+ if (match->next != NULL)
77563+ match->next->prev = NULL;
77564+ } else {
77565+ match->prev->next = match->next;
77566+ if (match->next != NULL)
77567+ match->next->prev = match->prev;
77568+ }
77569+ match->prev = NULL;
77570+ match->next = NULL;
77571+ match->inode = newinode;
77572+ match->device = newdevice;
77573+ match->mode &= ~GR_DELETED;
77574+
77575+ insert_acl_obj_label(match, subj);
77576+ }
77577+
77578+ return;
77579+}
77580+
77581+static void
77582+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
77583+ const ino_t newinode, const dev_t newdevice,
77584+ struct acl_role_label *role)
77585+{
77586+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
77587+ struct acl_subject_label *match;
77588+
77589+ match = role->subj_hash[index];
77590+
77591+ while (match && (match->inode != oldinode ||
77592+ match->device != olddevice ||
77593+ !(match->mode & GR_DELETED)))
77594+ match = match->next;
77595+
77596+ if (match && (match->inode == oldinode)
77597+ && (match->device == olddevice)
77598+ && (match->mode & GR_DELETED)) {
77599+ if (match->prev == NULL) {
77600+ role->subj_hash[index] = match->next;
77601+ if (match->next != NULL)
77602+ match->next->prev = NULL;
77603+ } else {
77604+ match->prev->next = match->next;
77605+ if (match->next != NULL)
77606+ match->next->prev = match->prev;
77607+ }
77608+ match->prev = NULL;
77609+ match->next = NULL;
77610+ match->inode = newinode;
77611+ match->device = newdevice;
77612+ match->mode &= ~GR_DELETED;
77613+
77614+ insert_acl_subj_label(match, role);
77615+ }
77616+
77617+ return;
77618+}
77619+
77620+static void
77621+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
77622+ const ino_t newinode, const dev_t newdevice)
77623+{
77624+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
77625+ struct inodev_entry *match;
77626+
77627+ match = inodev_set.i_hash[index];
77628+
77629+ while (match && (match->nentry->inode != oldinode ||
77630+ match->nentry->device != olddevice || !match->nentry->deleted))
77631+ match = match->next;
77632+
77633+ if (match && (match->nentry->inode == oldinode)
77634+ && (match->nentry->device == olddevice) &&
77635+ match->nentry->deleted) {
77636+ if (match->prev == NULL) {
77637+ inodev_set.i_hash[index] = match->next;
77638+ if (match->next != NULL)
77639+ match->next->prev = NULL;
77640+ } else {
77641+ match->prev->next = match->next;
77642+ if (match->next != NULL)
77643+ match->next->prev = match->prev;
77644+ }
77645+ match->prev = NULL;
77646+ match->next = NULL;
77647+ match->nentry->inode = newinode;
77648+ match->nentry->device = newdevice;
77649+ match->nentry->deleted = 0;
77650+
77651+ insert_inodev_entry(match);
77652+ }
77653+
77654+ return;
77655+}
77656+
77657+static void
77658+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
77659+{
77660+ struct acl_subject_label *subj;
77661+ struct acl_role_label *role;
77662+ unsigned int x;
77663+
77664+ FOR_EACH_ROLE_START(role)
77665+ update_acl_subj_label(matchn->inode, matchn->device,
77666+ inode, dev, role);
77667+
77668+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77669+ if ((subj->inode == inode) && (subj->device == dev)) {
77670+ subj->inode = inode;
77671+ subj->device = dev;
77672+ }
77673+ FOR_EACH_NESTED_SUBJECT_END(subj)
77674+ FOR_EACH_SUBJECT_START(role, subj, x)
77675+ update_acl_obj_label(matchn->inode, matchn->device,
77676+ inode, dev, subj);
77677+ FOR_EACH_SUBJECT_END(subj,x)
77678+ FOR_EACH_ROLE_END(role)
77679+
77680+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
77681+
77682+ return;
77683+}
77684+
77685+static void
77686+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
77687+ const struct vfsmount *mnt)
77688+{
77689+ ino_t ino = dentry->d_inode->i_ino;
77690+ dev_t dev = __get_dev(dentry);
77691+
77692+ __do_handle_create(matchn, ino, dev);
77693+
77694+ return;
77695+}
77696+
77697+void
77698+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77699+{
77700+ struct name_entry *matchn;
77701+
77702+ if (unlikely(!(gr_status & GR_READY)))
77703+ return;
77704+
77705+ preempt_disable();
77706+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
77707+
77708+ if (unlikely((unsigned long)matchn)) {
77709+ write_lock(&gr_inode_lock);
77710+ do_handle_create(matchn, dentry, mnt);
77711+ write_unlock(&gr_inode_lock);
77712+ }
77713+ preempt_enable();
77714+
77715+ return;
77716+}
77717+
77718+void
77719+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77720+{
77721+ struct name_entry *matchn;
77722+
77723+ if (unlikely(!(gr_status & GR_READY)))
77724+ return;
77725+
77726+ preempt_disable();
77727+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
77728+
77729+ if (unlikely((unsigned long)matchn)) {
77730+ write_lock(&gr_inode_lock);
77731+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
77732+ write_unlock(&gr_inode_lock);
77733+ }
77734+ preempt_enable();
77735+
77736+ return;
77737+}
77738+
77739+void
77740+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77741+ struct dentry *old_dentry,
77742+ struct dentry *new_dentry,
77743+ struct vfsmount *mnt, const __u8 replace)
77744+{
77745+ struct name_entry *matchn;
77746+ struct inodev_entry *inodev;
77747+ struct inode *inode = new_dentry->d_inode;
77748+ ino_t oldinode = old_dentry->d_inode->i_ino;
77749+ dev_t olddev = __get_dev(old_dentry);
77750+
77751+ /* vfs_rename swaps the name and parent link for old_dentry and
77752+ new_dentry
77753+ at this point, old_dentry has the new name, parent link, and inode
77754+ for the renamed file
77755+ if a file is being replaced by a rename, new_dentry has the inode
77756+ and name for the replaced file
77757+ */
77758+
77759+ if (unlikely(!(gr_status & GR_READY)))
77760+ return;
77761+
77762+ preempt_disable();
77763+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
77764+
77765+ /* we wouldn't have to check d_inode if it weren't for
77766+ NFS silly-renaming
77767+ */
77768+
77769+ write_lock(&gr_inode_lock);
77770+ if (unlikely(replace && inode)) {
77771+ ino_t newinode = inode->i_ino;
77772+ dev_t newdev = __get_dev(new_dentry);
77773+ inodev = lookup_inodev_entry(newinode, newdev);
77774+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
77775+ do_handle_delete(inodev, newinode, newdev);
77776+ }
77777+
77778+ inodev = lookup_inodev_entry(oldinode, olddev);
77779+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
77780+ do_handle_delete(inodev, oldinode, olddev);
77781+
77782+ if (unlikely((unsigned long)matchn))
77783+ do_handle_create(matchn, old_dentry, mnt);
77784+
77785+ write_unlock(&gr_inode_lock);
77786+ preempt_enable();
77787+
77788+ return;
77789+}
77790+
77791+static int
77792+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
77793+ unsigned char **sum)
77794+{
77795+ struct acl_role_label *r;
77796+ struct role_allowed_ip *ipp;
77797+ struct role_transition *trans;
77798+ unsigned int i;
77799+ int found = 0;
77800+ u32 curr_ip = current->signal->curr_ip;
77801+
77802+ current->signal->saved_ip = curr_ip;
77803+
77804+ /* check transition table */
77805+
77806+ for (trans = current->role->transitions; trans; trans = trans->next) {
77807+ if (!strcmp(rolename, trans->rolename)) {
77808+ found = 1;
77809+ break;
77810+ }
77811+ }
77812+
77813+ if (!found)
77814+ return 0;
77815+
77816+ /* handle special roles that do not require authentication
77817+ and check ip */
77818+
77819+ FOR_EACH_ROLE_START(r)
77820+ if (!strcmp(rolename, r->rolename) &&
77821+ (r->roletype & GR_ROLE_SPECIAL)) {
77822+ found = 0;
77823+ if (r->allowed_ips != NULL) {
77824+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
77825+ if ((ntohl(curr_ip) & ipp->netmask) ==
77826+ (ntohl(ipp->addr) & ipp->netmask))
77827+ found = 1;
77828+ }
77829+ } else
77830+ found = 2;
77831+ if (!found)
77832+ return 0;
77833+
77834+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
77835+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
77836+ *salt = NULL;
77837+ *sum = NULL;
77838+ return 1;
77839+ }
77840+ }
77841+ FOR_EACH_ROLE_END(r)
77842+
77843+ for (i = 0; i < num_sprole_pws; i++) {
77844+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
77845+ *salt = acl_special_roles[i]->salt;
77846+ *sum = acl_special_roles[i]->sum;
77847+ return 1;
77848+ }
77849+ }
77850+
77851+ return 0;
77852+}
77853+
77854+static void
77855+assign_special_role(char *rolename)
77856+{
77857+ struct acl_object_label *obj;
77858+ struct acl_role_label *r;
77859+ struct acl_role_label *assigned = NULL;
77860+ struct task_struct *tsk;
77861+ struct file *filp;
77862+
77863+ FOR_EACH_ROLE_START(r)
77864+ if (!strcmp(rolename, r->rolename) &&
77865+ (r->roletype & GR_ROLE_SPECIAL)) {
77866+ assigned = r;
77867+ break;
77868+ }
77869+ FOR_EACH_ROLE_END(r)
77870+
77871+ if (!assigned)
77872+ return;
77873+
77874+ read_lock(&tasklist_lock);
77875+ read_lock(&grsec_exec_file_lock);
77876+
77877+ tsk = current->real_parent;
77878+ if (tsk == NULL)
77879+ goto out_unlock;
77880+
77881+ filp = tsk->exec_file;
77882+ if (filp == NULL)
77883+ goto out_unlock;
77884+
77885+ tsk->is_writable = 0;
77886+
77887+ tsk->acl_sp_role = 1;
77888+ tsk->acl_role_id = ++acl_sp_role_value;
77889+ tsk->role = assigned;
77890+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
77891+
77892+ /* ignore additional mmap checks for processes that are writable
77893+ by the default ACL */
77894+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77895+ if (unlikely(obj->mode & GR_WRITE))
77896+ tsk->is_writable = 1;
77897+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
77898+ if (unlikely(obj->mode & GR_WRITE))
77899+ tsk->is_writable = 1;
77900+
77901+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77902+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
77903+#endif
77904+
77905+out_unlock:
77906+ read_unlock(&grsec_exec_file_lock);
77907+ read_unlock(&tasklist_lock);
77908+ return;
77909+}
77910+
77911+int gr_check_secure_terminal(struct task_struct *task)
77912+{
77913+ struct task_struct *p, *p2, *p3;
77914+ struct files_struct *files;
77915+ struct fdtable *fdt;
77916+ struct file *our_file = NULL, *file;
77917+ int i;
77918+
77919+ if (task->signal->tty == NULL)
77920+ return 1;
77921+
77922+ files = get_files_struct(task);
77923+ if (files != NULL) {
77924+ rcu_read_lock();
77925+ fdt = files_fdtable(files);
77926+ for (i=0; i < fdt->max_fds; i++) {
77927+ file = fcheck_files(files, i);
77928+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
77929+ get_file(file);
77930+ our_file = file;
77931+ }
77932+ }
77933+ rcu_read_unlock();
77934+ put_files_struct(files);
77935+ }
77936+
77937+ if (our_file == NULL)
77938+ return 1;
77939+
77940+ read_lock(&tasklist_lock);
77941+ do_each_thread(p2, p) {
77942+ files = get_files_struct(p);
77943+ if (files == NULL ||
77944+ (p->signal && p->signal->tty == task->signal->tty)) {
77945+ if (files != NULL)
77946+ put_files_struct(files);
77947+ continue;
77948+ }
77949+ rcu_read_lock();
77950+ fdt = files_fdtable(files);
77951+ for (i=0; i < fdt->max_fds; i++) {
77952+ file = fcheck_files(files, i);
77953+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
77954+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
77955+ p3 = task;
77956+ while (p3->pid > 0) {
77957+ if (p3 == p)
77958+ break;
77959+ p3 = p3->real_parent;
77960+ }
77961+ if (p3 == p)
77962+ break;
77963+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
77964+ gr_handle_alertkill(p);
77965+ rcu_read_unlock();
77966+ put_files_struct(files);
77967+ read_unlock(&tasklist_lock);
77968+ fput(our_file);
77969+ return 0;
77970+ }
77971+ }
77972+ rcu_read_unlock();
77973+ put_files_struct(files);
77974+ } while_each_thread(p2, p);
77975+ read_unlock(&tasklist_lock);
77976+
77977+ fput(our_file);
77978+ return 1;
77979+}
77980+
77981+ssize_t
77982+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
77983+{
77984+ struct gr_arg_wrapper uwrap;
77985+ unsigned char *sprole_salt = NULL;
77986+ unsigned char *sprole_sum = NULL;
77987+ int error = sizeof (struct gr_arg_wrapper);
77988+ int error2 = 0;
77989+
77990+ mutex_lock(&gr_dev_mutex);
77991+
77992+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
77993+ error = -EPERM;
77994+ goto out;
77995+ }
77996+
77997+ if (count != sizeof (struct gr_arg_wrapper)) {
77998+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
77999+ error = -EINVAL;
78000+ goto out;
78001+ }
78002+
78003+
78004+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
78005+ gr_auth_expires = 0;
78006+ gr_auth_attempts = 0;
78007+ }
78008+
78009+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
78010+ error = -EFAULT;
78011+ goto out;
78012+ }
78013+
78014+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
78015+ error = -EINVAL;
78016+ goto out;
78017+ }
78018+
78019+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
78020+ error = -EFAULT;
78021+ goto out;
78022+ }
78023+
78024+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
78025+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78026+ time_after(gr_auth_expires, get_seconds())) {
78027+ error = -EBUSY;
78028+ goto out;
78029+ }
78030+
78031+ /* if non-root trying to do anything other than use a special role,
78032+ do not attempt authentication, do not count towards authentication
78033+ locking
78034+ */
78035+
78036+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
78037+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
78038+ current_uid()) {
78039+ error = -EPERM;
78040+ goto out;
78041+ }
78042+
78043+ /* ensure pw and special role name are null terminated */
78044+
78045+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
78046+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
78047+
78048+ /* Okay.
78049+ * We have our enough of the argument structure..(we have yet
78050+ * to copy_from_user the tables themselves) . Copy the tables
78051+ * only if we need them, i.e. for loading operations. */
78052+
78053+ switch (gr_usermode->mode) {
78054+ case GR_STATUS:
78055+ if (gr_status & GR_READY) {
78056+ error = 1;
78057+ if (!gr_check_secure_terminal(current))
78058+ error = 3;
78059+ } else
78060+ error = 2;
78061+ goto out;
78062+ case GR_SHUTDOWN:
78063+ if ((gr_status & GR_READY)
78064+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78065+ pax_open_kernel();
78066+ gr_status &= ~GR_READY;
78067+ pax_close_kernel();
78068+
78069+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
78070+ free_variables();
78071+ memset(gr_usermode, 0, sizeof (struct gr_arg));
78072+ memset(gr_system_salt, 0, GR_SALT_LEN);
78073+ memset(gr_system_sum, 0, GR_SHA_LEN);
78074+ } else if (gr_status & GR_READY) {
78075+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
78076+ error = -EPERM;
78077+ } else {
78078+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
78079+ error = -EAGAIN;
78080+ }
78081+ break;
78082+ case GR_ENABLE:
78083+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
78084+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
78085+ else {
78086+ if (gr_status & GR_READY)
78087+ error = -EAGAIN;
78088+ else
78089+ error = error2;
78090+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
78091+ }
78092+ break;
78093+ case GR_RELOAD:
78094+ if (!(gr_status & GR_READY)) {
78095+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
78096+ error = -EAGAIN;
78097+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78098+ lock_kernel();
78099+
78100+ pax_open_kernel();
78101+ gr_status &= ~GR_READY;
78102+ pax_close_kernel();
78103+
78104+ free_variables();
78105+ if (!(error2 = gracl_init(gr_usermode))) {
78106+ unlock_kernel();
78107+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
78108+ } else {
78109+ unlock_kernel();
78110+ error = error2;
78111+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78112+ }
78113+ } else {
78114+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78115+ error = -EPERM;
78116+ }
78117+ break;
78118+ case GR_SEGVMOD:
78119+ if (unlikely(!(gr_status & GR_READY))) {
78120+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
78121+ error = -EAGAIN;
78122+ break;
78123+ }
78124+
78125+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78126+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
78127+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
78128+ struct acl_subject_label *segvacl;
78129+ segvacl =
78130+ lookup_acl_subj_label(gr_usermode->segv_inode,
78131+ gr_usermode->segv_device,
78132+ current->role);
78133+ if (segvacl) {
78134+ segvacl->crashes = 0;
78135+ segvacl->expires = 0;
78136+ }
78137+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
78138+ gr_remove_uid(gr_usermode->segv_uid);
78139+ }
78140+ } else {
78141+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
78142+ error = -EPERM;
78143+ }
78144+ break;
78145+ case GR_SPROLE:
78146+ case GR_SPROLEPAM:
78147+ if (unlikely(!(gr_status & GR_READY))) {
78148+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
78149+ error = -EAGAIN;
78150+ break;
78151+ }
78152+
78153+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
78154+ current->role->expires = 0;
78155+ current->role->auth_attempts = 0;
78156+ }
78157+
78158+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78159+ time_after(current->role->expires, get_seconds())) {
78160+ error = -EBUSY;
78161+ goto out;
78162+ }
78163+
78164+ if (lookup_special_role_auth
78165+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
78166+ && ((!sprole_salt && !sprole_sum)
78167+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
78168+ char *p = "";
78169+ assign_special_role(gr_usermode->sp_role);
78170+ read_lock(&tasklist_lock);
78171+ if (current->real_parent)
78172+ p = current->real_parent->role->rolename;
78173+ read_unlock(&tasklist_lock);
78174+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
78175+ p, acl_sp_role_value);
78176+ } else {
78177+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
78178+ error = -EPERM;
78179+ if(!(current->role->auth_attempts++))
78180+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78181+
78182+ goto out;
78183+ }
78184+ break;
78185+ case GR_UNSPROLE:
78186+ if (unlikely(!(gr_status & GR_READY))) {
78187+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
78188+ error = -EAGAIN;
78189+ break;
78190+ }
78191+
78192+ if (current->role->roletype & GR_ROLE_SPECIAL) {
78193+ char *p = "";
78194+ int i = 0;
78195+
78196+ read_lock(&tasklist_lock);
78197+ if (current->real_parent) {
78198+ p = current->real_parent->role->rolename;
78199+ i = current->real_parent->acl_role_id;
78200+ }
78201+ read_unlock(&tasklist_lock);
78202+
78203+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
78204+ gr_set_acls(1);
78205+ } else {
78206+ error = -EPERM;
78207+ goto out;
78208+ }
78209+ break;
78210+ default:
78211+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
78212+ error = -EINVAL;
78213+ break;
78214+ }
78215+
78216+ if (error != -EPERM)
78217+ goto out;
78218+
78219+ if(!(gr_auth_attempts++))
78220+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78221+
78222+ out:
78223+ mutex_unlock(&gr_dev_mutex);
78224+ return error;
78225+}
78226+
78227+/* must be called with
78228+ rcu_read_lock();
78229+ read_lock(&tasklist_lock);
78230+ read_lock(&grsec_exec_file_lock);
78231+*/
78232+int gr_apply_subject_to_task(struct task_struct *task)
78233+{
78234+ struct acl_object_label *obj;
78235+ char *tmpname;
78236+ struct acl_subject_label *tmpsubj;
78237+ struct file *filp;
78238+ struct name_entry *nmatch;
78239+
78240+ filp = task->exec_file;
78241+ if (filp == NULL)
78242+ return 0;
78243+
78244+ /* the following is to apply the correct subject
78245+ on binaries running when the RBAC system
78246+ is enabled, when the binaries have been
78247+ replaced or deleted since their execution
78248+ -----
78249+ when the RBAC system starts, the inode/dev
78250+ from exec_file will be one the RBAC system
78251+ is unaware of. It only knows the inode/dev
78252+ of the present file on disk, or the absence
78253+ of it.
78254+ */
78255+ preempt_disable();
78256+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78257+
78258+ nmatch = lookup_name_entry(tmpname);
78259+ preempt_enable();
78260+ tmpsubj = NULL;
78261+ if (nmatch) {
78262+ if (nmatch->deleted)
78263+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78264+ else
78265+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78266+ if (tmpsubj != NULL)
78267+ task->acl = tmpsubj;
78268+ }
78269+ if (tmpsubj == NULL)
78270+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
78271+ task->role);
78272+ if (task->acl) {
78273+ task->is_writable = 0;
78274+ /* ignore additional mmap checks for processes that are writable
78275+ by the default ACL */
78276+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78277+ if (unlikely(obj->mode & GR_WRITE))
78278+ task->is_writable = 1;
78279+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78280+ if (unlikely(obj->mode & GR_WRITE))
78281+ task->is_writable = 1;
78282+
78283+ gr_set_proc_res(task);
78284+
78285+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78286+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
78287+#endif
78288+ } else {
78289+ return 1;
78290+ }
78291+
78292+ return 0;
78293+}
78294+
78295+int
78296+gr_set_acls(const int type)
78297+{
78298+ struct task_struct *task, *task2;
78299+ struct acl_role_label *role = current->role;
78300+ __u16 acl_role_id = current->acl_role_id;
78301+ const struct cred *cred;
78302+ int ret;
78303+
78304+ rcu_read_lock();
78305+ read_lock(&tasklist_lock);
78306+ read_lock(&grsec_exec_file_lock);
78307+ do_each_thread(task2, task) {
78308+ /* check to see if we're called from the exit handler,
78309+ if so, only replace ACLs that have inherited the admin
78310+ ACL */
78311+
78312+ if (type && (task->role != role ||
78313+ task->acl_role_id != acl_role_id))
78314+ continue;
78315+
78316+ task->acl_role_id = 0;
78317+ task->acl_sp_role = 0;
78318+
78319+ if (task->exec_file) {
78320+ cred = __task_cred(task);
78321+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
78322+
78323+ ret = gr_apply_subject_to_task(task);
78324+ if (ret) {
78325+ read_unlock(&grsec_exec_file_lock);
78326+ read_unlock(&tasklist_lock);
78327+ rcu_read_unlock();
78328+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
78329+ return ret;
78330+ }
78331+ } else {
78332+ // it's a kernel process
78333+ task->role = kernel_role;
78334+ task->acl = kernel_role->root_label;
78335+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
78336+ task->acl->mode &= ~GR_PROCFIND;
78337+#endif
78338+ }
78339+ } while_each_thread(task2, task);
78340+ read_unlock(&grsec_exec_file_lock);
78341+ read_unlock(&tasklist_lock);
78342+ rcu_read_unlock();
78343+
78344+ return 0;
78345+}
78346+
78347+void
78348+gr_learn_resource(const struct task_struct *task,
78349+ const int res, const unsigned long wanted, const int gt)
78350+{
78351+ struct acl_subject_label *acl;
78352+ const struct cred *cred;
78353+
78354+ if (unlikely((gr_status & GR_READY) &&
78355+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
78356+ goto skip_reslog;
78357+
78358+#ifdef CONFIG_GRKERNSEC_RESLOG
78359+ gr_log_resource(task, res, wanted, gt);
78360+#endif
78361+ skip_reslog:
78362+
78363+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
78364+ return;
78365+
78366+ acl = task->acl;
78367+
78368+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
78369+ !(acl->resmask & (1 << (unsigned short) res))))
78370+ return;
78371+
78372+ if (wanted >= acl->res[res].rlim_cur) {
78373+ unsigned long res_add;
78374+
78375+ res_add = wanted;
78376+ switch (res) {
78377+ case RLIMIT_CPU:
78378+ res_add += GR_RLIM_CPU_BUMP;
78379+ break;
78380+ case RLIMIT_FSIZE:
78381+ res_add += GR_RLIM_FSIZE_BUMP;
78382+ break;
78383+ case RLIMIT_DATA:
78384+ res_add += GR_RLIM_DATA_BUMP;
78385+ break;
78386+ case RLIMIT_STACK:
78387+ res_add += GR_RLIM_STACK_BUMP;
78388+ break;
78389+ case RLIMIT_CORE:
78390+ res_add += GR_RLIM_CORE_BUMP;
78391+ break;
78392+ case RLIMIT_RSS:
78393+ res_add += GR_RLIM_RSS_BUMP;
78394+ break;
78395+ case RLIMIT_NPROC:
78396+ res_add += GR_RLIM_NPROC_BUMP;
78397+ break;
78398+ case RLIMIT_NOFILE:
78399+ res_add += GR_RLIM_NOFILE_BUMP;
78400+ break;
78401+ case RLIMIT_MEMLOCK:
78402+ res_add += GR_RLIM_MEMLOCK_BUMP;
78403+ break;
78404+ case RLIMIT_AS:
78405+ res_add += GR_RLIM_AS_BUMP;
78406+ break;
78407+ case RLIMIT_LOCKS:
78408+ res_add += GR_RLIM_LOCKS_BUMP;
78409+ break;
78410+ case RLIMIT_SIGPENDING:
78411+ res_add += GR_RLIM_SIGPENDING_BUMP;
78412+ break;
78413+ case RLIMIT_MSGQUEUE:
78414+ res_add += GR_RLIM_MSGQUEUE_BUMP;
78415+ break;
78416+ case RLIMIT_NICE:
78417+ res_add += GR_RLIM_NICE_BUMP;
78418+ break;
78419+ case RLIMIT_RTPRIO:
78420+ res_add += GR_RLIM_RTPRIO_BUMP;
78421+ break;
78422+ case RLIMIT_RTTIME:
78423+ res_add += GR_RLIM_RTTIME_BUMP;
78424+ break;
78425+ }
78426+
78427+ acl->res[res].rlim_cur = res_add;
78428+
78429+ if (wanted > acl->res[res].rlim_max)
78430+ acl->res[res].rlim_max = res_add;
78431+
78432+ /* only log the subject filename, since resource logging is supported for
78433+ single-subject learning only */
78434+ rcu_read_lock();
78435+ cred = __task_cred(task);
78436+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
78437+ task->role->roletype, cred->uid, cred->gid, acl->filename,
78438+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
78439+ "", (unsigned long) res, &task->signal->saved_ip);
78440+ rcu_read_unlock();
78441+ }
78442+
78443+ return;
78444+}
78445+
78446+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
78447+void
78448+pax_set_initial_flags(struct linux_binprm *bprm)
78449+{
78450+ struct task_struct *task = current;
78451+ struct acl_subject_label *proc;
78452+ unsigned long flags;
78453+
78454+ if (unlikely(!(gr_status & GR_READY)))
78455+ return;
78456+
78457+ flags = pax_get_flags(task);
78458+
78459+ proc = task->acl;
78460+
78461+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
78462+ flags &= ~MF_PAX_PAGEEXEC;
78463+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
78464+ flags &= ~MF_PAX_SEGMEXEC;
78465+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
78466+ flags &= ~MF_PAX_RANDMMAP;
78467+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
78468+ flags &= ~MF_PAX_EMUTRAMP;
78469+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
78470+ flags &= ~MF_PAX_MPROTECT;
78471+
78472+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
78473+ flags |= MF_PAX_PAGEEXEC;
78474+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
78475+ flags |= MF_PAX_SEGMEXEC;
78476+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
78477+ flags |= MF_PAX_RANDMMAP;
78478+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
78479+ flags |= MF_PAX_EMUTRAMP;
78480+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
78481+ flags |= MF_PAX_MPROTECT;
78482+
78483+ pax_set_flags(task, flags);
78484+
78485+ return;
78486+}
78487+#endif
78488+
78489+#ifdef CONFIG_SYSCTL
78490+/* Eric Biederman likes breaking userland ABI and every inode-based security
78491+ system to save 35kb of memory */
78492+
78493+/* we modify the passed in filename, but adjust it back before returning */
78494+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
78495+{
78496+ struct name_entry *nmatch;
78497+ char *p, *lastp = NULL;
78498+ struct acl_object_label *obj = NULL, *tmp;
78499+ struct acl_subject_label *tmpsubj;
78500+ char c = '\0';
78501+
78502+ read_lock(&gr_inode_lock);
78503+
78504+ p = name + len - 1;
78505+ do {
78506+ nmatch = lookup_name_entry(name);
78507+ if (lastp != NULL)
78508+ *lastp = c;
78509+
78510+ if (nmatch == NULL)
78511+ goto next_component;
78512+ tmpsubj = current->acl;
78513+ do {
78514+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
78515+ if (obj != NULL) {
78516+ tmp = obj->globbed;
78517+ while (tmp) {
78518+ if (!glob_match(tmp->filename, name)) {
78519+ obj = tmp;
78520+ goto found_obj;
78521+ }
78522+ tmp = tmp->next;
78523+ }
78524+ goto found_obj;
78525+ }
78526+ } while ((tmpsubj = tmpsubj->parent_subject));
78527+next_component:
78528+ /* end case */
78529+ if (p == name)
78530+ break;
78531+
78532+ while (*p != '/')
78533+ p--;
78534+ if (p == name)
78535+ lastp = p + 1;
78536+ else {
78537+ lastp = p;
78538+ p--;
78539+ }
78540+ c = *lastp;
78541+ *lastp = '\0';
78542+ } while (1);
78543+found_obj:
78544+ read_unlock(&gr_inode_lock);
78545+ /* obj returned will always be non-null */
78546+ return obj;
78547+}
78548+
78549+/* returns 0 when allowing, non-zero on error
78550+ op of 0 is used for readdir, so we don't log the names of hidden files
78551+*/
78552+__u32
78553+gr_handle_sysctl(const struct ctl_table *table, const int op)
78554+{
78555+ ctl_table *tmp;
78556+ const char *proc_sys = "/proc/sys";
78557+ char *path;
78558+ struct acl_object_label *obj;
78559+ unsigned short len = 0, pos = 0, depth = 0, i;
78560+ __u32 err = 0;
78561+ __u32 mode = 0;
78562+
78563+ if (unlikely(!(gr_status & GR_READY)))
78564+ return 0;
78565+
78566+ /* for now, ignore operations on non-sysctl entries if it's not a
78567+ readdir*/
78568+ if (table->child != NULL && op != 0)
78569+ return 0;
78570+
78571+ mode |= GR_FIND;
78572+ /* it's only a read if it's an entry, read on dirs is for readdir */
78573+ if (op & MAY_READ)
78574+ mode |= GR_READ;
78575+ if (op & MAY_WRITE)
78576+ mode |= GR_WRITE;
78577+
78578+ preempt_disable();
78579+
78580+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
78581+
78582+ /* it's only a read/write if it's an actual entry, not a dir
78583+ (which are opened for readdir)
78584+ */
78585+
78586+ /* convert the requested sysctl entry into a pathname */
78587+
78588+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78589+ len += strlen(tmp->procname);
78590+ len++;
78591+ depth++;
78592+ }
78593+
78594+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
78595+ /* deny */
78596+ goto out;
78597+ }
78598+
78599+ memset(path, 0, PAGE_SIZE);
78600+
78601+ memcpy(path, proc_sys, strlen(proc_sys));
78602+
78603+ pos += strlen(proc_sys);
78604+
78605+ for (; depth > 0; depth--) {
78606+ path[pos] = '/';
78607+ pos++;
78608+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78609+ if (depth == i) {
78610+ memcpy(path + pos, tmp->procname,
78611+ strlen(tmp->procname));
78612+ pos += strlen(tmp->procname);
78613+ }
78614+ i++;
78615+ }
78616+ }
78617+
78618+ obj = gr_lookup_by_name(path, pos);
78619+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
78620+
78621+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
78622+ ((err & mode) != mode))) {
78623+ __u32 new_mode = mode;
78624+
78625+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78626+
78627+ err = 0;
78628+ gr_log_learn_sysctl(path, new_mode);
78629+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
78630+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
78631+ err = -ENOENT;
78632+ } else if (!(err & GR_FIND)) {
78633+ err = -ENOENT;
78634+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
78635+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
78636+ path, (mode & GR_READ) ? " reading" : "",
78637+ (mode & GR_WRITE) ? " writing" : "");
78638+ err = -EACCES;
78639+ } else if ((err & mode) != mode) {
78640+ err = -EACCES;
78641+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
78642+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
78643+ path, (mode & GR_READ) ? " reading" : "",
78644+ (mode & GR_WRITE) ? " writing" : "");
78645+ err = 0;
78646+ } else
78647+ err = 0;
78648+
78649+ out:
78650+ preempt_enable();
78651+
78652+ return err;
78653+}
78654+#endif
78655+
78656+int
78657+gr_handle_proc_ptrace(struct task_struct *task)
78658+{
78659+ struct file *filp;
78660+ struct task_struct *tmp = task;
78661+ struct task_struct *curtemp = current;
78662+ __u32 retmode;
78663+
78664+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78665+ if (unlikely(!(gr_status & GR_READY)))
78666+ return 0;
78667+#endif
78668+
78669+ read_lock(&tasklist_lock);
78670+ read_lock(&grsec_exec_file_lock);
78671+ filp = task->exec_file;
78672+
78673+ while (tmp->pid > 0) {
78674+ if (tmp == curtemp)
78675+ break;
78676+ tmp = tmp->real_parent;
78677+ }
78678+
78679+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78680+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
78681+ read_unlock(&grsec_exec_file_lock);
78682+ read_unlock(&tasklist_lock);
78683+ return 1;
78684+ }
78685+
78686+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78687+ if (!(gr_status & GR_READY)) {
78688+ read_unlock(&grsec_exec_file_lock);
78689+ read_unlock(&tasklist_lock);
78690+ return 0;
78691+ }
78692+#endif
78693+
78694+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
78695+ read_unlock(&grsec_exec_file_lock);
78696+ read_unlock(&tasklist_lock);
78697+
78698+ if (retmode & GR_NOPTRACE)
78699+ return 1;
78700+
78701+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
78702+ && (current->acl != task->acl || (current->acl != current->role->root_label
78703+ && current->pid != task->pid)))
78704+ return 1;
78705+
78706+ return 0;
78707+}
78708+
78709+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
78710+{
78711+ if (unlikely(!(gr_status & GR_READY)))
78712+ return;
78713+
78714+ if (!(current->role->roletype & GR_ROLE_GOD))
78715+ return;
78716+
78717+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
78718+ p->role->rolename, gr_task_roletype_to_char(p),
78719+ p->acl->filename);
78720+}
78721+
78722+int
78723+gr_handle_ptrace(struct task_struct *task, const long request)
78724+{
78725+ struct task_struct *tmp = task;
78726+ struct task_struct *curtemp = current;
78727+ __u32 retmode;
78728+
78729+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78730+ if (unlikely(!(gr_status & GR_READY)))
78731+ return 0;
78732+#endif
78733+
78734+ read_lock(&tasklist_lock);
78735+ while (tmp->pid > 0) {
78736+ if (tmp == curtemp)
78737+ break;
78738+ tmp = tmp->real_parent;
78739+ }
78740+
78741+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78742+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
78743+ read_unlock(&tasklist_lock);
78744+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78745+ return 1;
78746+ }
78747+ read_unlock(&tasklist_lock);
78748+
78749+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78750+ if (!(gr_status & GR_READY))
78751+ return 0;
78752+#endif
78753+
78754+ read_lock(&grsec_exec_file_lock);
78755+ if (unlikely(!task->exec_file)) {
78756+ read_unlock(&grsec_exec_file_lock);
78757+ return 0;
78758+ }
78759+
78760+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
78761+ read_unlock(&grsec_exec_file_lock);
78762+
78763+ if (retmode & GR_NOPTRACE) {
78764+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78765+ return 1;
78766+ }
78767+
78768+ if (retmode & GR_PTRACERD) {
78769+ switch (request) {
78770+ case PTRACE_POKETEXT:
78771+ case PTRACE_POKEDATA:
78772+ case PTRACE_POKEUSR:
78773+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
78774+ case PTRACE_SETREGS:
78775+ case PTRACE_SETFPREGS:
78776+#endif
78777+#ifdef CONFIG_X86
78778+ case PTRACE_SETFPXREGS:
78779+#endif
78780+#ifdef CONFIG_ALTIVEC
78781+ case PTRACE_SETVRREGS:
78782+#endif
78783+ return 1;
78784+ default:
78785+ return 0;
78786+ }
78787+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
78788+ !(current->role->roletype & GR_ROLE_GOD) &&
78789+ (current->acl != task->acl)) {
78790+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78791+ return 1;
78792+ }
78793+
78794+ return 0;
78795+}
78796+
78797+static int is_writable_mmap(const struct file *filp)
78798+{
78799+ struct task_struct *task = current;
78800+ struct acl_object_label *obj, *obj2;
78801+
78802+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
78803+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
78804+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78805+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
78806+ task->role->root_label);
78807+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
78808+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
78809+ return 1;
78810+ }
78811+ }
78812+ return 0;
78813+}
78814+
78815+int
78816+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
78817+{
78818+ __u32 mode;
78819+
78820+ if (unlikely(!file || !(prot & PROT_EXEC)))
78821+ return 1;
78822+
78823+ if (is_writable_mmap(file))
78824+ return 0;
78825+
78826+ mode =
78827+ gr_search_file(file->f_path.dentry,
78828+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78829+ file->f_path.mnt);
78830+
78831+ if (!gr_tpe_allow(file))
78832+ return 0;
78833+
78834+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78835+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78836+ return 0;
78837+ } else if (unlikely(!(mode & GR_EXEC))) {
78838+ return 0;
78839+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78840+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78841+ return 1;
78842+ }
78843+
78844+ return 1;
78845+}
78846+
78847+int
78848+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78849+{
78850+ __u32 mode;
78851+
78852+ if (unlikely(!file || !(prot & PROT_EXEC)))
78853+ return 1;
78854+
78855+ if (is_writable_mmap(file))
78856+ return 0;
78857+
78858+ mode =
78859+ gr_search_file(file->f_path.dentry,
78860+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78861+ file->f_path.mnt);
78862+
78863+ if (!gr_tpe_allow(file))
78864+ return 0;
78865+
78866+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78867+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78868+ return 0;
78869+ } else if (unlikely(!(mode & GR_EXEC))) {
78870+ return 0;
78871+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78872+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78873+ return 1;
78874+ }
78875+
78876+ return 1;
78877+}
78878+
78879+void
78880+gr_acl_handle_psacct(struct task_struct *task, const long code)
78881+{
78882+ unsigned long runtime;
78883+ unsigned long cputime;
78884+ unsigned int wday, cday;
78885+ __u8 whr, chr;
78886+ __u8 wmin, cmin;
78887+ __u8 wsec, csec;
78888+ struct timespec timeval;
78889+
78890+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
78891+ !(task->acl->mode & GR_PROCACCT)))
78892+ return;
78893+
78894+ do_posix_clock_monotonic_gettime(&timeval);
78895+ runtime = timeval.tv_sec - task->start_time.tv_sec;
78896+ wday = runtime / (3600 * 24);
78897+ runtime -= wday * (3600 * 24);
78898+ whr = runtime / 3600;
78899+ runtime -= whr * 3600;
78900+ wmin = runtime / 60;
78901+ runtime -= wmin * 60;
78902+ wsec = runtime;
78903+
78904+ cputime = (task->utime + task->stime) / HZ;
78905+ cday = cputime / (3600 * 24);
78906+ cputime -= cday * (3600 * 24);
78907+ chr = cputime / 3600;
78908+ cputime -= chr * 3600;
78909+ cmin = cputime / 60;
78910+ cputime -= cmin * 60;
78911+ csec = cputime;
78912+
78913+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
78914+
78915+ return;
78916+}
78917+
78918+void gr_set_kernel_label(struct task_struct *task)
78919+{
78920+ if (gr_status & GR_READY) {
78921+ task->role = kernel_role;
78922+ task->acl = kernel_role->root_label;
78923+ }
78924+ return;
78925+}
78926+
78927+#ifdef CONFIG_TASKSTATS
78928+int gr_is_taskstats_denied(int pid)
78929+{
78930+ struct task_struct *task;
78931+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78932+ const struct cred *cred;
78933+#endif
78934+ int ret = 0;
78935+
78936+ /* restrict taskstats viewing to un-chrooted root users
78937+ who have the 'view' subject flag if the RBAC system is enabled
78938+ */
78939+
78940+ rcu_read_lock();
78941+ read_lock(&tasklist_lock);
78942+ task = find_task_by_vpid(pid);
78943+ if (task) {
78944+#ifdef CONFIG_GRKERNSEC_CHROOT
78945+ if (proc_is_chrooted(task))
78946+ ret = -EACCES;
78947+#endif
78948+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78949+ cred = __task_cred(task);
78950+#ifdef CONFIG_GRKERNSEC_PROC_USER
78951+ if (cred->uid != 0)
78952+ ret = -EACCES;
78953+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78954+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
78955+ ret = -EACCES;
78956+#endif
78957+#endif
78958+ if (gr_status & GR_READY) {
78959+ if (!(task->acl->mode & GR_VIEW))
78960+ ret = -EACCES;
78961+ }
78962+ } else
78963+ ret = -ENOENT;
78964+
78965+ read_unlock(&tasklist_lock);
78966+ rcu_read_unlock();
78967+
78968+ return ret;
78969+}
78970+#endif
78971+
78972+/* AUXV entries are filled via a descendant of search_binary_handler
78973+ after we've already applied the subject for the target
78974+*/
78975+int gr_acl_enable_at_secure(void)
78976+{
78977+ if (unlikely(!(gr_status & GR_READY)))
78978+ return 0;
78979+
78980+ if (current->acl->mode & GR_ATSECURE)
78981+ return 1;
78982+
78983+ return 0;
78984+}
78985+
78986+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
78987+{
78988+ struct task_struct *task = current;
78989+ struct dentry *dentry = file->f_path.dentry;
78990+ struct vfsmount *mnt = file->f_path.mnt;
78991+ struct acl_object_label *obj, *tmp;
78992+ struct acl_subject_label *subj;
78993+ unsigned int bufsize;
78994+ int is_not_root;
78995+ char *path;
78996+ dev_t dev = __get_dev(dentry);
78997+
78998+ if (unlikely(!(gr_status & GR_READY)))
78999+ return 1;
79000+
79001+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79002+ return 1;
79003+
79004+ /* ignore Eric Biederman */
79005+ if (IS_PRIVATE(dentry->d_inode))
79006+ return 1;
79007+
79008+ subj = task->acl;
79009+ do {
79010+ obj = lookup_acl_obj_label(ino, dev, subj);
79011+ if (obj != NULL)
79012+ return (obj->mode & GR_FIND) ? 1 : 0;
79013+ } while ((subj = subj->parent_subject));
79014+
79015+ /* this is purely an optimization since we're looking for an object
79016+ for the directory we're doing a readdir on
79017+ if it's possible for any globbed object to match the entry we're
79018+ filling into the directory, then the object we find here will be
79019+ an anchor point with attached globbed objects
79020+ */
79021+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
79022+ if (obj->globbed == NULL)
79023+ return (obj->mode & GR_FIND) ? 1 : 0;
79024+
79025+ is_not_root = ((obj->filename[0] == '/') &&
79026+ (obj->filename[1] == '\0')) ? 0 : 1;
79027+ bufsize = PAGE_SIZE - namelen - is_not_root;
79028+
79029+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
79030+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
79031+ return 1;
79032+
79033+ preempt_disable();
79034+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
79035+ bufsize);
79036+
79037+ bufsize = strlen(path);
79038+
79039+ /* if base is "/", don't append an additional slash */
79040+ if (is_not_root)
79041+ *(path + bufsize) = '/';
79042+ memcpy(path + bufsize + is_not_root, name, namelen);
79043+ *(path + bufsize + namelen + is_not_root) = '\0';
79044+
79045+ tmp = obj->globbed;
79046+ while (tmp) {
79047+ if (!glob_match(tmp->filename, path)) {
79048+ preempt_enable();
79049+ return (tmp->mode & GR_FIND) ? 1 : 0;
79050+ }
79051+ tmp = tmp->next;
79052+ }
79053+ preempt_enable();
79054+ return (obj->mode & GR_FIND) ? 1 : 0;
79055+}
79056+
79057+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
79058+EXPORT_SYMBOL(gr_acl_is_enabled);
79059+#endif
79060+EXPORT_SYMBOL(gr_learn_resource);
79061+EXPORT_SYMBOL(gr_set_kernel_label);
79062+#ifdef CONFIG_SECURITY
79063+EXPORT_SYMBOL(gr_check_user_change);
79064+EXPORT_SYMBOL(gr_check_group_change);
79065+#endif
79066+
79067diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
79068new file mode 100644
79069index 0000000..34fefda
79070--- /dev/null
79071+++ b/grsecurity/gracl_alloc.c
79072@@ -0,0 +1,105 @@
79073+#include <linux/kernel.h>
79074+#include <linux/mm.h>
79075+#include <linux/slab.h>
79076+#include <linux/vmalloc.h>
79077+#include <linux/gracl.h>
79078+#include <linux/grsecurity.h>
79079+
79080+static unsigned long alloc_stack_next = 1;
79081+static unsigned long alloc_stack_size = 1;
79082+static void **alloc_stack;
79083+
79084+static __inline__ int
79085+alloc_pop(void)
79086+{
79087+ if (alloc_stack_next == 1)
79088+ return 0;
79089+
79090+ kfree(alloc_stack[alloc_stack_next - 2]);
79091+
79092+ alloc_stack_next--;
79093+
79094+ return 1;
79095+}
79096+
79097+static __inline__ int
79098+alloc_push(void *buf)
79099+{
79100+ if (alloc_stack_next >= alloc_stack_size)
79101+ return 1;
79102+
79103+ alloc_stack[alloc_stack_next - 1] = buf;
79104+
79105+ alloc_stack_next++;
79106+
79107+ return 0;
79108+}
79109+
79110+void *
79111+acl_alloc(unsigned long len)
79112+{
79113+ void *ret = NULL;
79114+
79115+ if (!len || len > PAGE_SIZE)
79116+ goto out;
79117+
79118+ ret = kmalloc(len, GFP_KERNEL);
79119+
79120+ if (ret) {
79121+ if (alloc_push(ret)) {
79122+ kfree(ret);
79123+ ret = NULL;
79124+ }
79125+ }
79126+
79127+out:
79128+ return ret;
79129+}
79130+
79131+void *
79132+acl_alloc_num(unsigned long num, unsigned long len)
79133+{
79134+ if (!len || (num > (PAGE_SIZE / len)))
79135+ return NULL;
79136+
79137+ return acl_alloc(num * len);
79138+}
79139+
79140+void
79141+acl_free_all(void)
79142+{
79143+ if (gr_acl_is_enabled() || !alloc_stack)
79144+ return;
79145+
79146+ while (alloc_pop()) ;
79147+
79148+ if (alloc_stack) {
79149+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
79150+ kfree(alloc_stack);
79151+ else
79152+ vfree(alloc_stack);
79153+ }
79154+
79155+ alloc_stack = NULL;
79156+ alloc_stack_size = 1;
79157+ alloc_stack_next = 1;
79158+
79159+ return;
79160+}
79161+
79162+int
79163+acl_alloc_stack_init(unsigned long size)
79164+{
79165+ if ((size * sizeof (void *)) <= PAGE_SIZE)
79166+ alloc_stack =
79167+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
79168+ else
79169+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
79170+
79171+ alloc_stack_size = size;
79172+
79173+ if (!alloc_stack)
79174+ return 0;
79175+ else
79176+ return 1;
79177+}
79178diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
79179new file mode 100644
79180index 0000000..955ddfb
79181--- /dev/null
79182+++ b/grsecurity/gracl_cap.c
79183@@ -0,0 +1,101 @@
79184+#include <linux/kernel.h>
79185+#include <linux/module.h>
79186+#include <linux/sched.h>
79187+#include <linux/gracl.h>
79188+#include <linux/grsecurity.h>
79189+#include <linux/grinternal.h>
79190+
79191+extern const char *captab_log[];
79192+extern int captab_log_entries;
79193+
79194+int
79195+gr_acl_is_capable(const int cap)
79196+{
79197+ struct task_struct *task = current;
79198+ const struct cred *cred = current_cred();
79199+ struct acl_subject_label *curracl;
79200+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79201+ kernel_cap_t cap_audit = __cap_empty_set;
79202+
79203+ if (!gr_acl_is_enabled())
79204+ return 1;
79205+
79206+ curracl = task->acl;
79207+
79208+ cap_drop = curracl->cap_lower;
79209+ cap_mask = curracl->cap_mask;
79210+ cap_audit = curracl->cap_invert_audit;
79211+
79212+ while ((curracl = curracl->parent_subject)) {
79213+ /* if the cap isn't specified in the current computed mask but is specified in the
79214+ current level subject, and is lowered in the current level subject, then add
79215+ it to the set of dropped capabilities
79216+ otherwise, add the current level subject's mask to the current computed mask
79217+ */
79218+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79219+ cap_raise(cap_mask, cap);
79220+ if (cap_raised(curracl->cap_lower, cap))
79221+ cap_raise(cap_drop, cap);
79222+ if (cap_raised(curracl->cap_invert_audit, cap))
79223+ cap_raise(cap_audit, cap);
79224+ }
79225+ }
79226+
79227+ if (!cap_raised(cap_drop, cap)) {
79228+ if (cap_raised(cap_audit, cap))
79229+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
79230+ return 1;
79231+ }
79232+
79233+ curracl = task->acl;
79234+
79235+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
79236+ && cap_raised(cred->cap_effective, cap)) {
79237+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79238+ task->role->roletype, cred->uid,
79239+ cred->gid, task->exec_file ?
79240+ gr_to_filename(task->exec_file->f_path.dentry,
79241+ task->exec_file->f_path.mnt) : curracl->filename,
79242+ curracl->filename, 0UL,
79243+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
79244+ return 1;
79245+ }
79246+
79247+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
79248+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
79249+ return 0;
79250+}
79251+
79252+int
79253+gr_acl_is_capable_nolog(const int cap)
79254+{
79255+ struct acl_subject_label *curracl;
79256+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79257+
79258+ if (!gr_acl_is_enabled())
79259+ return 1;
79260+
79261+ curracl = current->acl;
79262+
79263+ cap_drop = curracl->cap_lower;
79264+ cap_mask = curracl->cap_mask;
79265+
79266+ while ((curracl = curracl->parent_subject)) {
79267+ /* if the cap isn't specified in the current computed mask but is specified in the
79268+ current level subject, and is lowered in the current level subject, then add
79269+ it to the set of dropped capabilities
79270+ otherwise, add the current level subject's mask to the current computed mask
79271+ */
79272+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79273+ cap_raise(cap_mask, cap);
79274+ if (cap_raised(curracl->cap_lower, cap))
79275+ cap_raise(cap_drop, cap);
79276+ }
79277+ }
79278+
79279+ if (!cap_raised(cap_drop, cap))
79280+ return 1;
79281+
79282+ return 0;
79283+}
79284+
79285diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
79286new file mode 100644
79287index 0000000..523e7e8
79288--- /dev/null
79289+++ b/grsecurity/gracl_fs.c
79290@@ -0,0 +1,435 @@
79291+#include <linux/kernel.h>
79292+#include <linux/sched.h>
79293+#include <linux/types.h>
79294+#include <linux/fs.h>
79295+#include <linux/file.h>
79296+#include <linux/stat.h>
79297+#include <linux/grsecurity.h>
79298+#include <linux/grinternal.h>
79299+#include <linux/gracl.h>
79300+
79301+umode_t
79302+gr_acl_umask(void)
79303+{
79304+ if (unlikely(!gr_acl_is_enabled()))
79305+ return 0;
79306+
79307+ return current->role->umask;
79308+}
79309+
79310+__u32
79311+gr_acl_handle_hidden_file(const struct dentry * dentry,
79312+ const struct vfsmount * mnt)
79313+{
79314+ __u32 mode;
79315+
79316+ if (unlikely(!dentry->d_inode))
79317+ return GR_FIND;
79318+
79319+ mode =
79320+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
79321+
79322+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
79323+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79324+ return mode;
79325+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
79326+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79327+ return 0;
79328+ } else if (unlikely(!(mode & GR_FIND)))
79329+ return 0;
79330+
79331+ return GR_FIND;
79332+}
79333+
79334+__u32
79335+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
79336+ int acc_mode)
79337+{
79338+ __u32 reqmode = GR_FIND;
79339+ __u32 mode;
79340+
79341+ if (unlikely(!dentry->d_inode))
79342+ return reqmode;
79343+
79344+ if (acc_mode & MAY_APPEND)
79345+ reqmode |= GR_APPEND;
79346+ else if (acc_mode & MAY_WRITE)
79347+ reqmode |= GR_WRITE;
79348+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
79349+ reqmode |= GR_READ;
79350+
79351+ mode =
79352+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79353+ mnt);
79354+
79355+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79356+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79357+ reqmode & GR_READ ? " reading" : "",
79358+ reqmode & GR_WRITE ? " writing" : reqmode &
79359+ GR_APPEND ? " appending" : "");
79360+ return reqmode;
79361+ } else
79362+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79363+ {
79364+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79365+ reqmode & GR_READ ? " reading" : "",
79366+ reqmode & GR_WRITE ? " writing" : reqmode &
79367+ GR_APPEND ? " appending" : "");
79368+ return 0;
79369+ } else if (unlikely((mode & reqmode) != reqmode))
79370+ return 0;
79371+
79372+ return reqmode;
79373+}
79374+
79375+__u32
79376+gr_acl_handle_creat(const struct dentry * dentry,
79377+ const struct dentry * p_dentry,
79378+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
79379+ const int imode)
79380+{
79381+ __u32 reqmode = GR_WRITE | GR_CREATE;
79382+ __u32 mode;
79383+
79384+ if (acc_mode & MAY_APPEND)
79385+ reqmode |= GR_APPEND;
79386+ // if a directory was required or the directory already exists, then
79387+ // don't count this open as a read
79388+ if ((acc_mode & MAY_READ) &&
79389+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
79390+ reqmode |= GR_READ;
79391+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
79392+ reqmode |= GR_SETID;
79393+
79394+ mode =
79395+ gr_check_create(dentry, p_dentry, p_mnt,
79396+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79397+
79398+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79399+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79400+ reqmode & GR_READ ? " reading" : "",
79401+ reqmode & GR_WRITE ? " writing" : reqmode &
79402+ GR_APPEND ? " appending" : "");
79403+ return reqmode;
79404+ } else
79405+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79406+ {
79407+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79408+ reqmode & GR_READ ? " reading" : "",
79409+ reqmode & GR_WRITE ? " writing" : reqmode &
79410+ GR_APPEND ? " appending" : "");
79411+ return 0;
79412+ } else if (unlikely((mode & reqmode) != reqmode))
79413+ return 0;
79414+
79415+ return reqmode;
79416+}
79417+
79418+__u32
79419+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
79420+ const int fmode)
79421+{
79422+ __u32 mode, reqmode = GR_FIND;
79423+
79424+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
79425+ reqmode |= GR_EXEC;
79426+ if (fmode & S_IWOTH)
79427+ reqmode |= GR_WRITE;
79428+ if (fmode & S_IROTH)
79429+ reqmode |= GR_READ;
79430+
79431+ mode =
79432+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79433+ mnt);
79434+
79435+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79436+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79437+ reqmode & GR_READ ? " reading" : "",
79438+ reqmode & GR_WRITE ? " writing" : "",
79439+ reqmode & GR_EXEC ? " executing" : "");
79440+ return reqmode;
79441+ } else
79442+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79443+ {
79444+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79445+ reqmode & GR_READ ? " reading" : "",
79446+ reqmode & GR_WRITE ? " writing" : "",
79447+ reqmode & GR_EXEC ? " executing" : "");
79448+ return 0;
79449+ } else if (unlikely((mode & reqmode) != reqmode))
79450+ return 0;
79451+
79452+ return reqmode;
79453+}
79454+
79455+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
79456+{
79457+ __u32 mode;
79458+
79459+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
79460+
79461+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79462+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
79463+ return mode;
79464+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79465+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
79466+ return 0;
79467+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79468+ return 0;
79469+
79470+ return (reqmode);
79471+}
79472+
79473+__u32
79474+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
79475+{
79476+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
79477+}
79478+
79479+__u32
79480+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
79481+{
79482+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
79483+}
79484+
79485+__u32
79486+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
79487+{
79488+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
79489+}
79490+
79491+__u32
79492+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
79493+{
79494+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
79495+}
79496+
79497+__u32
79498+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
79499+ umode_t *modeptr)
79500+{
79501+ mode_t mode;
79502+
79503+ *modeptr &= ~(mode_t)gr_acl_umask();
79504+ mode = *modeptr;
79505+
79506+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
79507+ return 1;
79508+
79509+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
79510+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
79511+ GR_CHMOD_ACL_MSG);
79512+ } else {
79513+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
79514+ }
79515+}
79516+
79517+__u32
79518+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
79519+{
79520+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
79521+}
79522+
79523+__u32
79524+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
79525+{
79526+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
79527+}
79528+
79529+__u32
79530+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
79531+{
79532+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
79533+}
79534+
79535+__u32
79536+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
79537+{
79538+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
79539+ GR_UNIXCONNECT_ACL_MSG);
79540+}
79541+
79542+/* hardlinks require at minimum create and link permission,
79543+ any additional privilege required is based on the
79544+ privilege of the file being linked to
79545+*/
79546+__u32
79547+gr_acl_handle_link(const struct dentry * new_dentry,
79548+ const struct dentry * parent_dentry,
79549+ const struct vfsmount * parent_mnt,
79550+ const struct dentry * old_dentry,
79551+ const struct vfsmount * old_mnt, const char *to)
79552+{
79553+ __u32 mode;
79554+ __u32 needmode = GR_CREATE | GR_LINK;
79555+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
79556+
79557+ mode =
79558+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
79559+ old_mnt);
79560+
79561+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
79562+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79563+ return mode;
79564+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79565+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79566+ return 0;
79567+ } else if (unlikely((mode & needmode) != needmode))
79568+ return 0;
79569+
79570+ return 1;
79571+}
79572+
79573+__u32
79574+gr_acl_handle_symlink(const struct dentry * new_dentry,
79575+ const struct dentry * parent_dentry,
79576+ const struct vfsmount * parent_mnt, const char *from)
79577+{
79578+ __u32 needmode = GR_WRITE | GR_CREATE;
79579+ __u32 mode;
79580+
79581+ mode =
79582+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
79583+ GR_CREATE | GR_AUDIT_CREATE |
79584+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
79585+
79586+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
79587+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79588+ return mode;
79589+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79590+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79591+ return 0;
79592+ } else if (unlikely((mode & needmode) != needmode))
79593+ return 0;
79594+
79595+ return (GR_WRITE | GR_CREATE);
79596+}
79597+
79598+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
79599+{
79600+ __u32 mode;
79601+
79602+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79603+
79604+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79605+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
79606+ return mode;
79607+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79608+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
79609+ return 0;
79610+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79611+ return 0;
79612+
79613+ return (reqmode);
79614+}
79615+
79616+__u32
79617+gr_acl_handle_mknod(const struct dentry * new_dentry,
79618+ const struct dentry * parent_dentry,
79619+ const struct vfsmount * parent_mnt,
79620+ const int mode)
79621+{
79622+ __u32 reqmode = GR_WRITE | GR_CREATE;
79623+ if (unlikely(mode & (S_ISUID | S_ISGID)))
79624+ reqmode |= GR_SETID;
79625+
79626+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79627+ reqmode, GR_MKNOD_ACL_MSG);
79628+}
79629+
79630+__u32
79631+gr_acl_handle_mkdir(const struct dentry *new_dentry,
79632+ const struct dentry *parent_dentry,
79633+ const struct vfsmount *parent_mnt)
79634+{
79635+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79636+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
79637+}
79638+
79639+#define RENAME_CHECK_SUCCESS(old, new) \
79640+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
79641+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
79642+
79643+int
79644+gr_acl_handle_rename(struct dentry *new_dentry,
79645+ struct dentry *parent_dentry,
79646+ const struct vfsmount *parent_mnt,
79647+ struct dentry *old_dentry,
79648+ struct inode *old_parent_inode,
79649+ struct vfsmount *old_mnt, const char *newname)
79650+{
79651+ __u32 comp1, comp2;
79652+ int error = 0;
79653+
79654+ if (unlikely(!gr_acl_is_enabled()))
79655+ return 0;
79656+
79657+ if (!new_dentry->d_inode) {
79658+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
79659+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
79660+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
79661+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
79662+ GR_DELETE | GR_AUDIT_DELETE |
79663+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79664+ GR_SUPPRESS, old_mnt);
79665+ } else {
79666+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
79667+ GR_CREATE | GR_DELETE |
79668+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
79669+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79670+ GR_SUPPRESS, parent_mnt);
79671+ comp2 =
79672+ gr_search_file(old_dentry,
79673+ GR_READ | GR_WRITE | GR_AUDIT_READ |
79674+ GR_DELETE | GR_AUDIT_DELETE |
79675+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
79676+ }
79677+
79678+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
79679+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
79680+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79681+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
79682+ && !(comp2 & GR_SUPPRESS)) {
79683+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79684+ error = -EACCES;
79685+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
79686+ error = -EACCES;
79687+
79688+ return error;
79689+}
79690+
79691+void
79692+gr_acl_handle_exit(void)
79693+{
79694+ u16 id;
79695+ char *rolename;
79696+ struct file *exec_file;
79697+
79698+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
79699+ !(current->role->roletype & GR_ROLE_PERSIST))) {
79700+ id = current->acl_role_id;
79701+ rolename = current->role->rolename;
79702+ gr_set_acls(1);
79703+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
79704+ }
79705+
79706+ write_lock(&grsec_exec_file_lock);
79707+ exec_file = current->exec_file;
79708+ current->exec_file = NULL;
79709+ write_unlock(&grsec_exec_file_lock);
79710+
79711+ if (exec_file)
79712+ fput(exec_file);
79713+}
79714+
79715+int
79716+gr_acl_handle_procpidmem(const struct task_struct *task)
79717+{
79718+ if (unlikely(!gr_acl_is_enabled()))
79719+ return 0;
79720+
79721+ if (task != current && task->acl->mode & GR_PROTPROCFD)
79722+ return -EACCES;
79723+
79724+ return 0;
79725+}
79726diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
79727new file mode 100644
79728index 0000000..cd07b96
79729--- /dev/null
79730+++ b/grsecurity/gracl_ip.c
79731@@ -0,0 +1,382 @@
79732+#include <linux/kernel.h>
79733+#include <asm/uaccess.h>
79734+#include <asm/errno.h>
79735+#include <net/sock.h>
79736+#include <linux/file.h>
79737+#include <linux/fs.h>
79738+#include <linux/net.h>
79739+#include <linux/in.h>
79740+#include <linux/skbuff.h>
79741+#include <linux/ip.h>
79742+#include <linux/udp.h>
79743+#include <linux/smp_lock.h>
79744+#include <linux/types.h>
79745+#include <linux/sched.h>
79746+#include <linux/netdevice.h>
79747+#include <linux/inetdevice.h>
79748+#include <linux/gracl.h>
79749+#include <linux/grsecurity.h>
79750+#include <linux/grinternal.h>
79751+
79752+#define GR_BIND 0x01
79753+#define GR_CONNECT 0x02
79754+#define GR_INVERT 0x04
79755+#define GR_BINDOVERRIDE 0x08
79756+#define GR_CONNECTOVERRIDE 0x10
79757+#define GR_SOCK_FAMILY 0x20
79758+
79759+static const char * gr_protocols[IPPROTO_MAX] = {
79760+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
79761+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
79762+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
79763+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
79764+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
79765+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
79766+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
79767+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
79768+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
79769+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
79770+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
79771+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
79772+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
79773+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
79774+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
79775+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
79776+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
79777+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
79778+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
79779+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
79780+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
79781+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
79782+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
79783+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
79784+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
79785+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
79786+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
79787+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
79788+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
79789+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
79790+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
79791+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
79792+ };
79793+
79794+static const char * gr_socktypes[SOCK_MAX] = {
79795+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
79796+ "unknown:7", "unknown:8", "unknown:9", "packet"
79797+ };
79798+
79799+static const char * gr_sockfamilies[AF_MAX+1] = {
79800+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
79801+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
79802+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
79803+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
79804+ };
79805+
79806+const char *
79807+gr_proto_to_name(unsigned char proto)
79808+{
79809+ return gr_protocols[proto];
79810+}
79811+
79812+const char *
79813+gr_socktype_to_name(unsigned char type)
79814+{
79815+ return gr_socktypes[type];
79816+}
79817+
79818+const char *
79819+gr_sockfamily_to_name(unsigned char family)
79820+{
79821+ return gr_sockfamilies[family];
79822+}
79823+
79824+int
79825+gr_search_socket(const int domain, const int type, const int protocol)
79826+{
79827+ struct acl_subject_label *curr;
79828+ const struct cred *cred = current_cred();
79829+
79830+ if (unlikely(!gr_acl_is_enabled()))
79831+ goto exit;
79832+
79833+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
79834+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
79835+ goto exit; // let the kernel handle it
79836+
79837+ curr = current->acl;
79838+
79839+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
79840+ /* the family is allowed, if this is PF_INET allow it only if
79841+ the extra sock type/protocol checks pass */
79842+ if (domain == PF_INET)
79843+ goto inet_check;
79844+ goto exit;
79845+ } else {
79846+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79847+ __u32 fakeip = 0;
79848+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79849+ current->role->roletype, cred->uid,
79850+ cred->gid, current->exec_file ?
79851+ gr_to_filename(current->exec_file->f_path.dentry,
79852+ current->exec_file->f_path.mnt) :
79853+ curr->filename, curr->filename,
79854+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
79855+ &current->signal->saved_ip);
79856+ goto exit;
79857+ }
79858+ goto exit_fail;
79859+ }
79860+
79861+inet_check:
79862+ /* the rest of this checking is for IPv4 only */
79863+ if (!curr->ips)
79864+ goto exit;
79865+
79866+ if ((curr->ip_type & (1 << type)) &&
79867+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
79868+ goto exit;
79869+
79870+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79871+ /* we don't place acls on raw sockets , and sometimes
79872+ dgram/ip sockets are opened for ioctl and not
79873+ bind/connect, so we'll fake a bind learn log */
79874+ if (type == SOCK_RAW || type == SOCK_PACKET) {
79875+ __u32 fakeip = 0;
79876+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79877+ current->role->roletype, cred->uid,
79878+ cred->gid, current->exec_file ?
79879+ gr_to_filename(current->exec_file->f_path.dentry,
79880+ current->exec_file->f_path.mnt) :
79881+ curr->filename, curr->filename,
79882+ &fakeip, 0, type,
79883+ protocol, GR_CONNECT, &current->signal->saved_ip);
79884+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
79885+ __u32 fakeip = 0;
79886+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79887+ current->role->roletype, cred->uid,
79888+ cred->gid, current->exec_file ?
79889+ gr_to_filename(current->exec_file->f_path.dentry,
79890+ current->exec_file->f_path.mnt) :
79891+ curr->filename, curr->filename,
79892+ &fakeip, 0, type,
79893+ protocol, GR_BIND, &current->signal->saved_ip);
79894+ }
79895+ /* we'll log when they use connect or bind */
79896+ goto exit;
79897+ }
79898+
79899+exit_fail:
79900+ if (domain == PF_INET)
79901+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
79902+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
79903+ else
79904+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
79905+ gr_socktype_to_name(type), protocol);
79906+
79907+ return 0;
79908+exit:
79909+ return 1;
79910+}
79911+
79912+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
79913+{
79914+ if ((ip->mode & mode) &&
79915+ (ip_port >= ip->low) &&
79916+ (ip_port <= ip->high) &&
79917+ ((ntohl(ip_addr) & our_netmask) ==
79918+ (ntohl(our_addr) & our_netmask))
79919+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
79920+ && (ip->type & (1 << type))) {
79921+ if (ip->mode & GR_INVERT)
79922+ return 2; // specifically denied
79923+ else
79924+ return 1; // allowed
79925+ }
79926+
79927+ return 0; // not specifically allowed, may continue parsing
79928+}
79929+
79930+static int
79931+gr_search_connectbind(const int full_mode, struct sock *sk,
79932+ struct sockaddr_in *addr, const int type)
79933+{
79934+ char iface[IFNAMSIZ] = {0};
79935+ struct acl_subject_label *curr;
79936+ struct acl_ip_label *ip;
79937+ struct inet_sock *isk;
79938+ struct net_device *dev;
79939+ struct in_device *idev;
79940+ unsigned long i;
79941+ int ret;
79942+ int mode = full_mode & (GR_BIND | GR_CONNECT);
79943+ __u32 ip_addr = 0;
79944+ __u32 our_addr;
79945+ __u32 our_netmask;
79946+ char *p;
79947+ __u16 ip_port = 0;
79948+ const struct cred *cred = current_cred();
79949+
79950+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
79951+ return 0;
79952+
79953+ curr = current->acl;
79954+ isk = inet_sk(sk);
79955+
79956+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
79957+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
79958+ addr->sin_addr.s_addr = curr->inaddr_any_override;
79959+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
79960+ struct sockaddr_in saddr;
79961+ int err;
79962+
79963+ saddr.sin_family = AF_INET;
79964+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
79965+ saddr.sin_port = isk->sport;
79966+
79967+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79968+ if (err)
79969+ return err;
79970+
79971+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79972+ if (err)
79973+ return err;
79974+ }
79975+
79976+ if (!curr->ips)
79977+ return 0;
79978+
79979+ ip_addr = addr->sin_addr.s_addr;
79980+ ip_port = ntohs(addr->sin_port);
79981+
79982+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79983+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79984+ current->role->roletype, cred->uid,
79985+ cred->gid, current->exec_file ?
79986+ gr_to_filename(current->exec_file->f_path.dentry,
79987+ current->exec_file->f_path.mnt) :
79988+ curr->filename, curr->filename,
79989+ &ip_addr, ip_port, type,
79990+ sk->sk_protocol, mode, &current->signal->saved_ip);
79991+ return 0;
79992+ }
79993+
79994+ for (i = 0; i < curr->ip_num; i++) {
79995+ ip = *(curr->ips + i);
79996+ if (ip->iface != NULL) {
79997+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
79998+ p = strchr(iface, ':');
79999+ if (p != NULL)
80000+ *p = '\0';
80001+ dev = dev_get_by_name(sock_net(sk), iface);
80002+ if (dev == NULL)
80003+ continue;
80004+ idev = in_dev_get(dev);
80005+ if (idev == NULL) {
80006+ dev_put(dev);
80007+ continue;
80008+ }
80009+ rcu_read_lock();
80010+ for_ifa(idev) {
80011+ if (!strcmp(ip->iface, ifa->ifa_label)) {
80012+ our_addr = ifa->ifa_address;
80013+ our_netmask = 0xffffffff;
80014+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
80015+ if (ret == 1) {
80016+ rcu_read_unlock();
80017+ in_dev_put(idev);
80018+ dev_put(dev);
80019+ return 0;
80020+ } else if (ret == 2) {
80021+ rcu_read_unlock();
80022+ in_dev_put(idev);
80023+ dev_put(dev);
80024+ goto denied;
80025+ }
80026+ }
80027+ } endfor_ifa(idev);
80028+ rcu_read_unlock();
80029+ in_dev_put(idev);
80030+ dev_put(dev);
80031+ } else {
80032+ our_addr = ip->addr;
80033+ our_netmask = ip->netmask;
80034+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
80035+ if (ret == 1)
80036+ return 0;
80037+ else if (ret == 2)
80038+ goto denied;
80039+ }
80040+ }
80041+
80042+denied:
80043+ if (mode == GR_BIND)
80044+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
80045+ else if (mode == GR_CONNECT)
80046+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
80047+
80048+ return -EACCES;
80049+}
80050+
80051+int
80052+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
80053+{
80054+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
80055+}
80056+
80057+int
80058+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
80059+{
80060+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
80061+}
80062+
80063+int gr_search_listen(struct socket *sock)
80064+{
80065+ struct sock *sk = sock->sk;
80066+ struct sockaddr_in addr;
80067+
80068+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80069+ addr.sin_port = inet_sk(sk)->sport;
80070+
80071+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80072+}
80073+
80074+int gr_search_accept(struct socket *sock)
80075+{
80076+ struct sock *sk = sock->sk;
80077+ struct sockaddr_in addr;
80078+
80079+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80080+ addr.sin_port = inet_sk(sk)->sport;
80081+
80082+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80083+}
80084+
80085+int
80086+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
80087+{
80088+ if (addr)
80089+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
80090+ else {
80091+ struct sockaddr_in sin;
80092+ const struct inet_sock *inet = inet_sk(sk);
80093+
80094+ sin.sin_addr.s_addr = inet->daddr;
80095+ sin.sin_port = inet->dport;
80096+
80097+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80098+ }
80099+}
80100+
80101+int
80102+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
80103+{
80104+ struct sockaddr_in sin;
80105+
80106+ if (unlikely(skb->len < sizeof (struct udphdr)))
80107+ return 0; // skip this packet
80108+
80109+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
80110+ sin.sin_port = udp_hdr(skb)->source;
80111+
80112+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80113+}
80114diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
80115new file mode 100644
80116index 0000000..34bdd46
80117--- /dev/null
80118+++ b/grsecurity/gracl_learn.c
80119@@ -0,0 +1,208 @@
80120+#include <linux/kernel.h>
80121+#include <linux/mm.h>
80122+#include <linux/sched.h>
80123+#include <linux/poll.h>
80124+#include <linux/smp_lock.h>
80125+#include <linux/string.h>
80126+#include <linux/file.h>
80127+#include <linux/types.h>
80128+#include <linux/vmalloc.h>
80129+#include <linux/grinternal.h>
80130+
80131+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
80132+ size_t count, loff_t *ppos);
80133+extern int gr_acl_is_enabled(void);
80134+
80135+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
80136+static int gr_learn_attached;
80137+
80138+/* use a 512k buffer */
80139+#define LEARN_BUFFER_SIZE (512 * 1024)
80140+
80141+static DEFINE_SPINLOCK(gr_learn_lock);
80142+static DEFINE_MUTEX(gr_learn_user_mutex);
80143+
80144+/* we need to maintain two buffers, so that the kernel context of grlearn
80145+ uses a semaphore around the userspace copying, and the other kernel contexts
80146+ use a spinlock when copying into the buffer, since they cannot sleep
80147+*/
80148+static char *learn_buffer;
80149+static char *learn_buffer_user;
80150+static int learn_buffer_len;
80151+static int learn_buffer_user_len;
80152+
80153+static ssize_t
80154+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
80155+{
80156+ DECLARE_WAITQUEUE(wait, current);
80157+ ssize_t retval = 0;
80158+
80159+ add_wait_queue(&learn_wait, &wait);
80160+ set_current_state(TASK_INTERRUPTIBLE);
80161+ do {
80162+ mutex_lock(&gr_learn_user_mutex);
80163+ spin_lock(&gr_learn_lock);
80164+ if (learn_buffer_len)
80165+ break;
80166+ spin_unlock(&gr_learn_lock);
80167+ mutex_unlock(&gr_learn_user_mutex);
80168+ if (file->f_flags & O_NONBLOCK) {
80169+ retval = -EAGAIN;
80170+ goto out;
80171+ }
80172+ if (signal_pending(current)) {
80173+ retval = -ERESTARTSYS;
80174+ goto out;
80175+ }
80176+
80177+ schedule();
80178+ } while (1);
80179+
80180+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
80181+ learn_buffer_user_len = learn_buffer_len;
80182+ retval = learn_buffer_len;
80183+ learn_buffer_len = 0;
80184+
80185+ spin_unlock(&gr_learn_lock);
80186+
80187+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
80188+ retval = -EFAULT;
80189+
80190+ mutex_unlock(&gr_learn_user_mutex);
80191+out:
80192+ set_current_state(TASK_RUNNING);
80193+ remove_wait_queue(&learn_wait, &wait);
80194+ return retval;
80195+}
80196+
80197+static unsigned int
80198+poll_learn(struct file * file, poll_table * wait)
80199+{
80200+ poll_wait(file, &learn_wait, wait);
80201+
80202+ if (learn_buffer_len)
80203+ return (POLLIN | POLLRDNORM);
80204+
80205+ return 0;
80206+}
80207+
80208+void
80209+gr_clear_learn_entries(void)
80210+{
80211+ char *tmp;
80212+
80213+ mutex_lock(&gr_learn_user_mutex);
80214+ spin_lock(&gr_learn_lock);
80215+ tmp = learn_buffer;
80216+ learn_buffer = NULL;
80217+ spin_unlock(&gr_learn_lock);
80218+ if (tmp)
80219+ vfree(tmp);
80220+ if (learn_buffer_user != NULL) {
80221+ vfree(learn_buffer_user);
80222+ learn_buffer_user = NULL;
80223+ }
80224+ learn_buffer_len = 0;
80225+ mutex_unlock(&gr_learn_user_mutex);
80226+
80227+ return;
80228+}
80229+
80230+void
80231+gr_add_learn_entry(const char *fmt, ...)
80232+{
80233+ va_list args;
80234+ unsigned int len;
80235+
80236+ if (!gr_learn_attached)
80237+ return;
80238+
80239+ spin_lock(&gr_learn_lock);
80240+
80241+ /* leave a gap at the end so we know when it's "full" but don't have to
80242+ compute the exact length of the string we're trying to append
80243+ */
80244+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
80245+ spin_unlock(&gr_learn_lock);
80246+ wake_up_interruptible(&learn_wait);
80247+ return;
80248+ }
80249+ if (learn_buffer == NULL) {
80250+ spin_unlock(&gr_learn_lock);
80251+ return;
80252+ }
80253+
80254+ va_start(args, fmt);
80255+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
80256+ va_end(args);
80257+
80258+ learn_buffer_len += len + 1;
80259+
80260+ spin_unlock(&gr_learn_lock);
80261+ wake_up_interruptible(&learn_wait);
80262+
80263+ return;
80264+}
80265+
80266+static int
80267+open_learn(struct inode *inode, struct file *file)
80268+{
80269+ if (file->f_mode & FMODE_READ && gr_learn_attached)
80270+ return -EBUSY;
80271+ if (file->f_mode & FMODE_READ) {
80272+ int retval = 0;
80273+ mutex_lock(&gr_learn_user_mutex);
80274+ if (learn_buffer == NULL)
80275+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
80276+ if (learn_buffer_user == NULL)
80277+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
80278+ if (learn_buffer == NULL) {
80279+ retval = -ENOMEM;
80280+ goto out_error;
80281+ }
80282+ if (learn_buffer_user == NULL) {
80283+ retval = -ENOMEM;
80284+ goto out_error;
80285+ }
80286+ learn_buffer_len = 0;
80287+ learn_buffer_user_len = 0;
80288+ gr_learn_attached = 1;
80289+out_error:
80290+ mutex_unlock(&gr_learn_user_mutex);
80291+ return retval;
80292+ }
80293+ return 0;
80294+}
80295+
80296+static int
80297+close_learn(struct inode *inode, struct file *file)
80298+{
80299+ if (file->f_mode & FMODE_READ) {
80300+ char *tmp = NULL;
80301+ mutex_lock(&gr_learn_user_mutex);
80302+ spin_lock(&gr_learn_lock);
80303+ tmp = learn_buffer;
80304+ learn_buffer = NULL;
80305+ spin_unlock(&gr_learn_lock);
80306+ if (tmp)
80307+ vfree(tmp);
80308+ if (learn_buffer_user != NULL) {
80309+ vfree(learn_buffer_user);
80310+ learn_buffer_user = NULL;
80311+ }
80312+ learn_buffer_len = 0;
80313+ learn_buffer_user_len = 0;
80314+ gr_learn_attached = 0;
80315+ mutex_unlock(&gr_learn_user_mutex);
80316+ }
80317+
80318+ return 0;
80319+}
80320+
80321+const struct file_operations grsec_fops = {
80322+ .read = read_learn,
80323+ .write = write_grsec_handler,
80324+ .open = open_learn,
80325+ .release = close_learn,
80326+ .poll = poll_learn,
80327+};
80328diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
80329new file mode 100644
80330index 0000000..70b2179
80331--- /dev/null
80332+++ b/grsecurity/gracl_res.c
80333@@ -0,0 +1,67 @@
80334+#include <linux/kernel.h>
80335+#include <linux/sched.h>
80336+#include <linux/gracl.h>
80337+#include <linux/grinternal.h>
80338+
80339+static const char *restab_log[] = {
80340+ [RLIMIT_CPU] = "RLIMIT_CPU",
80341+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
80342+ [RLIMIT_DATA] = "RLIMIT_DATA",
80343+ [RLIMIT_STACK] = "RLIMIT_STACK",
80344+ [RLIMIT_CORE] = "RLIMIT_CORE",
80345+ [RLIMIT_RSS] = "RLIMIT_RSS",
80346+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
80347+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
80348+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
80349+ [RLIMIT_AS] = "RLIMIT_AS",
80350+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
80351+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
80352+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
80353+ [RLIMIT_NICE] = "RLIMIT_NICE",
80354+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
80355+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
80356+ [GR_CRASH_RES] = "RLIMIT_CRASH"
80357+};
80358+
80359+void
80360+gr_log_resource(const struct task_struct *task,
80361+ const int res, const unsigned long wanted, const int gt)
80362+{
80363+ const struct cred *cred;
80364+ unsigned long rlim;
80365+
80366+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
80367+ return;
80368+
80369+ // not yet supported resource
80370+ if (unlikely(!restab_log[res]))
80371+ return;
80372+
80373+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
80374+ rlim = task->signal->rlim[res].rlim_max;
80375+ else
80376+ rlim = task->signal->rlim[res].rlim_cur;
80377+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
80378+ return;
80379+
80380+ rcu_read_lock();
80381+ cred = __task_cred(task);
80382+
80383+ if (res == RLIMIT_NPROC &&
80384+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
80385+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
80386+ goto out_rcu_unlock;
80387+ else if (res == RLIMIT_MEMLOCK &&
80388+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
80389+ goto out_rcu_unlock;
80390+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
80391+ goto out_rcu_unlock;
80392+ rcu_read_unlock();
80393+
80394+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
80395+
80396+ return;
80397+out_rcu_unlock:
80398+ rcu_read_unlock();
80399+ return;
80400+}
80401diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
80402new file mode 100644
80403index 0000000..1d1b734
80404--- /dev/null
80405+++ b/grsecurity/gracl_segv.c
80406@@ -0,0 +1,284 @@
80407+#include <linux/kernel.h>
80408+#include <linux/mm.h>
80409+#include <asm/uaccess.h>
80410+#include <asm/errno.h>
80411+#include <asm/mman.h>
80412+#include <net/sock.h>
80413+#include <linux/file.h>
80414+#include <linux/fs.h>
80415+#include <linux/net.h>
80416+#include <linux/in.h>
80417+#include <linux/smp_lock.h>
80418+#include <linux/slab.h>
80419+#include <linux/types.h>
80420+#include <linux/sched.h>
80421+#include <linux/timer.h>
80422+#include <linux/gracl.h>
80423+#include <linux/grsecurity.h>
80424+#include <linux/grinternal.h>
80425+
80426+static struct crash_uid *uid_set;
80427+static unsigned short uid_used;
80428+static DEFINE_SPINLOCK(gr_uid_lock);
80429+extern rwlock_t gr_inode_lock;
80430+extern struct acl_subject_label *
80431+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
80432+ struct acl_role_label *role);
80433+extern int gr_fake_force_sig(int sig, struct task_struct *t);
80434+
80435+int
80436+gr_init_uidset(void)
80437+{
80438+ uid_set =
80439+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
80440+ uid_used = 0;
80441+
80442+ return uid_set ? 1 : 0;
80443+}
80444+
80445+void
80446+gr_free_uidset(void)
80447+{
80448+ if (uid_set)
80449+ kfree(uid_set);
80450+
80451+ return;
80452+}
80453+
80454+int
80455+gr_find_uid(const uid_t uid)
80456+{
80457+ struct crash_uid *tmp = uid_set;
80458+ uid_t buid;
80459+ int low = 0, high = uid_used - 1, mid;
80460+
80461+ while (high >= low) {
80462+ mid = (low + high) >> 1;
80463+ buid = tmp[mid].uid;
80464+ if (buid == uid)
80465+ return mid;
80466+ if (buid > uid)
80467+ high = mid - 1;
80468+ if (buid < uid)
80469+ low = mid + 1;
80470+ }
80471+
80472+ return -1;
80473+}
80474+
80475+static __inline__ void
80476+gr_insertsort(void)
80477+{
80478+ unsigned short i, j;
80479+ struct crash_uid index;
80480+
80481+ for (i = 1; i < uid_used; i++) {
80482+ index = uid_set[i];
80483+ j = i;
80484+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
80485+ uid_set[j] = uid_set[j - 1];
80486+ j--;
80487+ }
80488+ uid_set[j] = index;
80489+ }
80490+
80491+ return;
80492+}
80493+
80494+static __inline__ void
80495+gr_insert_uid(const uid_t uid, const unsigned long expires)
80496+{
80497+ int loc;
80498+
80499+ if (uid_used == GR_UIDTABLE_MAX)
80500+ return;
80501+
80502+ loc = gr_find_uid(uid);
80503+
80504+ if (loc >= 0) {
80505+ uid_set[loc].expires = expires;
80506+ return;
80507+ }
80508+
80509+ uid_set[uid_used].uid = uid;
80510+ uid_set[uid_used].expires = expires;
80511+ uid_used++;
80512+
80513+ gr_insertsort();
80514+
80515+ return;
80516+}
80517+
80518+void
80519+gr_remove_uid(const unsigned short loc)
80520+{
80521+ unsigned short i;
80522+
80523+ for (i = loc + 1; i < uid_used; i++)
80524+ uid_set[i - 1] = uid_set[i];
80525+
80526+ uid_used--;
80527+
80528+ return;
80529+}
80530+
80531+int
80532+gr_check_crash_uid(const uid_t uid)
80533+{
80534+ int loc;
80535+ int ret = 0;
80536+
80537+ if (unlikely(!gr_acl_is_enabled()))
80538+ return 0;
80539+
80540+ spin_lock(&gr_uid_lock);
80541+ loc = gr_find_uid(uid);
80542+
80543+ if (loc < 0)
80544+ goto out_unlock;
80545+
80546+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
80547+ gr_remove_uid(loc);
80548+ else
80549+ ret = 1;
80550+
80551+out_unlock:
80552+ spin_unlock(&gr_uid_lock);
80553+ return ret;
80554+}
80555+
80556+static __inline__ int
80557+proc_is_setxid(const struct cred *cred)
80558+{
80559+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
80560+ cred->uid != cred->fsuid)
80561+ return 1;
80562+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
80563+ cred->gid != cred->fsgid)
80564+ return 1;
80565+
80566+ return 0;
80567+}
80568+
80569+void
80570+gr_handle_crash(struct task_struct *task, const int sig)
80571+{
80572+ struct acl_subject_label *curr;
80573+ struct task_struct *tsk, *tsk2;
80574+ const struct cred *cred;
80575+ const struct cred *cred2;
80576+
80577+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
80578+ return;
80579+
80580+ if (unlikely(!gr_acl_is_enabled()))
80581+ return;
80582+
80583+ curr = task->acl;
80584+
80585+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
80586+ return;
80587+
80588+ if (time_before_eq(curr->expires, get_seconds())) {
80589+ curr->expires = 0;
80590+ curr->crashes = 0;
80591+ }
80592+
80593+ curr->crashes++;
80594+
80595+ if (!curr->expires)
80596+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
80597+
80598+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80599+ time_after(curr->expires, get_seconds())) {
80600+ rcu_read_lock();
80601+ cred = __task_cred(task);
80602+ if (cred->uid && proc_is_setxid(cred)) {
80603+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80604+ spin_lock(&gr_uid_lock);
80605+ gr_insert_uid(cred->uid, curr->expires);
80606+ spin_unlock(&gr_uid_lock);
80607+ curr->expires = 0;
80608+ curr->crashes = 0;
80609+ read_lock(&tasklist_lock);
80610+ do_each_thread(tsk2, tsk) {
80611+ cred2 = __task_cred(tsk);
80612+ if (tsk != task && cred2->uid == cred->uid)
80613+ gr_fake_force_sig(SIGKILL, tsk);
80614+ } while_each_thread(tsk2, tsk);
80615+ read_unlock(&tasklist_lock);
80616+ } else {
80617+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80618+ read_lock(&tasklist_lock);
80619+ read_lock(&grsec_exec_file_lock);
80620+ do_each_thread(tsk2, tsk) {
80621+ if (likely(tsk != task)) {
80622+ // if this thread has the same subject as the one that triggered
80623+ // RES_CRASH and it's the same binary, kill it
80624+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
80625+ gr_fake_force_sig(SIGKILL, tsk);
80626+ }
80627+ } while_each_thread(tsk2, tsk);
80628+ read_unlock(&grsec_exec_file_lock);
80629+ read_unlock(&tasklist_lock);
80630+ }
80631+ rcu_read_unlock();
80632+ }
80633+
80634+ return;
80635+}
80636+
80637+int
80638+gr_check_crash_exec(const struct file *filp)
80639+{
80640+ struct acl_subject_label *curr;
80641+
80642+ if (unlikely(!gr_acl_is_enabled()))
80643+ return 0;
80644+
80645+ read_lock(&gr_inode_lock);
80646+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
80647+ filp->f_path.dentry->d_inode->i_sb->s_dev,
80648+ current->role);
80649+ read_unlock(&gr_inode_lock);
80650+
80651+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
80652+ (!curr->crashes && !curr->expires))
80653+ return 0;
80654+
80655+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80656+ time_after(curr->expires, get_seconds()))
80657+ return 1;
80658+ else if (time_before_eq(curr->expires, get_seconds())) {
80659+ curr->crashes = 0;
80660+ curr->expires = 0;
80661+ }
80662+
80663+ return 0;
80664+}
80665+
80666+void
80667+gr_handle_alertkill(struct task_struct *task)
80668+{
80669+ struct acl_subject_label *curracl;
80670+ __u32 curr_ip;
80671+ struct task_struct *p, *p2;
80672+
80673+ if (unlikely(!gr_acl_is_enabled()))
80674+ return;
80675+
80676+ curracl = task->acl;
80677+ curr_ip = task->signal->curr_ip;
80678+
80679+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
80680+ read_lock(&tasklist_lock);
80681+ do_each_thread(p2, p) {
80682+ if (p->signal->curr_ip == curr_ip)
80683+ gr_fake_force_sig(SIGKILL, p);
80684+ } while_each_thread(p2, p);
80685+ read_unlock(&tasklist_lock);
80686+ } else if (curracl->mode & GR_KILLPROC)
80687+ gr_fake_force_sig(SIGKILL, task);
80688+
80689+ return;
80690+}
80691diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
80692new file mode 100644
80693index 0000000..9d83a69
80694--- /dev/null
80695+++ b/grsecurity/gracl_shm.c
80696@@ -0,0 +1,40 @@
80697+#include <linux/kernel.h>
80698+#include <linux/mm.h>
80699+#include <linux/sched.h>
80700+#include <linux/file.h>
80701+#include <linux/ipc.h>
80702+#include <linux/gracl.h>
80703+#include <linux/grsecurity.h>
80704+#include <linux/grinternal.h>
80705+
80706+int
80707+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80708+ const time_t shm_createtime, const uid_t cuid, const int shmid)
80709+{
80710+ struct task_struct *task;
80711+
80712+ if (!gr_acl_is_enabled())
80713+ return 1;
80714+
80715+ rcu_read_lock();
80716+ read_lock(&tasklist_lock);
80717+
80718+ task = find_task_by_vpid(shm_cprid);
80719+
80720+ if (unlikely(!task))
80721+ task = find_task_by_vpid(shm_lapid);
80722+
80723+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
80724+ (task->pid == shm_lapid)) &&
80725+ (task->acl->mode & GR_PROTSHM) &&
80726+ (task->acl != current->acl))) {
80727+ read_unlock(&tasklist_lock);
80728+ rcu_read_unlock();
80729+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
80730+ return 0;
80731+ }
80732+ read_unlock(&tasklist_lock);
80733+ rcu_read_unlock();
80734+
80735+ return 1;
80736+}
80737diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
80738new file mode 100644
80739index 0000000..bc0be01
80740--- /dev/null
80741+++ b/grsecurity/grsec_chdir.c
80742@@ -0,0 +1,19 @@
80743+#include <linux/kernel.h>
80744+#include <linux/sched.h>
80745+#include <linux/fs.h>
80746+#include <linux/file.h>
80747+#include <linux/grsecurity.h>
80748+#include <linux/grinternal.h>
80749+
80750+void
80751+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
80752+{
80753+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80754+ if ((grsec_enable_chdir && grsec_enable_group &&
80755+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
80756+ !grsec_enable_group)) {
80757+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
80758+ }
80759+#endif
80760+ return;
80761+}
80762diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
80763new file mode 100644
80764index 0000000..197bdd5
80765--- /dev/null
80766+++ b/grsecurity/grsec_chroot.c
80767@@ -0,0 +1,386 @@
80768+#include <linux/kernel.h>
80769+#include <linux/module.h>
80770+#include <linux/sched.h>
80771+#include <linux/file.h>
80772+#include <linux/fs.h>
80773+#include <linux/mount.h>
80774+#include <linux/types.h>
80775+#include <linux/pid_namespace.h>
80776+#include <linux/grsecurity.h>
80777+#include <linux/grinternal.h>
80778+
80779+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
80780+{
80781+#ifdef CONFIG_GRKERNSEC
80782+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
80783+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
80784+ task->gr_is_chrooted = 1;
80785+ else
80786+ task->gr_is_chrooted = 0;
80787+
80788+ task->gr_chroot_dentry = path->dentry;
80789+#endif
80790+ return;
80791+}
80792+
80793+void gr_clear_chroot_entries(struct task_struct *task)
80794+{
80795+#ifdef CONFIG_GRKERNSEC
80796+ task->gr_is_chrooted = 0;
80797+ task->gr_chroot_dentry = NULL;
80798+#endif
80799+ return;
80800+}
80801+
80802+int
80803+gr_handle_chroot_unix(const pid_t pid)
80804+{
80805+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80806+ struct task_struct *p;
80807+
80808+ if (unlikely(!grsec_enable_chroot_unix))
80809+ return 1;
80810+
80811+ if (likely(!proc_is_chrooted(current)))
80812+ return 1;
80813+
80814+ rcu_read_lock();
80815+ read_lock(&tasklist_lock);
80816+
80817+ p = find_task_by_vpid_unrestricted(pid);
80818+ if (unlikely(p && !have_same_root(current, p))) {
80819+ read_unlock(&tasklist_lock);
80820+ rcu_read_unlock();
80821+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
80822+ return 0;
80823+ }
80824+ read_unlock(&tasklist_lock);
80825+ rcu_read_unlock();
80826+#endif
80827+ return 1;
80828+}
80829+
80830+int
80831+gr_handle_chroot_nice(void)
80832+{
80833+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80834+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
80835+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
80836+ return -EPERM;
80837+ }
80838+#endif
80839+ return 0;
80840+}
80841+
80842+int
80843+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
80844+{
80845+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80846+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
80847+ && proc_is_chrooted(current)) {
80848+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
80849+ return -EACCES;
80850+ }
80851+#endif
80852+ return 0;
80853+}
80854+
80855+int
80856+gr_handle_chroot_rawio(const struct inode *inode)
80857+{
80858+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80859+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
80860+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
80861+ return 1;
80862+#endif
80863+ return 0;
80864+}
80865+
80866+int
80867+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
80868+{
80869+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80870+ struct task_struct *p;
80871+ int ret = 0;
80872+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
80873+ return ret;
80874+
80875+ read_lock(&tasklist_lock);
80876+ do_each_pid_task(pid, type, p) {
80877+ if (!have_same_root(current, p)) {
80878+ ret = 1;
80879+ goto out;
80880+ }
80881+ } while_each_pid_task(pid, type, p);
80882+out:
80883+ read_unlock(&tasklist_lock);
80884+ return ret;
80885+#endif
80886+ return 0;
80887+}
80888+
80889+int
80890+gr_pid_is_chrooted(struct task_struct *p)
80891+{
80892+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80893+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
80894+ return 0;
80895+
80896+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
80897+ !have_same_root(current, p)) {
80898+ return 1;
80899+ }
80900+#endif
80901+ return 0;
80902+}
80903+
80904+EXPORT_SYMBOL(gr_pid_is_chrooted);
80905+
80906+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
80907+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
80908+{
80909+ struct dentry *dentry = (struct dentry *)u_dentry;
80910+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
80911+ struct dentry *realroot;
80912+ struct vfsmount *realrootmnt;
80913+ struct dentry *currentroot;
80914+ struct vfsmount *currentmnt;
80915+ struct task_struct *reaper = &init_task;
80916+ int ret = 1;
80917+
80918+ read_lock(&reaper->fs->lock);
80919+ realrootmnt = mntget(reaper->fs->root.mnt);
80920+ realroot = dget(reaper->fs->root.dentry);
80921+ read_unlock(&reaper->fs->lock);
80922+
80923+ read_lock(&current->fs->lock);
80924+ currentmnt = mntget(current->fs->root.mnt);
80925+ currentroot = dget(current->fs->root.dentry);
80926+ read_unlock(&current->fs->lock);
80927+
80928+ spin_lock(&dcache_lock);
80929+ for (;;) {
80930+ if (unlikely((dentry == realroot && mnt == realrootmnt)
80931+ || (dentry == currentroot && mnt == currentmnt)))
80932+ break;
80933+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
80934+ if (mnt->mnt_parent == mnt)
80935+ break;
80936+ dentry = mnt->mnt_mountpoint;
80937+ mnt = mnt->mnt_parent;
80938+ continue;
80939+ }
80940+ dentry = dentry->d_parent;
80941+ }
80942+ spin_unlock(&dcache_lock);
80943+
80944+ dput(currentroot);
80945+ mntput(currentmnt);
80946+
80947+ /* access is outside of chroot */
80948+ if (dentry == realroot && mnt == realrootmnt)
80949+ ret = 0;
80950+
80951+ dput(realroot);
80952+ mntput(realrootmnt);
80953+ return ret;
80954+}
80955+#endif
80956+
80957+int
80958+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
80959+{
80960+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80961+ if (!grsec_enable_chroot_fchdir)
80962+ return 1;
80963+
80964+ if (!proc_is_chrooted(current))
80965+ return 1;
80966+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
80967+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
80968+ return 0;
80969+ }
80970+#endif
80971+ return 1;
80972+}
80973+
80974+int
80975+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80976+ const time_t shm_createtime)
80977+{
80978+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80979+ struct task_struct *p;
80980+ time_t starttime;
80981+
80982+ if (unlikely(!grsec_enable_chroot_shmat))
80983+ return 1;
80984+
80985+ if (likely(!proc_is_chrooted(current)))
80986+ return 1;
80987+
80988+ rcu_read_lock();
80989+ read_lock(&tasklist_lock);
80990+
80991+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
80992+ starttime = p->start_time.tv_sec;
80993+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
80994+ if (have_same_root(current, p)) {
80995+ goto allow;
80996+ } else {
80997+ read_unlock(&tasklist_lock);
80998+ rcu_read_unlock();
80999+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
81000+ return 0;
81001+ }
81002+ }
81003+ /* creator exited, pid reuse, fall through to next check */
81004+ }
81005+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
81006+ if (unlikely(!have_same_root(current, p))) {
81007+ read_unlock(&tasklist_lock);
81008+ rcu_read_unlock();
81009+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
81010+ return 0;
81011+ }
81012+ }
81013+
81014+allow:
81015+ read_unlock(&tasklist_lock);
81016+ rcu_read_unlock();
81017+#endif
81018+ return 1;
81019+}
81020+
81021+void
81022+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
81023+{
81024+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
81025+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
81026+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
81027+#endif
81028+ return;
81029+}
81030+
81031+int
81032+gr_handle_chroot_mknod(const struct dentry *dentry,
81033+ const struct vfsmount *mnt, const int mode)
81034+{
81035+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
81036+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
81037+ proc_is_chrooted(current)) {
81038+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
81039+ return -EPERM;
81040+ }
81041+#endif
81042+ return 0;
81043+}
81044+
81045+int
81046+gr_handle_chroot_mount(const struct dentry *dentry,
81047+ const struct vfsmount *mnt, const char *dev_name)
81048+{
81049+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
81050+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
81051+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
81052+ return -EPERM;
81053+ }
81054+#endif
81055+ return 0;
81056+}
81057+
81058+int
81059+gr_handle_chroot_pivot(void)
81060+{
81061+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
81062+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
81063+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
81064+ return -EPERM;
81065+ }
81066+#endif
81067+ return 0;
81068+}
81069+
81070+int
81071+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
81072+{
81073+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
81074+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
81075+ !gr_is_outside_chroot(dentry, mnt)) {
81076+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
81077+ return -EPERM;
81078+ }
81079+#endif
81080+ return 0;
81081+}
81082+
81083+extern const char *captab_log[];
81084+extern int captab_log_entries;
81085+
81086+int
81087+gr_chroot_is_capable(const int cap)
81088+{
81089+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81090+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81091+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81092+ if (cap_raised(chroot_caps, cap)) {
81093+ const struct cred *creds = current_cred();
81094+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
81095+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
81096+ }
81097+ return 0;
81098+ }
81099+ }
81100+#endif
81101+ return 1;
81102+}
81103+
81104+int
81105+gr_chroot_is_capable_nolog(const int cap)
81106+{
81107+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81108+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81109+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81110+ if (cap_raised(chroot_caps, cap)) {
81111+ return 0;
81112+ }
81113+ }
81114+#endif
81115+ return 1;
81116+}
81117+
81118+int
81119+gr_handle_chroot_sysctl(const int op)
81120+{
81121+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
81122+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
81123+ && (op & MAY_WRITE))
81124+ return -EACCES;
81125+#endif
81126+ return 0;
81127+}
81128+
81129+void
81130+gr_handle_chroot_chdir(struct path *path)
81131+{
81132+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
81133+ if (grsec_enable_chroot_chdir)
81134+ set_fs_pwd(current->fs, path);
81135+#endif
81136+ return;
81137+}
81138+
81139+int
81140+gr_handle_chroot_chmod(const struct dentry *dentry,
81141+ const struct vfsmount *mnt, const int mode)
81142+{
81143+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
81144+ /* allow chmod +s on directories, but not on files */
81145+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
81146+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
81147+ proc_is_chrooted(current)) {
81148+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
81149+ return -EPERM;
81150+ }
81151+#endif
81152+ return 0;
81153+}
81154diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
81155new file mode 100644
81156index 0000000..40545bf
81157--- /dev/null
81158+++ b/grsecurity/grsec_disabled.c
81159@@ -0,0 +1,437 @@
81160+#include <linux/kernel.h>
81161+#include <linux/module.h>
81162+#include <linux/sched.h>
81163+#include <linux/file.h>
81164+#include <linux/fs.h>
81165+#include <linux/kdev_t.h>
81166+#include <linux/net.h>
81167+#include <linux/in.h>
81168+#include <linux/ip.h>
81169+#include <linux/skbuff.h>
81170+#include <linux/sysctl.h>
81171+
81172+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
81173+void
81174+pax_set_initial_flags(struct linux_binprm *bprm)
81175+{
81176+ return;
81177+}
81178+#endif
81179+
81180+#ifdef CONFIG_SYSCTL
81181+__u32
81182+gr_handle_sysctl(const struct ctl_table * table, const int op)
81183+{
81184+ return 0;
81185+}
81186+#endif
81187+
81188+#ifdef CONFIG_TASKSTATS
81189+int gr_is_taskstats_denied(int pid)
81190+{
81191+ return 0;
81192+}
81193+#endif
81194+
81195+int
81196+gr_acl_is_enabled(void)
81197+{
81198+ return 0;
81199+}
81200+
81201+void
81202+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
81203+{
81204+ return;
81205+}
81206+
81207+int
81208+gr_handle_rawio(const struct inode *inode)
81209+{
81210+ return 0;
81211+}
81212+
81213+void
81214+gr_acl_handle_psacct(struct task_struct *task, const long code)
81215+{
81216+ return;
81217+}
81218+
81219+int
81220+gr_handle_ptrace(struct task_struct *task, const long request)
81221+{
81222+ return 0;
81223+}
81224+
81225+int
81226+gr_handle_proc_ptrace(struct task_struct *task)
81227+{
81228+ return 0;
81229+}
81230+
81231+void
81232+gr_learn_resource(const struct task_struct *task,
81233+ const int res, const unsigned long wanted, const int gt)
81234+{
81235+ return;
81236+}
81237+
81238+int
81239+gr_set_acls(const int type)
81240+{
81241+ return 0;
81242+}
81243+
81244+int
81245+gr_check_hidden_task(const struct task_struct *tsk)
81246+{
81247+ return 0;
81248+}
81249+
81250+int
81251+gr_check_protected_task(const struct task_struct *task)
81252+{
81253+ return 0;
81254+}
81255+
81256+int
81257+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
81258+{
81259+ return 0;
81260+}
81261+
81262+void
81263+gr_copy_label(struct task_struct *tsk)
81264+{
81265+ return;
81266+}
81267+
81268+void
81269+gr_set_pax_flags(struct task_struct *task)
81270+{
81271+ return;
81272+}
81273+
81274+int
81275+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
81276+ const int unsafe_share)
81277+{
81278+ return 0;
81279+}
81280+
81281+void
81282+gr_handle_delete(const ino_t ino, const dev_t dev)
81283+{
81284+ return;
81285+}
81286+
81287+void
81288+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
81289+{
81290+ return;
81291+}
81292+
81293+void
81294+gr_handle_crash(struct task_struct *task, const int sig)
81295+{
81296+ return;
81297+}
81298+
81299+int
81300+gr_check_crash_exec(const struct file *filp)
81301+{
81302+ return 0;
81303+}
81304+
81305+int
81306+gr_check_crash_uid(const uid_t uid)
81307+{
81308+ return 0;
81309+}
81310+
81311+void
81312+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
81313+ struct dentry *old_dentry,
81314+ struct dentry *new_dentry,
81315+ struct vfsmount *mnt, const __u8 replace)
81316+{
81317+ return;
81318+}
81319+
81320+int
81321+gr_search_socket(const int family, const int type, const int protocol)
81322+{
81323+ return 1;
81324+}
81325+
81326+int
81327+gr_search_connectbind(const int mode, const struct socket *sock,
81328+ const struct sockaddr_in *addr)
81329+{
81330+ return 0;
81331+}
81332+
81333+void
81334+gr_handle_alertkill(struct task_struct *task)
81335+{
81336+ return;
81337+}
81338+
81339+__u32
81340+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
81341+{
81342+ return 1;
81343+}
81344+
81345+__u32
81346+gr_acl_handle_hidden_file(const struct dentry * dentry,
81347+ const struct vfsmount * mnt)
81348+{
81349+ return 1;
81350+}
81351+
81352+__u32
81353+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
81354+ int acc_mode)
81355+{
81356+ return 1;
81357+}
81358+
81359+__u32
81360+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
81361+{
81362+ return 1;
81363+}
81364+
81365+__u32
81366+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
81367+{
81368+ return 1;
81369+}
81370+
81371+int
81372+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
81373+ unsigned int *vm_flags)
81374+{
81375+ return 1;
81376+}
81377+
81378+__u32
81379+gr_acl_handle_truncate(const struct dentry * dentry,
81380+ const struct vfsmount * mnt)
81381+{
81382+ return 1;
81383+}
81384+
81385+__u32
81386+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
81387+{
81388+ return 1;
81389+}
81390+
81391+__u32
81392+gr_acl_handle_access(const struct dentry * dentry,
81393+ const struct vfsmount * mnt, const int fmode)
81394+{
81395+ return 1;
81396+}
81397+
81398+__u32
81399+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
81400+ umode_t *mode)
81401+{
81402+ return 1;
81403+}
81404+
81405+__u32
81406+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
81407+{
81408+ return 1;
81409+}
81410+
81411+__u32
81412+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
81413+{
81414+ return 1;
81415+}
81416+
81417+void
81418+grsecurity_init(void)
81419+{
81420+ return;
81421+}
81422+
81423+umode_t gr_acl_umask(void)
81424+{
81425+ return 0;
81426+}
81427+
81428+__u32
81429+gr_acl_handle_mknod(const struct dentry * new_dentry,
81430+ const struct dentry * parent_dentry,
81431+ const struct vfsmount * parent_mnt,
81432+ const int mode)
81433+{
81434+ return 1;
81435+}
81436+
81437+__u32
81438+gr_acl_handle_mkdir(const struct dentry * new_dentry,
81439+ const struct dentry * parent_dentry,
81440+ const struct vfsmount * parent_mnt)
81441+{
81442+ return 1;
81443+}
81444+
81445+__u32
81446+gr_acl_handle_symlink(const struct dentry * new_dentry,
81447+ const struct dentry * parent_dentry,
81448+ const struct vfsmount * parent_mnt, const char *from)
81449+{
81450+ return 1;
81451+}
81452+
81453+__u32
81454+gr_acl_handle_link(const struct dentry * new_dentry,
81455+ const struct dentry * parent_dentry,
81456+ const struct vfsmount * parent_mnt,
81457+ const struct dentry * old_dentry,
81458+ const struct vfsmount * old_mnt, const char *to)
81459+{
81460+ return 1;
81461+}
81462+
81463+int
81464+gr_acl_handle_rename(const struct dentry *new_dentry,
81465+ const struct dentry *parent_dentry,
81466+ const struct vfsmount *parent_mnt,
81467+ const struct dentry *old_dentry,
81468+ const struct inode *old_parent_inode,
81469+ const struct vfsmount *old_mnt, const char *newname)
81470+{
81471+ return 0;
81472+}
81473+
81474+int
81475+gr_acl_handle_filldir(const struct file *file, const char *name,
81476+ const int namelen, const ino_t ino)
81477+{
81478+ return 1;
81479+}
81480+
81481+int
81482+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81483+ const time_t shm_createtime, const uid_t cuid, const int shmid)
81484+{
81485+ return 1;
81486+}
81487+
81488+int
81489+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
81490+{
81491+ return 0;
81492+}
81493+
81494+int
81495+gr_search_accept(const struct socket *sock)
81496+{
81497+ return 0;
81498+}
81499+
81500+int
81501+gr_search_listen(const struct socket *sock)
81502+{
81503+ return 0;
81504+}
81505+
81506+int
81507+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
81508+{
81509+ return 0;
81510+}
81511+
81512+__u32
81513+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
81514+{
81515+ return 1;
81516+}
81517+
81518+__u32
81519+gr_acl_handle_creat(const struct dentry * dentry,
81520+ const struct dentry * p_dentry,
81521+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
81522+ const int imode)
81523+{
81524+ return 1;
81525+}
81526+
81527+void
81528+gr_acl_handle_exit(void)
81529+{
81530+ return;
81531+}
81532+
81533+int
81534+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
81535+{
81536+ return 1;
81537+}
81538+
81539+void
81540+gr_set_role_label(const uid_t uid, const gid_t gid)
81541+{
81542+ return;
81543+}
81544+
81545+int
81546+gr_acl_handle_procpidmem(const struct task_struct *task)
81547+{
81548+ return 0;
81549+}
81550+
81551+int
81552+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
81553+{
81554+ return 0;
81555+}
81556+
81557+int
81558+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
81559+{
81560+ return 0;
81561+}
81562+
81563+void
81564+gr_set_kernel_label(struct task_struct *task)
81565+{
81566+ return;
81567+}
81568+
81569+int
81570+gr_check_user_change(int real, int effective, int fs)
81571+{
81572+ return 0;
81573+}
81574+
81575+int
81576+gr_check_group_change(int real, int effective, int fs)
81577+{
81578+ return 0;
81579+}
81580+
81581+int gr_acl_enable_at_secure(void)
81582+{
81583+ return 0;
81584+}
81585+
81586+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
81587+{
81588+ return dentry->d_inode->i_sb->s_dev;
81589+}
81590+
81591+EXPORT_SYMBOL(gr_learn_resource);
81592+EXPORT_SYMBOL(gr_set_kernel_label);
81593+#ifdef CONFIG_SECURITY
81594+EXPORT_SYMBOL(gr_check_user_change);
81595+EXPORT_SYMBOL(gr_check_group_change);
81596+#endif
81597diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
81598new file mode 100644
81599index 0000000..a96e155
81600--- /dev/null
81601+++ b/grsecurity/grsec_exec.c
81602@@ -0,0 +1,204 @@
81603+#include <linux/kernel.h>
81604+#include <linux/sched.h>
81605+#include <linux/file.h>
81606+#include <linux/binfmts.h>
81607+#include <linux/smp_lock.h>
81608+#include <linux/fs.h>
81609+#include <linux/types.h>
81610+#include <linux/grdefs.h>
81611+#include <linux/grinternal.h>
81612+#include <linux/capability.h>
81613+#include <linux/compat.h>
81614+#include <linux/module.h>
81615+
81616+#include <asm/uaccess.h>
81617+
81618+#ifdef CONFIG_GRKERNSEC_EXECLOG
81619+static char gr_exec_arg_buf[132];
81620+static DEFINE_MUTEX(gr_exec_arg_mutex);
81621+#endif
81622+
81623+void
81624+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
81625+{
81626+#ifdef CONFIG_GRKERNSEC_EXECLOG
81627+ char *grarg = gr_exec_arg_buf;
81628+ unsigned int i, x, execlen = 0;
81629+ char c;
81630+
81631+ if (!((grsec_enable_execlog && grsec_enable_group &&
81632+ in_group_p(grsec_audit_gid))
81633+ || (grsec_enable_execlog && !grsec_enable_group)))
81634+ return;
81635+
81636+ mutex_lock(&gr_exec_arg_mutex);
81637+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81638+
81639+ if (unlikely(argv == NULL))
81640+ goto log;
81641+
81642+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81643+ const char __user *p;
81644+ unsigned int len;
81645+
81646+ if (copy_from_user(&p, argv + i, sizeof(p)))
81647+ goto log;
81648+ if (!p)
81649+ goto log;
81650+ len = strnlen_user(p, 128 - execlen);
81651+ if (len > 128 - execlen)
81652+ len = 128 - execlen;
81653+ else if (len > 0)
81654+ len--;
81655+ if (copy_from_user(grarg + execlen, p, len))
81656+ goto log;
81657+
81658+ /* rewrite unprintable characters */
81659+ for (x = 0; x < len; x++) {
81660+ c = *(grarg + execlen + x);
81661+ if (c < 32 || c > 126)
81662+ *(grarg + execlen + x) = ' ';
81663+ }
81664+
81665+ execlen += len;
81666+ *(grarg + execlen) = ' ';
81667+ *(grarg + execlen + 1) = '\0';
81668+ execlen++;
81669+ }
81670+
81671+ log:
81672+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81673+ bprm->file->f_path.mnt, grarg);
81674+ mutex_unlock(&gr_exec_arg_mutex);
81675+#endif
81676+ return;
81677+}
81678+
81679+#ifdef CONFIG_COMPAT
81680+void
81681+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
81682+{
81683+#ifdef CONFIG_GRKERNSEC_EXECLOG
81684+ char *grarg = gr_exec_arg_buf;
81685+ unsigned int i, x, execlen = 0;
81686+ char c;
81687+
81688+ if (!((grsec_enable_execlog && grsec_enable_group &&
81689+ in_group_p(grsec_audit_gid))
81690+ || (grsec_enable_execlog && !grsec_enable_group)))
81691+ return;
81692+
81693+ mutex_lock(&gr_exec_arg_mutex);
81694+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81695+
81696+ if (unlikely(argv == NULL))
81697+ goto log;
81698+
81699+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81700+ compat_uptr_t p;
81701+ unsigned int len;
81702+
81703+ if (get_user(p, argv + i))
81704+ goto log;
81705+ len = strnlen_user(compat_ptr(p), 128 - execlen);
81706+ if (len > 128 - execlen)
81707+ len = 128 - execlen;
81708+ else if (len > 0)
81709+ len--;
81710+ else
81711+ goto log;
81712+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
81713+ goto log;
81714+
81715+ /* rewrite unprintable characters */
81716+ for (x = 0; x < len; x++) {
81717+ c = *(grarg + execlen + x);
81718+ if (c < 32 || c > 126)
81719+ *(grarg + execlen + x) = ' ';
81720+ }
81721+
81722+ execlen += len;
81723+ *(grarg + execlen) = ' ';
81724+ *(grarg + execlen + 1) = '\0';
81725+ execlen++;
81726+ }
81727+
81728+ log:
81729+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81730+ bprm->file->f_path.mnt, grarg);
81731+ mutex_unlock(&gr_exec_arg_mutex);
81732+#endif
81733+ return;
81734+}
81735+#endif
81736+
81737+#ifdef CONFIG_GRKERNSEC
81738+extern int gr_acl_is_capable(const int cap);
81739+extern int gr_acl_is_capable_nolog(const int cap);
81740+extern int gr_chroot_is_capable(const int cap);
81741+extern int gr_chroot_is_capable_nolog(const int cap);
81742+#endif
81743+
81744+const char *captab_log[] = {
81745+ "CAP_CHOWN",
81746+ "CAP_DAC_OVERRIDE",
81747+ "CAP_DAC_READ_SEARCH",
81748+ "CAP_FOWNER",
81749+ "CAP_FSETID",
81750+ "CAP_KILL",
81751+ "CAP_SETGID",
81752+ "CAP_SETUID",
81753+ "CAP_SETPCAP",
81754+ "CAP_LINUX_IMMUTABLE",
81755+ "CAP_NET_BIND_SERVICE",
81756+ "CAP_NET_BROADCAST",
81757+ "CAP_NET_ADMIN",
81758+ "CAP_NET_RAW",
81759+ "CAP_IPC_LOCK",
81760+ "CAP_IPC_OWNER",
81761+ "CAP_SYS_MODULE",
81762+ "CAP_SYS_RAWIO",
81763+ "CAP_SYS_CHROOT",
81764+ "CAP_SYS_PTRACE",
81765+ "CAP_SYS_PACCT",
81766+ "CAP_SYS_ADMIN",
81767+ "CAP_SYS_BOOT",
81768+ "CAP_SYS_NICE",
81769+ "CAP_SYS_RESOURCE",
81770+ "CAP_SYS_TIME",
81771+ "CAP_SYS_TTY_CONFIG",
81772+ "CAP_MKNOD",
81773+ "CAP_LEASE",
81774+ "CAP_AUDIT_WRITE",
81775+ "CAP_AUDIT_CONTROL",
81776+ "CAP_SETFCAP",
81777+ "CAP_MAC_OVERRIDE",
81778+ "CAP_MAC_ADMIN"
81779+};
81780+
81781+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
81782+
81783+int gr_is_capable(const int cap)
81784+{
81785+#ifdef CONFIG_GRKERNSEC
81786+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
81787+ return 1;
81788+ return 0;
81789+#else
81790+ return 1;
81791+#endif
81792+}
81793+
81794+int gr_is_capable_nolog(const int cap)
81795+{
81796+#ifdef CONFIG_GRKERNSEC
81797+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
81798+ return 1;
81799+ return 0;
81800+#else
81801+ return 1;
81802+#endif
81803+}
81804+
81805+EXPORT_SYMBOL(gr_is_capable);
81806+EXPORT_SYMBOL(gr_is_capable_nolog);
81807diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
81808new file mode 100644
81809index 0000000..d3ee748
81810--- /dev/null
81811+++ b/grsecurity/grsec_fifo.c
81812@@ -0,0 +1,24 @@
81813+#include <linux/kernel.h>
81814+#include <linux/sched.h>
81815+#include <linux/fs.h>
81816+#include <linux/file.h>
81817+#include <linux/grinternal.h>
81818+
81819+int
81820+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
81821+ const struct dentry *dir, const int flag, const int acc_mode)
81822+{
81823+#ifdef CONFIG_GRKERNSEC_FIFO
81824+ const struct cred *cred = current_cred();
81825+
81826+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
81827+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
81828+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
81829+ (cred->fsuid != dentry->d_inode->i_uid)) {
81830+ if (!inode_permission(dentry->d_inode, acc_mode))
81831+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
81832+ return -EACCES;
81833+ }
81834+#endif
81835+ return 0;
81836+}
81837diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
81838new file mode 100644
81839index 0000000..8ca18bf
81840--- /dev/null
81841+++ b/grsecurity/grsec_fork.c
81842@@ -0,0 +1,23 @@
81843+#include <linux/kernel.h>
81844+#include <linux/sched.h>
81845+#include <linux/grsecurity.h>
81846+#include <linux/grinternal.h>
81847+#include <linux/errno.h>
81848+
81849+void
81850+gr_log_forkfail(const int retval)
81851+{
81852+#ifdef CONFIG_GRKERNSEC_FORKFAIL
81853+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
81854+ switch (retval) {
81855+ case -EAGAIN:
81856+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
81857+ break;
81858+ case -ENOMEM:
81859+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
81860+ break;
81861+ }
81862+ }
81863+#endif
81864+ return;
81865+}
81866diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
81867new file mode 100644
81868index 0000000..1e995d3
81869--- /dev/null
81870+++ b/grsecurity/grsec_init.c
81871@@ -0,0 +1,278 @@
81872+#include <linux/kernel.h>
81873+#include <linux/sched.h>
81874+#include <linux/mm.h>
81875+#include <linux/smp_lock.h>
81876+#include <linux/gracl.h>
81877+#include <linux/slab.h>
81878+#include <linux/vmalloc.h>
81879+#include <linux/percpu.h>
81880+#include <linux/module.h>
81881+
81882+int grsec_enable_ptrace_readexec;
81883+int grsec_enable_setxid;
81884+int grsec_enable_brute;
81885+int grsec_enable_link;
81886+int grsec_enable_dmesg;
81887+int grsec_enable_harden_ptrace;
81888+int grsec_enable_fifo;
81889+int grsec_enable_execlog;
81890+int grsec_enable_signal;
81891+int grsec_enable_forkfail;
81892+int grsec_enable_audit_ptrace;
81893+int grsec_enable_time;
81894+int grsec_enable_audit_textrel;
81895+int grsec_enable_group;
81896+int grsec_audit_gid;
81897+int grsec_enable_chdir;
81898+int grsec_enable_mount;
81899+int grsec_enable_rofs;
81900+int grsec_enable_chroot_findtask;
81901+int grsec_enable_chroot_mount;
81902+int grsec_enable_chroot_shmat;
81903+int grsec_enable_chroot_fchdir;
81904+int grsec_enable_chroot_double;
81905+int grsec_enable_chroot_pivot;
81906+int grsec_enable_chroot_chdir;
81907+int grsec_enable_chroot_chmod;
81908+int grsec_enable_chroot_mknod;
81909+int grsec_enable_chroot_nice;
81910+int grsec_enable_chroot_execlog;
81911+int grsec_enable_chroot_caps;
81912+int grsec_enable_chroot_sysctl;
81913+int grsec_enable_chroot_unix;
81914+int grsec_enable_tpe;
81915+int grsec_tpe_gid;
81916+int grsec_enable_blackhole;
81917+#ifdef CONFIG_IPV6_MODULE
81918+EXPORT_SYMBOL(grsec_enable_blackhole);
81919+#endif
81920+int grsec_lastack_retries;
81921+int grsec_enable_tpe_all;
81922+int grsec_enable_tpe_invert;
81923+int grsec_enable_socket_all;
81924+int grsec_socket_all_gid;
81925+int grsec_enable_socket_client;
81926+int grsec_socket_client_gid;
81927+int grsec_enable_socket_server;
81928+int grsec_socket_server_gid;
81929+int grsec_resource_logging;
81930+int grsec_disable_privio;
81931+int grsec_enable_log_rwxmaps;
81932+int grsec_lock;
81933+
81934+DEFINE_SPINLOCK(grsec_alert_lock);
81935+unsigned long grsec_alert_wtime = 0;
81936+unsigned long grsec_alert_fyet = 0;
81937+
81938+DEFINE_SPINLOCK(grsec_audit_lock);
81939+
81940+DEFINE_RWLOCK(grsec_exec_file_lock);
81941+
81942+char *gr_shared_page[4];
81943+
81944+char *gr_alert_log_fmt;
81945+char *gr_audit_log_fmt;
81946+char *gr_alert_log_buf;
81947+char *gr_audit_log_buf;
81948+
81949+extern struct gr_arg *gr_usermode;
81950+extern unsigned char *gr_system_salt;
81951+extern unsigned char *gr_system_sum;
81952+
81953+void __init
81954+grsecurity_init(void)
81955+{
81956+ int j;
81957+ /* create the per-cpu shared pages */
81958+
81959+#ifdef CONFIG_X86
81960+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
81961+#endif
81962+
81963+ for (j = 0; j < 4; j++) {
81964+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
81965+ if (gr_shared_page[j] == NULL) {
81966+ panic("Unable to allocate grsecurity shared page");
81967+ return;
81968+ }
81969+ }
81970+
81971+ /* allocate log buffers */
81972+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
81973+ if (!gr_alert_log_fmt) {
81974+ panic("Unable to allocate grsecurity alert log format buffer");
81975+ return;
81976+ }
81977+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
81978+ if (!gr_audit_log_fmt) {
81979+ panic("Unable to allocate grsecurity audit log format buffer");
81980+ return;
81981+ }
81982+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81983+ if (!gr_alert_log_buf) {
81984+ panic("Unable to allocate grsecurity alert log buffer");
81985+ return;
81986+ }
81987+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81988+ if (!gr_audit_log_buf) {
81989+ panic("Unable to allocate grsecurity audit log buffer");
81990+ return;
81991+ }
81992+
81993+ /* allocate memory for authentication structure */
81994+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
81995+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
81996+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
81997+
81998+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
81999+ panic("Unable to allocate grsecurity authentication structure");
82000+ return;
82001+ }
82002+
82003+
82004+#ifdef CONFIG_GRKERNSEC_IO
82005+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
82006+ grsec_disable_privio = 1;
82007+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
82008+ grsec_disable_privio = 1;
82009+#else
82010+ grsec_disable_privio = 0;
82011+#endif
82012+#endif
82013+
82014+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
82015+ /* for backward compatibility, tpe_invert always defaults to on if
82016+ enabled in the kernel
82017+ */
82018+ grsec_enable_tpe_invert = 1;
82019+#endif
82020+
82021+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
82022+#ifndef CONFIG_GRKERNSEC_SYSCTL
82023+ grsec_lock = 1;
82024+#endif
82025+
82026+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82027+ grsec_enable_audit_textrel = 1;
82028+#endif
82029+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82030+ grsec_enable_log_rwxmaps = 1;
82031+#endif
82032+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
82033+ grsec_enable_group = 1;
82034+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
82035+#endif
82036+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
82037+ grsec_enable_chdir = 1;
82038+#endif
82039+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
82040+ grsec_enable_harden_ptrace = 1;
82041+#endif
82042+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82043+ grsec_enable_mount = 1;
82044+#endif
82045+#ifdef CONFIG_GRKERNSEC_LINK
82046+ grsec_enable_link = 1;
82047+#endif
82048+#ifdef CONFIG_GRKERNSEC_BRUTE
82049+ grsec_enable_brute = 1;
82050+#endif
82051+#ifdef CONFIG_GRKERNSEC_DMESG
82052+ grsec_enable_dmesg = 1;
82053+#endif
82054+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82055+ grsec_enable_blackhole = 1;
82056+ grsec_lastack_retries = 4;
82057+#endif
82058+#ifdef CONFIG_GRKERNSEC_FIFO
82059+ grsec_enable_fifo = 1;
82060+#endif
82061+#ifdef CONFIG_GRKERNSEC_EXECLOG
82062+ grsec_enable_execlog = 1;
82063+#endif
82064+#ifdef CONFIG_GRKERNSEC_SETXID
82065+ grsec_enable_setxid = 1;
82066+#endif
82067+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82068+ grsec_enable_ptrace_readexec = 1;
82069+#endif
82070+#ifdef CONFIG_GRKERNSEC_SIGNAL
82071+ grsec_enable_signal = 1;
82072+#endif
82073+#ifdef CONFIG_GRKERNSEC_FORKFAIL
82074+ grsec_enable_forkfail = 1;
82075+#endif
82076+#ifdef CONFIG_GRKERNSEC_TIME
82077+ grsec_enable_time = 1;
82078+#endif
82079+#ifdef CONFIG_GRKERNSEC_RESLOG
82080+ grsec_resource_logging = 1;
82081+#endif
82082+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82083+ grsec_enable_chroot_findtask = 1;
82084+#endif
82085+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
82086+ grsec_enable_chroot_unix = 1;
82087+#endif
82088+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
82089+ grsec_enable_chroot_mount = 1;
82090+#endif
82091+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
82092+ grsec_enable_chroot_fchdir = 1;
82093+#endif
82094+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
82095+ grsec_enable_chroot_shmat = 1;
82096+#endif
82097+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82098+ grsec_enable_audit_ptrace = 1;
82099+#endif
82100+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
82101+ grsec_enable_chroot_double = 1;
82102+#endif
82103+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
82104+ grsec_enable_chroot_pivot = 1;
82105+#endif
82106+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
82107+ grsec_enable_chroot_chdir = 1;
82108+#endif
82109+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
82110+ grsec_enable_chroot_chmod = 1;
82111+#endif
82112+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
82113+ grsec_enable_chroot_mknod = 1;
82114+#endif
82115+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
82116+ grsec_enable_chroot_nice = 1;
82117+#endif
82118+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
82119+ grsec_enable_chroot_execlog = 1;
82120+#endif
82121+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
82122+ grsec_enable_chroot_caps = 1;
82123+#endif
82124+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
82125+ grsec_enable_chroot_sysctl = 1;
82126+#endif
82127+#ifdef CONFIG_GRKERNSEC_TPE
82128+ grsec_enable_tpe = 1;
82129+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
82130+#ifdef CONFIG_GRKERNSEC_TPE_ALL
82131+ grsec_enable_tpe_all = 1;
82132+#endif
82133+#endif
82134+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
82135+ grsec_enable_socket_all = 1;
82136+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
82137+#endif
82138+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
82139+ grsec_enable_socket_client = 1;
82140+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
82141+#endif
82142+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
82143+ grsec_enable_socket_server = 1;
82144+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
82145+#endif
82146+#endif
82147+
82148+ return;
82149+}
82150diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
82151new file mode 100644
82152index 0000000..3efe141
82153--- /dev/null
82154+++ b/grsecurity/grsec_link.c
82155@@ -0,0 +1,43 @@
82156+#include <linux/kernel.h>
82157+#include <linux/sched.h>
82158+#include <linux/fs.h>
82159+#include <linux/file.h>
82160+#include <linux/grinternal.h>
82161+
82162+int
82163+gr_handle_follow_link(const struct inode *parent,
82164+ const struct inode *inode,
82165+ const struct dentry *dentry, const struct vfsmount *mnt)
82166+{
82167+#ifdef CONFIG_GRKERNSEC_LINK
82168+ const struct cred *cred = current_cred();
82169+
82170+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
82171+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
82172+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
82173+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
82174+ return -EACCES;
82175+ }
82176+#endif
82177+ return 0;
82178+}
82179+
82180+int
82181+gr_handle_hardlink(const struct dentry *dentry,
82182+ const struct vfsmount *mnt,
82183+ struct inode *inode, const int mode, const char *to)
82184+{
82185+#ifdef CONFIG_GRKERNSEC_LINK
82186+ const struct cred *cred = current_cred();
82187+
82188+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
82189+ (!S_ISREG(mode) || (mode & S_ISUID) ||
82190+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
82191+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
82192+ !capable(CAP_FOWNER) && cred->uid) {
82193+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
82194+ return -EPERM;
82195+ }
82196+#endif
82197+ return 0;
82198+}
82199diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
82200new file mode 100644
82201index 0000000..a45d2e9
82202--- /dev/null
82203+++ b/grsecurity/grsec_log.c
82204@@ -0,0 +1,322 @@
82205+#include <linux/kernel.h>
82206+#include <linux/sched.h>
82207+#include <linux/file.h>
82208+#include <linux/tty.h>
82209+#include <linux/fs.h>
82210+#include <linux/grinternal.h>
82211+
82212+#ifdef CONFIG_TREE_PREEMPT_RCU
82213+#define DISABLE_PREEMPT() preempt_disable()
82214+#define ENABLE_PREEMPT() preempt_enable()
82215+#else
82216+#define DISABLE_PREEMPT()
82217+#define ENABLE_PREEMPT()
82218+#endif
82219+
82220+#define BEGIN_LOCKS(x) \
82221+ DISABLE_PREEMPT(); \
82222+ rcu_read_lock(); \
82223+ read_lock(&tasklist_lock); \
82224+ read_lock(&grsec_exec_file_lock); \
82225+ if (x != GR_DO_AUDIT) \
82226+ spin_lock(&grsec_alert_lock); \
82227+ else \
82228+ spin_lock(&grsec_audit_lock)
82229+
82230+#define END_LOCKS(x) \
82231+ if (x != GR_DO_AUDIT) \
82232+ spin_unlock(&grsec_alert_lock); \
82233+ else \
82234+ spin_unlock(&grsec_audit_lock); \
82235+ read_unlock(&grsec_exec_file_lock); \
82236+ read_unlock(&tasklist_lock); \
82237+ rcu_read_unlock(); \
82238+ ENABLE_PREEMPT(); \
82239+ if (x == GR_DONT_AUDIT) \
82240+ gr_handle_alertkill(current)
82241+
82242+enum {
82243+ FLOODING,
82244+ NO_FLOODING
82245+};
82246+
82247+extern char *gr_alert_log_fmt;
82248+extern char *gr_audit_log_fmt;
82249+extern char *gr_alert_log_buf;
82250+extern char *gr_audit_log_buf;
82251+
82252+static int gr_log_start(int audit)
82253+{
82254+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
82255+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
82256+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82257+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
82258+ unsigned long curr_secs = get_seconds();
82259+
82260+ if (audit == GR_DO_AUDIT)
82261+ goto set_fmt;
82262+
82263+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
82264+ grsec_alert_wtime = curr_secs;
82265+ grsec_alert_fyet = 0;
82266+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
82267+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
82268+ grsec_alert_fyet++;
82269+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
82270+ grsec_alert_wtime = curr_secs;
82271+ grsec_alert_fyet++;
82272+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
82273+ return FLOODING;
82274+ }
82275+ else return FLOODING;
82276+
82277+set_fmt:
82278+#endif
82279+ memset(buf, 0, PAGE_SIZE);
82280+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
82281+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
82282+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82283+ } else if (current->signal->curr_ip) {
82284+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
82285+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
82286+ } else if (gr_acl_is_enabled()) {
82287+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
82288+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82289+ } else {
82290+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
82291+ strcpy(buf, fmt);
82292+ }
82293+
82294+ return NO_FLOODING;
82295+}
82296+
82297+static void gr_log_middle(int audit, const char *msg, va_list ap)
82298+ __attribute__ ((format (printf, 2, 0)));
82299+
82300+static void gr_log_middle(int audit, const char *msg, va_list ap)
82301+{
82302+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82303+ unsigned int len = strlen(buf);
82304+
82305+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82306+
82307+ return;
82308+}
82309+
82310+static void gr_log_middle_varargs(int audit, const char *msg, ...)
82311+ __attribute__ ((format (printf, 2, 3)));
82312+
82313+static void gr_log_middle_varargs(int audit, const char *msg, ...)
82314+{
82315+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82316+ unsigned int len = strlen(buf);
82317+ va_list ap;
82318+
82319+ va_start(ap, msg);
82320+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82321+ va_end(ap);
82322+
82323+ return;
82324+}
82325+
82326+static void gr_log_end(int audit, int append_default)
82327+{
82328+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82329+
82330+ if (append_default) {
82331+ unsigned int len = strlen(buf);
82332+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
82333+ }
82334+
82335+ printk("%s\n", buf);
82336+
82337+ return;
82338+}
82339+
82340+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
82341+{
82342+ int logtype;
82343+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
82344+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
82345+ void *voidptr = NULL;
82346+ int num1 = 0, num2 = 0;
82347+ unsigned long ulong1 = 0, ulong2 = 0;
82348+ struct dentry *dentry = NULL;
82349+ struct vfsmount *mnt = NULL;
82350+ struct file *file = NULL;
82351+ struct task_struct *task = NULL;
82352+ const struct cred *cred, *pcred;
82353+ va_list ap;
82354+
82355+ BEGIN_LOCKS(audit);
82356+ logtype = gr_log_start(audit);
82357+ if (logtype == FLOODING) {
82358+ END_LOCKS(audit);
82359+ return;
82360+ }
82361+ va_start(ap, argtypes);
82362+ switch (argtypes) {
82363+ case GR_TTYSNIFF:
82364+ task = va_arg(ap, struct task_struct *);
82365+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
82366+ break;
82367+ case GR_SYSCTL_HIDDEN:
82368+ str1 = va_arg(ap, char *);
82369+ gr_log_middle_varargs(audit, msg, result, str1);
82370+ break;
82371+ case GR_RBAC:
82372+ dentry = va_arg(ap, struct dentry *);
82373+ mnt = va_arg(ap, struct vfsmount *);
82374+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
82375+ break;
82376+ case GR_RBAC_STR:
82377+ dentry = va_arg(ap, struct dentry *);
82378+ mnt = va_arg(ap, struct vfsmount *);
82379+ str1 = va_arg(ap, char *);
82380+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
82381+ break;
82382+ case GR_STR_RBAC:
82383+ str1 = va_arg(ap, char *);
82384+ dentry = va_arg(ap, struct dentry *);
82385+ mnt = va_arg(ap, struct vfsmount *);
82386+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
82387+ break;
82388+ case GR_RBAC_MODE2:
82389+ dentry = va_arg(ap, struct dentry *);
82390+ mnt = va_arg(ap, struct vfsmount *);
82391+ str1 = va_arg(ap, char *);
82392+ str2 = va_arg(ap, char *);
82393+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
82394+ break;
82395+ case GR_RBAC_MODE3:
82396+ dentry = va_arg(ap, struct dentry *);
82397+ mnt = va_arg(ap, struct vfsmount *);
82398+ str1 = va_arg(ap, char *);
82399+ str2 = va_arg(ap, char *);
82400+ str3 = va_arg(ap, char *);
82401+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
82402+ break;
82403+ case GR_FILENAME:
82404+ dentry = va_arg(ap, struct dentry *);
82405+ mnt = va_arg(ap, struct vfsmount *);
82406+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
82407+ break;
82408+ case GR_STR_FILENAME:
82409+ str1 = va_arg(ap, char *);
82410+ dentry = va_arg(ap, struct dentry *);
82411+ mnt = va_arg(ap, struct vfsmount *);
82412+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
82413+ break;
82414+ case GR_FILENAME_STR:
82415+ dentry = va_arg(ap, struct dentry *);
82416+ mnt = va_arg(ap, struct vfsmount *);
82417+ str1 = va_arg(ap, char *);
82418+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
82419+ break;
82420+ case GR_FILENAME_TWO_INT:
82421+ dentry = va_arg(ap, struct dentry *);
82422+ mnt = va_arg(ap, struct vfsmount *);
82423+ num1 = va_arg(ap, int);
82424+ num2 = va_arg(ap, int);
82425+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
82426+ break;
82427+ case GR_FILENAME_TWO_INT_STR:
82428+ dentry = va_arg(ap, struct dentry *);
82429+ mnt = va_arg(ap, struct vfsmount *);
82430+ num1 = va_arg(ap, int);
82431+ num2 = va_arg(ap, int);
82432+ str1 = va_arg(ap, char *);
82433+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
82434+ break;
82435+ case GR_TEXTREL:
82436+ file = va_arg(ap, struct file *);
82437+ ulong1 = va_arg(ap, unsigned long);
82438+ ulong2 = va_arg(ap, unsigned long);
82439+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
82440+ break;
82441+ case GR_PTRACE:
82442+ task = va_arg(ap, struct task_struct *);
82443+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
82444+ break;
82445+ case GR_RESOURCE:
82446+ task = va_arg(ap, struct task_struct *);
82447+ cred = __task_cred(task);
82448+ pcred = __task_cred(task->real_parent);
82449+ ulong1 = va_arg(ap, unsigned long);
82450+ str1 = va_arg(ap, char *);
82451+ ulong2 = va_arg(ap, unsigned long);
82452+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82453+ break;
82454+ case GR_CAP:
82455+ task = va_arg(ap, struct task_struct *);
82456+ cred = __task_cred(task);
82457+ pcred = __task_cred(task->real_parent);
82458+ str1 = va_arg(ap, char *);
82459+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82460+ break;
82461+ case GR_SIG:
82462+ str1 = va_arg(ap, char *);
82463+ voidptr = va_arg(ap, void *);
82464+ gr_log_middle_varargs(audit, msg, str1, voidptr);
82465+ break;
82466+ case GR_SIG2:
82467+ task = va_arg(ap, struct task_struct *);
82468+ cred = __task_cred(task);
82469+ pcred = __task_cred(task->real_parent);
82470+ num1 = va_arg(ap, int);
82471+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82472+ break;
82473+ case GR_CRASH1:
82474+ task = va_arg(ap, struct task_struct *);
82475+ cred = __task_cred(task);
82476+ pcred = __task_cred(task->real_parent);
82477+ ulong1 = va_arg(ap, unsigned long);
82478+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
82479+ break;
82480+ case GR_CRASH2:
82481+ task = va_arg(ap, struct task_struct *);
82482+ cred = __task_cred(task);
82483+ pcred = __task_cred(task->real_parent);
82484+ ulong1 = va_arg(ap, unsigned long);
82485+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
82486+ break;
82487+ case GR_RWXMAP:
82488+ file = va_arg(ap, struct file *);
82489+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
82490+ break;
82491+ case GR_PSACCT:
82492+ {
82493+ unsigned int wday, cday;
82494+ __u8 whr, chr;
82495+ __u8 wmin, cmin;
82496+ __u8 wsec, csec;
82497+ char cur_tty[64] = { 0 };
82498+ char parent_tty[64] = { 0 };
82499+
82500+ task = va_arg(ap, struct task_struct *);
82501+ wday = va_arg(ap, unsigned int);
82502+ cday = va_arg(ap, unsigned int);
82503+ whr = va_arg(ap, int);
82504+ chr = va_arg(ap, int);
82505+ wmin = va_arg(ap, int);
82506+ cmin = va_arg(ap, int);
82507+ wsec = va_arg(ap, int);
82508+ csec = va_arg(ap, int);
82509+ ulong1 = va_arg(ap, unsigned long);
82510+ cred = __task_cred(task);
82511+ pcred = __task_cred(task->real_parent);
82512+
82513+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82514+ }
82515+ break;
82516+ default:
82517+ gr_log_middle(audit, msg, ap);
82518+ }
82519+ va_end(ap);
82520+ // these don't need DEFAULTSECARGS printed on the end
82521+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
82522+ gr_log_end(audit, 0);
82523+ else
82524+ gr_log_end(audit, 1);
82525+ END_LOCKS(audit);
82526+}
82527diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
82528new file mode 100644
82529index 0000000..f536303
82530--- /dev/null
82531+++ b/grsecurity/grsec_mem.c
82532@@ -0,0 +1,40 @@
82533+#include <linux/kernel.h>
82534+#include <linux/sched.h>
82535+#include <linux/mm.h>
82536+#include <linux/mman.h>
82537+#include <linux/grinternal.h>
82538+
82539+void
82540+gr_handle_ioperm(void)
82541+{
82542+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
82543+ return;
82544+}
82545+
82546+void
82547+gr_handle_iopl(void)
82548+{
82549+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
82550+ return;
82551+}
82552+
82553+void
82554+gr_handle_mem_readwrite(u64 from, u64 to)
82555+{
82556+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
82557+ return;
82558+}
82559+
82560+void
82561+gr_handle_vm86(void)
82562+{
82563+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
82564+ return;
82565+}
82566+
82567+void
82568+gr_log_badprocpid(const char *entry)
82569+{
82570+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
82571+ return;
82572+}
82573diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
82574new file mode 100644
82575index 0000000..2131422
82576--- /dev/null
82577+++ b/grsecurity/grsec_mount.c
82578@@ -0,0 +1,62 @@
82579+#include <linux/kernel.h>
82580+#include <linux/sched.h>
82581+#include <linux/mount.h>
82582+#include <linux/grsecurity.h>
82583+#include <linux/grinternal.h>
82584+
82585+void
82586+gr_log_remount(const char *devname, const int retval)
82587+{
82588+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82589+ if (grsec_enable_mount && (retval >= 0))
82590+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
82591+#endif
82592+ return;
82593+}
82594+
82595+void
82596+gr_log_unmount(const char *devname, const int retval)
82597+{
82598+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82599+ if (grsec_enable_mount && (retval >= 0))
82600+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
82601+#endif
82602+ return;
82603+}
82604+
82605+void
82606+gr_log_mount(const char *from, const char *to, const int retval)
82607+{
82608+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82609+ if (grsec_enable_mount && (retval >= 0))
82610+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
82611+#endif
82612+ return;
82613+}
82614+
82615+int
82616+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
82617+{
82618+#ifdef CONFIG_GRKERNSEC_ROFS
82619+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
82620+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
82621+ return -EPERM;
82622+ } else
82623+ return 0;
82624+#endif
82625+ return 0;
82626+}
82627+
82628+int
82629+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
82630+{
82631+#ifdef CONFIG_GRKERNSEC_ROFS
82632+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
82633+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
82634+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
82635+ return -EPERM;
82636+ } else
82637+ return 0;
82638+#endif
82639+ return 0;
82640+}
82641diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
82642new file mode 100644
82643index 0000000..a3b12a0
82644--- /dev/null
82645+++ b/grsecurity/grsec_pax.c
82646@@ -0,0 +1,36 @@
82647+#include <linux/kernel.h>
82648+#include <linux/sched.h>
82649+#include <linux/mm.h>
82650+#include <linux/file.h>
82651+#include <linux/grinternal.h>
82652+#include <linux/grsecurity.h>
82653+
82654+void
82655+gr_log_textrel(struct vm_area_struct * vma)
82656+{
82657+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82658+ if (grsec_enable_audit_textrel)
82659+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
82660+#endif
82661+ return;
82662+}
82663+
82664+void
82665+gr_log_rwxmmap(struct file *file)
82666+{
82667+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82668+ if (grsec_enable_log_rwxmaps)
82669+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
82670+#endif
82671+ return;
82672+}
82673+
82674+void
82675+gr_log_rwxmprotect(struct file *file)
82676+{
82677+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82678+ if (grsec_enable_log_rwxmaps)
82679+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
82680+#endif
82681+ return;
82682+}
82683diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
82684new file mode 100644
82685index 0000000..78f8733
82686--- /dev/null
82687+++ b/grsecurity/grsec_ptrace.c
82688@@ -0,0 +1,30 @@
82689+#include <linux/kernel.h>
82690+#include <linux/sched.h>
82691+#include <linux/grinternal.h>
82692+#include <linux/security.h>
82693+
82694+void
82695+gr_audit_ptrace(struct task_struct *task)
82696+{
82697+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82698+ if (grsec_enable_audit_ptrace)
82699+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
82700+#endif
82701+ return;
82702+}
82703+
82704+int
82705+gr_ptrace_readexec(struct file *file, int unsafe_flags)
82706+{
82707+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82708+ const struct dentry *dentry = file->f_path.dentry;
82709+ const struct vfsmount *mnt = file->f_path.mnt;
82710+
82711+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
82712+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
82713+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
82714+ return -EACCES;
82715+ }
82716+#endif
82717+ return 0;
82718+}
82719diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
82720new file mode 100644
82721index 0000000..c648492
82722--- /dev/null
82723+++ b/grsecurity/grsec_sig.c
82724@@ -0,0 +1,206 @@
82725+#include <linux/kernel.h>
82726+#include <linux/sched.h>
82727+#include <linux/delay.h>
82728+#include <linux/grsecurity.h>
82729+#include <linux/grinternal.h>
82730+#include <linux/hardirq.h>
82731+
82732+char *signames[] = {
82733+ [SIGSEGV] = "Segmentation fault",
82734+ [SIGILL] = "Illegal instruction",
82735+ [SIGABRT] = "Abort",
82736+ [SIGBUS] = "Invalid alignment/Bus error"
82737+};
82738+
82739+void
82740+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
82741+{
82742+#ifdef CONFIG_GRKERNSEC_SIGNAL
82743+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
82744+ (sig == SIGABRT) || (sig == SIGBUS))) {
82745+ if (t->pid == current->pid) {
82746+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
82747+ } else {
82748+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
82749+ }
82750+ }
82751+#endif
82752+ return;
82753+}
82754+
82755+int
82756+gr_handle_signal(const struct task_struct *p, const int sig)
82757+{
82758+#ifdef CONFIG_GRKERNSEC
82759+ /* ignore the 0 signal for protected task checks */
82760+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
82761+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
82762+ return -EPERM;
82763+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
82764+ return -EPERM;
82765+ }
82766+#endif
82767+ return 0;
82768+}
82769+
82770+#ifdef CONFIG_GRKERNSEC
82771+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
82772+
82773+int gr_fake_force_sig(int sig, struct task_struct *t)
82774+{
82775+ unsigned long int flags;
82776+ int ret, blocked, ignored;
82777+ struct k_sigaction *action;
82778+
82779+ spin_lock_irqsave(&t->sighand->siglock, flags);
82780+ action = &t->sighand->action[sig-1];
82781+ ignored = action->sa.sa_handler == SIG_IGN;
82782+ blocked = sigismember(&t->blocked, sig);
82783+ if (blocked || ignored) {
82784+ action->sa.sa_handler = SIG_DFL;
82785+ if (blocked) {
82786+ sigdelset(&t->blocked, sig);
82787+ recalc_sigpending_and_wake(t);
82788+ }
82789+ }
82790+ if (action->sa.sa_handler == SIG_DFL)
82791+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
82792+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
82793+
82794+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
82795+
82796+ return ret;
82797+}
82798+#endif
82799+
82800+#ifdef CONFIG_GRKERNSEC_BRUTE
82801+#define GR_USER_BAN_TIME (15 * 60)
82802+
82803+static int __get_dumpable(unsigned long mm_flags)
82804+{
82805+ int ret;
82806+
82807+ ret = mm_flags & MMF_DUMPABLE_MASK;
82808+ return (ret >= 2) ? 2 : ret;
82809+}
82810+#endif
82811+
82812+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
82813+{
82814+#ifdef CONFIG_GRKERNSEC_BRUTE
82815+ uid_t uid = 0;
82816+
82817+ if (!grsec_enable_brute)
82818+ return;
82819+
82820+ rcu_read_lock();
82821+ read_lock(&tasklist_lock);
82822+ read_lock(&grsec_exec_file_lock);
82823+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
82824+ p->real_parent->brute = 1;
82825+ else {
82826+ const struct cred *cred = __task_cred(p), *cred2;
82827+ struct task_struct *tsk, *tsk2;
82828+
82829+ if (!__get_dumpable(mm_flags) && cred->uid) {
82830+ struct user_struct *user;
82831+
82832+ uid = cred->uid;
82833+
82834+ /* this is put upon execution past expiration */
82835+ user = find_user(uid);
82836+ if (user == NULL)
82837+ goto unlock;
82838+ user->banned = 1;
82839+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
82840+ if (user->ban_expires == ~0UL)
82841+ user->ban_expires--;
82842+
82843+ do_each_thread(tsk2, tsk) {
82844+ cred2 = __task_cred(tsk);
82845+ if (tsk != p && cred2->uid == uid)
82846+ gr_fake_force_sig(SIGKILL, tsk);
82847+ } while_each_thread(tsk2, tsk);
82848+ }
82849+ }
82850+unlock:
82851+ read_unlock(&grsec_exec_file_lock);
82852+ read_unlock(&tasklist_lock);
82853+ rcu_read_unlock();
82854+
82855+ if (uid)
82856+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
82857+#endif
82858+ return;
82859+}
82860+
82861+void gr_handle_brute_check(void)
82862+{
82863+#ifdef CONFIG_GRKERNSEC_BRUTE
82864+ if (current->brute)
82865+ msleep(30 * 1000);
82866+#endif
82867+ return;
82868+}
82869+
82870+void gr_handle_kernel_exploit(void)
82871+{
82872+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
82873+ const struct cred *cred;
82874+ struct task_struct *tsk, *tsk2;
82875+ struct user_struct *user;
82876+ uid_t uid;
82877+
82878+ if (in_irq() || in_serving_softirq() || in_nmi())
82879+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
82880+
82881+ uid = current_uid();
82882+
82883+ if (uid == 0)
82884+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
82885+ else {
82886+ /* kill all the processes of this user, hold a reference
82887+ to their creds struct, and prevent them from creating
82888+ another process until system reset
82889+ */
82890+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
82891+ /* we intentionally leak this ref */
82892+ user = get_uid(current->cred->user);
82893+ if (user) {
82894+ user->banned = 1;
82895+ user->ban_expires = ~0UL;
82896+ }
82897+
82898+ read_lock(&tasklist_lock);
82899+ do_each_thread(tsk2, tsk) {
82900+ cred = __task_cred(tsk);
82901+ if (cred->uid == uid)
82902+ gr_fake_force_sig(SIGKILL, tsk);
82903+ } while_each_thread(tsk2, tsk);
82904+ read_unlock(&tasklist_lock);
82905+ }
82906+#endif
82907+}
82908+
82909+int __gr_process_user_ban(struct user_struct *user)
82910+{
82911+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82912+ if (unlikely(user->banned)) {
82913+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
82914+ user->banned = 0;
82915+ user->ban_expires = 0;
82916+ free_uid(user);
82917+ } else
82918+ return -EPERM;
82919+ }
82920+#endif
82921+ return 0;
82922+}
82923+
82924+int gr_process_user_ban(void)
82925+{
82926+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82927+ return __gr_process_user_ban(current->cred->user);
82928+#endif
82929+ return 0;
82930+}
82931diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
82932new file mode 100644
82933index 0000000..7512ea9
82934--- /dev/null
82935+++ b/grsecurity/grsec_sock.c
82936@@ -0,0 +1,275 @@
82937+#include <linux/kernel.h>
82938+#include <linux/module.h>
82939+#include <linux/sched.h>
82940+#include <linux/file.h>
82941+#include <linux/net.h>
82942+#include <linux/in.h>
82943+#include <linux/ip.h>
82944+#include <net/sock.h>
82945+#include <net/inet_sock.h>
82946+#include <linux/grsecurity.h>
82947+#include <linux/grinternal.h>
82948+#include <linux/gracl.h>
82949+
82950+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
82951+EXPORT_SYMBOL(gr_cap_rtnetlink);
82952+
82953+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
82954+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
82955+
82956+EXPORT_SYMBOL(gr_search_udp_recvmsg);
82957+EXPORT_SYMBOL(gr_search_udp_sendmsg);
82958+
82959+#ifdef CONFIG_UNIX_MODULE
82960+EXPORT_SYMBOL(gr_acl_handle_unix);
82961+EXPORT_SYMBOL(gr_acl_handle_mknod);
82962+EXPORT_SYMBOL(gr_handle_chroot_unix);
82963+EXPORT_SYMBOL(gr_handle_create);
82964+#endif
82965+
82966+#ifdef CONFIG_GRKERNSEC
82967+#define gr_conn_table_size 32749
82968+struct conn_table_entry {
82969+ struct conn_table_entry *next;
82970+ struct signal_struct *sig;
82971+};
82972+
82973+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
82974+DEFINE_SPINLOCK(gr_conn_table_lock);
82975+
82976+extern const char * gr_socktype_to_name(unsigned char type);
82977+extern const char * gr_proto_to_name(unsigned char proto);
82978+extern const char * gr_sockfamily_to_name(unsigned char family);
82979+
82980+static __inline__ int
82981+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
82982+{
82983+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
82984+}
82985+
82986+static __inline__ int
82987+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
82988+ __u16 sport, __u16 dport)
82989+{
82990+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
82991+ sig->gr_sport == sport && sig->gr_dport == dport))
82992+ return 1;
82993+ else
82994+ return 0;
82995+}
82996+
82997+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
82998+{
82999+ struct conn_table_entry **match;
83000+ unsigned int index;
83001+
83002+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
83003+ sig->gr_sport, sig->gr_dport,
83004+ gr_conn_table_size);
83005+
83006+ newent->sig = sig;
83007+
83008+ match = &gr_conn_table[index];
83009+ newent->next = *match;
83010+ *match = newent;
83011+
83012+ return;
83013+}
83014+
83015+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
83016+{
83017+ struct conn_table_entry *match, *last = NULL;
83018+ unsigned int index;
83019+
83020+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
83021+ sig->gr_sport, sig->gr_dport,
83022+ gr_conn_table_size);
83023+
83024+ match = gr_conn_table[index];
83025+ while (match && !conn_match(match->sig,
83026+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
83027+ sig->gr_dport)) {
83028+ last = match;
83029+ match = match->next;
83030+ }
83031+
83032+ if (match) {
83033+ if (last)
83034+ last->next = match->next;
83035+ else
83036+ gr_conn_table[index] = NULL;
83037+ kfree(match);
83038+ }
83039+
83040+ return;
83041+}
83042+
83043+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
83044+ __u16 sport, __u16 dport)
83045+{
83046+ struct conn_table_entry *match;
83047+ unsigned int index;
83048+
83049+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
83050+
83051+ match = gr_conn_table[index];
83052+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
83053+ match = match->next;
83054+
83055+ if (match)
83056+ return match->sig;
83057+ else
83058+ return NULL;
83059+}
83060+
83061+#endif
83062+
83063+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
83064+{
83065+#ifdef CONFIG_GRKERNSEC
83066+ struct signal_struct *sig = task->signal;
83067+ struct conn_table_entry *newent;
83068+
83069+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
83070+ if (newent == NULL)
83071+ return;
83072+ /* no bh lock needed since we are called with bh disabled */
83073+ spin_lock(&gr_conn_table_lock);
83074+ gr_del_task_from_ip_table_nolock(sig);
83075+ sig->gr_saddr = inet->rcv_saddr;
83076+ sig->gr_daddr = inet->daddr;
83077+ sig->gr_sport = inet->sport;
83078+ sig->gr_dport = inet->dport;
83079+ gr_add_to_task_ip_table_nolock(sig, newent);
83080+ spin_unlock(&gr_conn_table_lock);
83081+#endif
83082+ return;
83083+}
83084+
83085+void gr_del_task_from_ip_table(struct task_struct *task)
83086+{
83087+#ifdef CONFIG_GRKERNSEC
83088+ spin_lock_bh(&gr_conn_table_lock);
83089+ gr_del_task_from_ip_table_nolock(task->signal);
83090+ spin_unlock_bh(&gr_conn_table_lock);
83091+#endif
83092+ return;
83093+}
83094+
83095+void
83096+gr_attach_curr_ip(const struct sock *sk)
83097+{
83098+#ifdef CONFIG_GRKERNSEC
83099+ struct signal_struct *p, *set;
83100+ const struct inet_sock *inet = inet_sk(sk);
83101+
83102+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
83103+ return;
83104+
83105+ set = current->signal;
83106+
83107+ spin_lock_bh(&gr_conn_table_lock);
83108+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
83109+ inet->dport, inet->sport);
83110+ if (unlikely(p != NULL)) {
83111+ set->curr_ip = p->curr_ip;
83112+ set->used_accept = 1;
83113+ gr_del_task_from_ip_table_nolock(p);
83114+ spin_unlock_bh(&gr_conn_table_lock);
83115+ return;
83116+ }
83117+ spin_unlock_bh(&gr_conn_table_lock);
83118+
83119+ set->curr_ip = inet->daddr;
83120+ set->used_accept = 1;
83121+#endif
83122+ return;
83123+}
83124+
83125+int
83126+gr_handle_sock_all(const int family, const int type, const int protocol)
83127+{
83128+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83129+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
83130+ (family != AF_UNIX)) {
83131+ if (family == AF_INET)
83132+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
83133+ else
83134+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
83135+ return -EACCES;
83136+ }
83137+#endif
83138+ return 0;
83139+}
83140+
83141+int
83142+gr_handle_sock_server(const struct sockaddr *sck)
83143+{
83144+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83145+ if (grsec_enable_socket_server &&
83146+ in_group_p(grsec_socket_server_gid) &&
83147+ sck && (sck->sa_family != AF_UNIX) &&
83148+ (sck->sa_family != AF_LOCAL)) {
83149+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83150+ return -EACCES;
83151+ }
83152+#endif
83153+ return 0;
83154+}
83155+
83156+int
83157+gr_handle_sock_server_other(const struct sock *sck)
83158+{
83159+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83160+ if (grsec_enable_socket_server &&
83161+ in_group_p(grsec_socket_server_gid) &&
83162+ sck && (sck->sk_family != AF_UNIX) &&
83163+ (sck->sk_family != AF_LOCAL)) {
83164+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83165+ return -EACCES;
83166+ }
83167+#endif
83168+ return 0;
83169+}
83170+
83171+int
83172+gr_handle_sock_client(const struct sockaddr *sck)
83173+{
83174+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83175+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
83176+ sck && (sck->sa_family != AF_UNIX) &&
83177+ (sck->sa_family != AF_LOCAL)) {
83178+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
83179+ return -EACCES;
83180+ }
83181+#endif
83182+ return 0;
83183+}
83184+
83185+kernel_cap_t
83186+gr_cap_rtnetlink(struct sock *sock)
83187+{
83188+#ifdef CONFIG_GRKERNSEC
83189+ if (!gr_acl_is_enabled())
83190+ return current_cap();
83191+ else if (sock->sk_protocol == NETLINK_ISCSI &&
83192+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
83193+ gr_is_capable(CAP_SYS_ADMIN))
83194+ return current_cap();
83195+ else if (sock->sk_protocol == NETLINK_AUDIT &&
83196+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
83197+ gr_is_capable(CAP_AUDIT_WRITE) &&
83198+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
83199+ gr_is_capable(CAP_AUDIT_CONTROL))
83200+ return current_cap();
83201+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
83202+ ((sock->sk_protocol == NETLINK_ROUTE) ?
83203+ gr_is_capable_nolog(CAP_NET_ADMIN) :
83204+ gr_is_capable(CAP_NET_ADMIN)))
83205+ return current_cap();
83206+ else
83207+ return __cap_empty_set;
83208+#else
83209+ return current_cap();
83210+#endif
83211+}
83212diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
83213new file mode 100644
83214index 0000000..31f3258
83215--- /dev/null
83216+++ b/grsecurity/grsec_sysctl.c
83217@@ -0,0 +1,499 @@
83218+#include <linux/kernel.h>
83219+#include <linux/sched.h>
83220+#include <linux/sysctl.h>
83221+#include <linux/grsecurity.h>
83222+#include <linux/grinternal.h>
83223+
83224+int
83225+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
83226+{
83227+#ifdef CONFIG_GRKERNSEC_SYSCTL
83228+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
83229+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
83230+ return -EACCES;
83231+ }
83232+#endif
83233+ return 0;
83234+}
83235+
83236+#ifdef CONFIG_GRKERNSEC_ROFS
83237+static int __maybe_unused one = 1;
83238+#endif
83239+
83240+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
83241+ctl_table grsecurity_table[] = {
83242+#ifdef CONFIG_GRKERNSEC_SYSCTL
83243+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
83244+#ifdef CONFIG_GRKERNSEC_IO
83245+ {
83246+ .ctl_name = CTL_UNNUMBERED,
83247+ .procname = "disable_priv_io",
83248+ .data = &grsec_disable_privio,
83249+ .maxlen = sizeof(int),
83250+ .mode = 0600,
83251+ .proc_handler = &proc_dointvec,
83252+ },
83253+#endif
83254+#endif
83255+#ifdef CONFIG_GRKERNSEC_LINK
83256+ {
83257+ .ctl_name = CTL_UNNUMBERED,
83258+ .procname = "linking_restrictions",
83259+ .data = &grsec_enable_link,
83260+ .maxlen = sizeof(int),
83261+ .mode = 0600,
83262+ .proc_handler = &proc_dointvec,
83263+ },
83264+#endif
83265+#ifdef CONFIG_GRKERNSEC_BRUTE
83266+ {
83267+ .ctl_name = CTL_UNNUMBERED,
83268+ .procname = "deter_bruteforce",
83269+ .data = &grsec_enable_brute,
83270+ .maxlen = sizeof(int),
83271+ .mode = 0600,
83272+ .proc_handler = &proc_dointvec,
83273+ },
83274+#endif
83275+#ifdef CONFIG_GRKERNSEC_FIFO
83276+ {
83277+ .ctl_name = CTL_UNNUMBERED,
83278+ .procname = "fifo_restrictions",
83279+ .data = &grsec_enable_fifo,
83280+ .maxlen = sizeof(int),
83281+ .mode = 0600,
83282+ .proc_handler = &proc_dointvec,
83283+ },
83284+#endif
83285+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
83286+ {
83287+ .ctl_name = CTL_UNNUMBERED,
83288+ .procname = "ptrace_readexec",
83289+ .data = &grsec_enable_ptrace_readexec,
83290+ .maxlen = sizeof(int),
83291+ .mode = 0600,
83292+ .proc_handler = &proc_dointvec,
83293+ },
83294+#endif
83295+#ifdef CONFIG_GRKERNSEC_SETXID
83296+ {
83297+ .ctl_name = CTL_UNNUMBERED,
83298+ .procname = "consistent_setxid",
83299+ .data = &grsec_enable_setxid,
83300+ .maxlen = sizeof(int),
83301+ .mode = 0600,
83302+ .proc_handler = &proc_dointvec,
83303+ },
83304+#endif
83305+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83306+ {
83307+ .ctl_name = CTL_UNNUMBERED,
83308+ .procname = "ip_blackhole",
83309+ .data = &grsec_enable_blackhole,
83310+ .maxlen = sizeof(int),
83311+ .mode = 0600,
83312+ .proc_handler = &proc_dointvec,
83313+ },
83314+ {
83315+ .ctl_name = CTL_UNNUMBERED,
83316+ .procname = "lastack_retries",
83317+ .data = &grsec_lastack_retries,
83318+ .maxlen = sizeof(int),
83319+ .mode = 0600,
83320+ .proc_handler = &proc_dointvec,
83321+ },
83322+#endif
83323+#ifdef CONFIG_GRKERNSEC_EXECLOG
83324+ {
83325+ .ctl_name = CTL_UNNUMBERED,
83326+ .procname = "exec_logging",
83327+ .data = &grsec_enable_execlog,
83328+ .maxlen = sizeof(int),
83329+ .mode = 0600,
83330+ .proc_handler = &proc_dointvec,
83331+ },
83332+#endif
83333+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
83334+ {
83335+ .ctl_name = CTL_UNNUMBERED,
83336+ .procname = "rwxmap_logging",
83337+ .data = &grsec_enable_log_rwxmaps,
83338+ .maxlen = sizeof(int),
83339+ .mode = 0600,
83340+ .proc_handler = &proc_dointvec,
83341+ },
83342+#endif
83343+#ifdef CONFIG_GRKERNSEC_SIGNAL
83344+ {
83345+ .ctl_name = CTL_UNNUMBERED,
83346+ .procname = "signal_logging",
83347+ .data = &grsec_enable_signal,
83348+ .maxlen = sizeof(int),
83349+ .mode = 0600,
83350+ .proc_handler = &proc_dointvec,
83351+ },
83352+#endif
83353+#ifdef CONFIG_GRKERNSEC_FORKFAIL
83354+ {
83355+ .ctl_name = CTL_UNNUMBERED,
83356+ .procname = "forkfail_logging",
83357+ .data = &grsec_enable_forkfail,
83358+ .maxlen = sizeof(int),
83359+ .mode = 0600,
83360+ .proc_handler = &proc_dointvec,
83361+ },
83362+#endif
83363+#ifdef CONFIG_GRKERNSEC_TIME
83364+ {
83365+ .ctl_name = CTL_UNNUMBERED,
83366+ .procname = "timechange_logging",
83367+ .data = &grsec_enable_time,
83368+ .maxlen = sizeof(int),
83369+ .mode = 0600,
83370+ .proc_handler = &proc_dointvec,
83371+ },
83372+#endif
83373+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
83374+ {
83375+ .ctl_name = CTL_UNNUMBERED,
83376+ .procname = "chroot_deny_shmat",
83377+ .data = &grsec_enable_chroot_shmat,
83378+ .maxlen = sizeof(int),
83379+ .mode = 0600,
83380+ .proc_handler = &proc_dointvec,
83381+ },
83382+#endif
83383+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
83384+ {
83385+ .ctl_name = CTL_UNNUMBERED,
83386+ .procname = "chroot_deny_unix",
83387+ .data = &grsec_enable_chroot_unix,
83388+ .maxlen = sizeof(int),
83389+ .mode = 0600,
83390+ .proc_handler = &proc_dointvec,
83391+ },
83392+#endif
83393+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
83394+ {
83395+ .ctl_name = CTL_UNNUMBERED,
83396+ .procname = "chroot_deny_mount",
83397+ .data = &grsec_enable_chroot_mount,
83398+ .maxlen = sizeof(int),
83399+ .mode = 0600,
83400+ .proc_handler = &proc_dointvec,
83401+ },
83402+#endif
83403+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
83404+ {
83405+ .ctl_name = CTL_UNNUMBERED,
83406+ .procname = "chroot_deny_fchdir",
83407+ .data = &grsec_enable_chroot_fchdir,
83408+ .maxlen = sizeof(int),
83409+ .mode = 0600,
83410+ .proc_handler = &proc_dointvec,
83411+ },
83412+#endif
83413+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
83414+ {
83415+ .ctl_name = CTL_UNNUMBERED,
83416+ .procname = "chroot_deny_chroot",
83417+ .data = &grsec_enable_chroot_double,
83418+ .maxlen = sizeof(int),
83419+ .mode = 0600,
83420+ .proc_handler = &proc_dointvec,
83421+ },
83422+#endif
83423+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
83424+ {
83425+ .ctl_name = CTL_UNNUMBERED,
83426+ .procname = "chroot_deny_pivot",
83427+ .data = &grsec_enable_chroot_pivot,
83428+ .maxlen = sizeof(int),
83429+ .mode = 0600,
83430+ .proc_handler = &proc_dointvec,
83431+ },
83432+#endif
83433+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
83434+ {
83435+ .ctl_name = CTL_UNNUMBERED,
83436+ .procname = "chroot_enforce_chdir",
83437+ .data = &grsec_enable_chroot_chdir,
83438+ .maxlen = sizeof(int),
83439+ .mode = 0600,
83440+ .proc_handler = &proc_dointvec,
83441+ },
83442+#endif
83443+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
83444+ {
83445+ .ctl_name = CTL_UNNUMBERED,
83446+ .procname = "chroot_deny_chmod",
83447+ .data = &grsec_enable_chroot_chmod,
83448+ .maxlen = sizeof(int),
83449+ .mode = 0600,
83450+ .proc_handler = &proc_dointvec,
83451+ },
83452+#endif
83453+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
83454+ {
83455+ .ctl_name = CTL_UNNUMBERED,
83456+ .procname = "chroot_deny_mknod",
83457+ .data = &grsec_enable_chroot_mknod,
83458+ .maxlen = sizeof(int),
83459+ .mode = 0600,
83460+ .proc_handler = &proc_dointvec,
83461+ },
83462+#endif
83463+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
83464+ {
83465+ .ctl_name = CTL_UNNUMBERED,
83466+ .procname = "chroot_restrict_nice",
83467+ .data = &grsec_enable_chroot_nice,
83468+ .maxlen = sizeof(int),
83469+ .mode = 0600,
83470+ .proc_handler = &proc_dointvec,
83471+ },
83472+#endif
83473+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
83474+ {
83475+ .ctl_name = CTL_UNNUMBERED,
83476+ .procname = "chroot_execlog",
83477+ .data = &grsec_enable_chroot_execlog,
83478+ .maxlen = sizeof(int),
83479+ .mode = 0600,
83480+ .proc_handler = &proc_dointvec,
83481+ },
83482+#endif
83483+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
83484+ {
83485+ .ctl_name = CTL_UNNUMBERED,
83486+ .procname = "chroot_caps",
83487+ .data = &grsec_enable_chroot_caps,
83488+ .maxlen = sizeof(int),
83489+ .mode = 0600,
83490+ .proc_handler = &proc_dointvec,
83491+ },
83492+#endif
83493+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
83494+ {
83495+ .ctl_name = CTL_UNNUMBERED,
83496+ .procname = "chroot_deny_sysctl",
83497+ .data = &grsec_enable_chroot_sysctl,
83498+ .maxlen = sizeof(int),
83499+ .mode = 0600,
83500+ .proc_handler = &proc_dointvec,
83501+ },
83502+#endif
83503+#ifdef CONFIG_GRKERNSEC_TPE
83504+ {
83505+ .ctl_name = CTL_UNNUMBERED,
83506+ .procname = "tpe",
83507+ .data = &grsec_enable_tpe,
83508+ .maxlen = sizeof(int),
83509+ .mode = 0600,
83510+ .proc_handler = &proc_dointvec,
83511+ },
83512+ {
83513+ .ctl_name = CTL_UNNUMBERED,
83514+ .procname = "tpe_gid",
83515+ .data = &grsec_tpe_gid,
83516+ .maxlen = sizeof(int),
83517+ .mode = 0600,
83518+ .proc_handler = &proc_dointvec,
83519+ },
83520+#endif
83521+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83522+ {
83523+ .ctl_name = CTL_UNNUMBERED,
83524+ .procname = "tpe_invert",
83525+ .data = &grsec_enable_tpe_invert,
83526+ .maxlen = sizeof(int),
83527+ .mode = 0600,
83528+ .proc_handler = &proc_dointvec,
83529+ },
83530+#endif
83531+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83532+ {
83533+ .ctl_name = CTL_UNNUMBERED,
83534+ .procname = "tpe_restrict_all",
83535+ .data = &grsec_enable_tpe_all,
83536+ .maxlen = sizeof(int),
83537+ .mode = 0600,
83538+ .proc_handler = &proc_dointvec,
83539+ },
83540+#endif
83541+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83542+ {
83543+ .ctl_name = CTL_UNNUMBERED,
83544+ .procname = "socket_all",
83545+ .data = &grsec_enable_socket_all,
83546+ .maxlen = sizeof(int),
83547+ .mode = 0600,
83548+ .proc_handler = &proc_dointvec,
83549+ },
83550+ {
83551+ .ctl_name = CTL_UNNUMBERED,
83552+ .procname = "socket_all_gid",
83553+ .data = &grsec_socket_all_gid,
83554+ .maxlen = sizeof(int),
83555+ .mode = 0600,
83556+ .proc_handler = &proc_dointvec,
83557+ },
83558+#endif
83559+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83560+ {
83561+ .ctl_name = CTL_UNNUMBERED,
83562+ .procname = "socket_client",
83563+ .data = &grsec_enable_socket_client,
83564+ .maxlen = sizeof(int),
83565+ .mode = 0600,
83566+ .proc_handler = &proc_dointvec,
83567+ },
83568+ {
83569+ .ctl_name = CTL_UNNUMBERED,
83570+ .procname = "socket_client_gid",
83571+ .data = &grsec_socket_client_gid,
83572+ .maxlen = sizeof(int),
83573+ .mode = 0600,
83574+ .proc_handler = &proc_dointvec,
83575+ },
83576+#endif
83577+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83578+ {
83579+ .ctl_name = CTL_UNNUMBERED,
83580+ .procname = "socket_server",
83581+ .data = &grsec_enable_socket_server,
83582+ .maxlen = sizeof(int),
83583+ .mode = 0600,
83584+ .proc_handler = &proc_dointvec,
83585+ },
83586+ {
83587+ .ctl_name = CTL_UNNUMBERED,
83588+ .procname = "socket_server_gid",
83589+ .data = &grsec_socket_server_gid,
83590+ .maxlen = sizeof(int),
83591+ .mode = 0600,
83592+ .proc_handler = &proc_dointvec,
83593+ },
83594+#endif
83595+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
83596+ {
83597+ .ctl_name = CTL_UNNUMBERED,
83598+ .procname = "audit_group",
83599+ .data = &grsec_enable_group,
83600+ .maxlen = sizeof(int),
83601+ .mode = 0600,
83602+ .proc_handler = &proc_dointvec,
83603+ },
83604+ {
83605+ .ctl_name = CTL_UNNUMBERED,
83606+ .procname = "audit_gid",
83607+ .data = &grsec_audit_gid,
83608+ .maxlen = sizeof(int),
83609+ .mode = 0600,
83610+ .proc_handler = &proc_dointvec,
83611+ },
83612+#endif
83613+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
83614+ {
83615+ .ctl_name = CTL_UNNUMBERED,
83616+ .procname = "audit_chdir",
83617+ .data = &grsec_enable_chdir,
83618+ .maxlen = sizeof(int),
83619+ .mode = 0600,
83620+ .proc_handler = &proc_dointvec,
83621+ },
83622+#endif
83623+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
83624+ {
83625+ .ctl_name = CTL_UNNUMBERED,
83626+ .procname = "audit_mount",
83627+ .data = &grsec_enable_mount,
83628+ .maxlen = sizeof(int),
83629+ .mode = 0600,
83630+ .proc_handler = &proc_dointvec,
83631+ },
83632+#endif
83633+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
83634+ {
83635+ .ctl_name = CTL_UNNUMBERED,
83636+ .procname = "audit_textrel",
83637+ .data = &grsec_enable_audit_textrel,
83638+ .maxlen = sizeof(int),
83639+ .mode = 0600,
83640+ .proc_handler = &proc_dointvec,
83641+ },
83642+#endif
83643+#ifdef CONFIG_GRKERNSEC_DMESG
83644+ {
83645+ .ctl_name = CTL_UNNUMBERED,
83646+ .procname = "dmesg",
83647+ .data = &grsec_enable_dmesg,
83648+ .maxlen = sizeof(int),
83649+ .mode = 0600,
83650+ .proc_handler = &proc_dointvec,
83651+ },
83652+#endif
83653+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83654+ {
83655+ .ctl_name = CTL_UNNUMBERED,
83656+ .procname = "chroot_findtask",
83657+ .data = &grsec_enable_chroot_findtask,
83658+ .maxlen = sizeof(int),
83659+ .mode = 0600,
83660+ .proc_handler = &proc_dointvec,
83661+ },
83662+#endif
83663+#ifdef CONFIG_GRKERNSEC_RESLOG
83664+ {
83665+ .ctl_name = CTL_UNNUMBERED,
83666+ .procname = "resource_logging",
83667+ .data = &grsec_resource_logging,
83668+ .maxlen = sizeof(int),
83669+ .mode = 0600,
83670+ .proc_handler = &proc_dointvec,
83671+ },
83672+#endif
83673+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
83674+ {
83675+ .ctl_name = CTL_UNNUMBERED,
83676+ .procname = "audit_ptrace",
83677+ .data = &grsec_enable_audit_ptrace,
83678+ .maxlen = sizeof(int),
83679+ .mode = 0600,
83680+ .proc_handler = &proc_dointvec,
83681+ },
83682+#endif
83683+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
83684+ {
83685+ .ctl_name = CTL_UNNUMBERED,
83686+ .procname = "harden_ptrace",
83687+ .data = &grsec_enable_harden_ptrace,
83688+ .maxlen = sizeof(int),
83689+ .mode = 0600,
83690+ .proc_handler = &proc_dointvec,
83691+ },
83692+#endif
83693+ {
83694+ .ctl_name = CTL_UNNUMBERED,
83695+ .procname = "grsec_lock",
83696+ .data = &grsec_lock,
83697+ .maxlen = sizeof(int),
83698+ .mode = 0600,
83699+ .proc_handler = &proc_dointvec,
83700+ },
83701+#endif
83702+#ifdef CONFIG_GRKERNSEC_ROFS
83703+ {
83704+ .ctl_name = CTL_UNNUMBERED,
83705+ .procname = "romount_protect",
83706+ .data = &grsec_enable_rofs,
83707+ .maxlen = sizeof(int),
83708+ .mode = 0600,
83709+ .proc_handler = &proc_dointvec_minmax,
83710+ .extra1 = &one,
83711+ .extra2 = &one,
83712+ },
83713+#endif
83714+ { .ctl_name = 0 }
83715+};
83716+#endif
83717diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
83718new file mode 100644
83719index 0000000..0dc13c3
83720--- /dev/null
83721+++ b/grsecurity/grsec_time.c
83722@@ -0,0 +1,16 @@
83723+#include <linux/kernel.h>
83724+#include <linux/sched.h>
83725+#include <linux/grinternal.h>
83726+#include <linux/module.h>
83727+
83728+void
83729+gr_log_timechange(void)
83730+{
83731+#ifdef CONFIG_GRKERNSEC_TIME
83732+ if (grsec_enable_time)
83733+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
83734+#endif
83735+ return;
83736+}
83737+
83738+EXPORT_SYMBOL(gr_log_timechange);
83739diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
83740new file mode 100644
83741index 0000000..07e0dc0
83742--- /dev/null
83743+++ b/grsecurity/grsec_tpe.c
83744@@ -0,0 +1,73 @@
83745+#include <linux/kernel.h>
83746+#include <linux/sched.h>
83747+#include <linux/file.h>
83748+#include <linux/fs.h>
83749+#include <linux/grinternal.h>
83750+
83751+extern int gr_acl_tpe_check(void);
83752+
83753+int
83754+gr_tpe_allow(const struct file *file)
83755+{
83756+#ifdef CONFIG_GRKERNSEC
83757+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
83758+ const struct cred *cred = current_cred();
83759+ char *msg = NULL;
83760+ char *msg2 = NULL;
83761+
83762+ // never restrict root
83763+ if (!cred->uid)
83764+ return 1;
83765+
83766+ if (grsec_enable_tpe) {
83767+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83768+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
83769+ msg = "not being in trusted group";
83770+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
83771+ msg = "being in untrusted group";
83772+#else
83773+ if (in_group_p(grsec_tpe_gid))
83774+ msg = "being in untrusted group";
83775+#endif
83776+ }
83777+ if (!msg && gr_acl_tpe_check())
83778+ msg = "being in untrusted role";
83779+
83780+ // not in any affected group/role
83781+ if (!msg)
83782+ goto next_check;
83783+
83784+ if (inode->i_uid)
83785+ msg2 = "file in non-root-owned directory";
83786+ else if (inode->i_mode & S_IWOTH)
83787+ msg2 = "file in world-writable directory";
83788+ else if (inode->i_mode & S_IWGRP)
83789+ msg2 = "file in group-writable directory";
83790+
83791+ if (msg && msg2) {
83792+ char fullmsg[70] = {0};
83793+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
83794+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
83795+ return 0;
83796+ }
83797+ msg = NULL;
83798+next_check:
83799+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83800+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
83801+ return 1;
83802+
83803+ if (inode->i_uid && (inode->i_uid != cred->uid))
83804+ msg = "directory not owned by user";
83805+ else if (inode->i_mode & S_IWOTH)
83806+ msg = "file in world-writable directory";
83807+ else if (inode->i_mode & S_IWGRP)
83808+ msg = "file in group-writable directory";
83809+
83810+ if (msg) {
83811+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
83812+ return 0;
83813+ }
83814+#endif
83815+#endif
83816+ return 1;
83817+}
83818diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
83819new file mode 100644
83820index 0000000..9f7b1ac
83821--- /dev/null
83822+++ b/grsecurity/grsum.c
83823@@ -0,0 +1,61 @@
83824+#include <linux/err.h>
83825+#include <linux/kernel.h>
83826+#include <linux/sched.h>
83827+#include <linux/mm.h>
83828+#include <linux/scatterlist.h>
83829+#include <linux/crypto.h>
83830+#include <linux/gracl.h>
83831+
83832+
83833+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
83834+#error "crypto and sha256 must be built into the kernel"
83835+#endif
83836+
83837+int
83838+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
83839+{
83840+ char *p;
83841+ struct crypto_hash *tfm;
83842+ struct hash_desc desc;
83843+ struct scatterlist sg;
83844+ unsigned char temp_sum[GR_SHA_LEN];
83845+ volatile int retval = 0;
83846+ volatile int dummy = 0;
83847+ unsigned int i;
83848+
83849+ sg_init_table(&sg, 1);
83850+
83851+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
83852+ if (IS_ERR(tfm)) {
83853+ /* should never happen, since sha256 should be built in */
83854+ return 1;
83855+ }
83856+
83857+ desc.tfm = tfm;
83858+ desc.flags = 0;
83859+
83860+ crypto_hash_init(&desc);
83861+
83862+ p = salt;
83863+ sg_set_buf(&sg, p, GR_SALT_LEN);
83864+ crypto_hash_update(&desc, &sg, sg.length);
83865+
83866+ p = entry->pw;
83867+ sg_set_buf(&sg, p, strlen(p));
83868+
83869+ crypto_hash_update(&desc, &sg, sg.length);
83870+
83871+ crypto_hash_final(&desc, temp_sum);
83872+
83873+ memset(entry->pw, 0, GR_PW_LEN);
83874+
83875+ for (i = 0; i < GR_SHA_LEN; i++)
83876+ if (sum[i] != temp_sum[i])
83877+ retval = 1;
83878+ else
83879+ dummy = 1; // waste a cycle
83880+
83881+ crypto_free_hash(tfm);
83882+
83883+ return retval;
83884+}
83885diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
83886index 3cd9ccd..fe16d47 100644
83887--- a/include/acpi/acpi_bus.h
83888+++ b/include/acpi/acpi_bus.h
83889@@ -107,7 +107,7 @@ struct acpi_device_ops {
83890 acpi_op_bind bind;
83891 acpi_op_unbind unbind;
83892 acpi_op_notify notify;
83893-};
83894+} __no_const;
83895
83896 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
83897
83898diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
83899index f4906f6..71feb73 100644
83900--- a/include/acpi/acpi_drivers.h
83901+++ b/include/acpi/acpi_drivers.h
83902@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
83903 Dock Station
83904 -------------------------------------------------------------------------- */
83905 struct acpi_dock_ops {
83906- acpi_notify_handler handler;
83907- acpi_notify_handler uevent;
83908+ const acpi_notify_handler handler;
83909+ const acpi_notify_handler uevent;
83910 };
83911
83912 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
83913@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
83914 extern int register_dock_notifier(struct notifier_block *nb);
83915 extern void unregister_dock_notifier(struct notifier_block *nb);
83916 extern int register_hotplug_dock_device(acpi_handle handle,
83917- struct acpi_dock_ops *ops,
83918+ const struct acpi_dock_ops *ops,
83919 void *context);
83920 extern void unregister_hotplug_dock_device(acpi_handle handle);
83921 #else
83922@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
83923 {
83924 }
83925 static inline int register_hotplug_dock_device(acpi_handle handle,
83926- struct acpi_dock_ops *ops,
83927+ const struct acpi_dock_ops *ops,
83928 void *context)
83929 {
83930 return -ENODEV;
83931diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
83932index b7babf0..a9ac9fc 100644
83933--- a/include/asm-generic/atomic-long.h
83934+++ b/include/asm-generic/atomic-long.h
83935@@ -22,6 +22,12 @@
83936
83937 typedef atomic64_t atomic_long_t;
83938
83939+#ifdef CONFIG_PAX_REFCOUNT
83940+typedef atomic64_unchecked_t atomic_long_unchecked_t;
83941+#else
83942+typedef atomic64_t atomic_long_unchecked_t;
83943+#endif
83944+
83945 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
83946
83947 static inline long atomic_long_read(atomic_long_t *l)
83948@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
83949 return (long)atomic64_read(v);
83950 }
83951
83952+#ifdef CONFIG_PAX_REFCOUNT
83953+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
83954+{
83955+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83956+
83957+ return (long)atomic64_read_unchecked(v);
83958+}
83959+#endif
83960+
83961 static inline void atomic_long_set(atomic_long_t *l, long i)
83962 {
83963 atomic64_t *v = (atomic64_t *)l;
83964@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
83965 atomic64_set(v, i);
83966 }
83967
83968+#ifdef CONFIG_PAX_REFCOUNT
83969+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
83970+{
83971+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83972+
83973+ atomic64_set_unchecked(v, i);
83974+}
83975+#endif
83976+
83977 static inline void atomic_long_inc(atomic_long_t *l)
83978 {
83979 atomic64_t *v = (atomic64_t *)l;
83980@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
83981 atomic64_inc(v);
83982 }
83983
83984+#ifdef CONFIG_PAX_REFCOUNT
83985+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
83986+{
83987+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83988+
83989+ atomic64_inc_unchecked(v);
83990+}
83991+#endif
83992+
83993 static inline void atomic_long_dec(atomic_long_t *l)
83994 {
83995 atomic64_t *v = (atomic64_t *)l;
83996@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
83997 atomic64_dec(v);
83998 }
83999
84000+#ifdef CONFIG_PAX_REFCOUNT
84001+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
84002+{
84003+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84004+
84005+ atomic64_dec_unchecked(v);
84006+}
84007+#endif
84008+
84009 static inline void atomic_long_add(long i, atomic_long_t *l)
84010 {
84011 atomic64_t *v = (atomic64_t *)l;
84012@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84013 atomic64_add(i, v);
84014 }
84015
84016+#ifdef CONFIG_PAX_REFCOUNT
84017+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84018+{
84019+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84020+
84021+ atomic64_add_unchecked(i, v);
84022+}
84023+#endif
84024+
84025 static inline void atomic_long_sub(long i, atomic_long_t *l)
84026 {
84027 atomic64_t *v = (atomic64_t *)l;
84028@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84029 return (long)atomic64_inc_return(v);
84030 }
84031
84032+#ifdef CONFIG_PAX_REFCOUNT
84033+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84034+{
84035+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
84036+
84037+ return (long)atomic64_inc_return_unchecked(v);
84038+}
84039+#endif
84040+
84041 static inline long atomic_long_dec_return(atomic_long_t *l)
84042 {
84043 atomic64_t *v = (atomic64_t *)l;
84044@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84045
84046 typedef atomic_t atomic_long_t;
84047
84048+#ifdef CONFIG_PAX_REFCOUNT
84049+typedef atomic_unchecked_t atomic_long_unchecked_t;
84050+#else
84051+typedef atomic_t atomic_long_unchecked_t;
84052+#endif
84053+
84054 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
84055 static inline long atomic_long_read(atomic_long_t *l)
84056 {
84057@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
84058 return (long)atomic_read(v);
84059 }
84060
84061+#ifdef CONFIG_PAX_REFCOUNT
84062+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
84063+{
84064+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84065+
84066+ return (long)atomic_read_unchecked(v);
84067+}
84068+#endif
84069+
84070 static inline void atomic_long_set(atomic_long_t *l, long i)
84071 {
84072 atomic_t *v = (atomic_t *)l;
84073@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
84074 atomic_set(v, i);
84075 }
84076
84077+#ifdef CONFIG_PAX_REFCOUNT
84078+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
84079+{
84080+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84081+
84082+ atomic_set_unchecked(v, i);
84083+}
84084+#endif
84085+
84086 static inline void atomic_long_inc(atomic_long_t *l)
84087 {
84088 atomic_t *v = (atomic_t *)l;
84089@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
84090 atomic_inc(v);
84091 }
84092
84093+#ifdef CONFIG_PAX_REFCOUNT
84094+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
84095+{
84096+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84097+
84098+ atomic_inc_unchecked(v);
84099+}
84100+#endif
84101+
84102 static inline void atomic_long_dec(atomic_long_t *l)
84103 {
84104 atomic_t *v = (atomic_t *)l;
84105@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
84106 atomic_dec(v);
84107 }
84108
84109+#ifdef CONFIG_PAX_REFCOUNT
84110+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
84111+{
84112+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84113+
84114+ atomic_dec_unchecked(v);
84115+}
84116+#endif
84117+
84118 static inline void atomic_long_add(long i, atomic_long_t *l)
84119 {
84120 atomic_t *v = (atomic_t *)l;
84121@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84122 atomic_add(i, v);
84123 }
84124
84125+#ifdef CONFIG_PAX_REFCOUNT
84126+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84127+{
84128+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84129+
84130+ atomic_add_unchecked(i, v);
84131+}
84132+#endif
84133+
84134 static inline void atomic_long_sub(long i, atomic_long_t *l)
84135 {
84136 atomic_t *v = (atomic_t *)l;
84137@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84138 return (long)atomic_inc_return(v);
84139 }
84140
84141+#ifdef CONFIG_PAX_REFCOUNT
84142+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84143+{
84144+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84145+
84146+ return (long)atomic_inc_return_unchecked(v);
84147+}
84148+#endif
84149+
84150 static inline long atomic_long_dec_return(atomic_long_t *l)
84151 {
84152 atomic_t *v = (atomic_t *)l;
84153@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84154
84155 #endif /* BITS_PER_LONG == 64 */
84156
84157+#ifdef CONFIG_PAX_REFCOUNT
84158+static inline void pax_refcount_needs_these_functions(void)
84159+{
84160+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
84161+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
84162+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
84163+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
84164+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
84165+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
84166+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
84167+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
84168+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
84169+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
84170+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
84171+
84172+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
84173+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
84174+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
84175+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
84176+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
84177+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
84178+}
84179+#else
84180+#define atomic_read_unchecked(v) atomic_read(v)
84181+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
84182+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
84183+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
84184+#define atomic_inc_unchecked(v) atomic_inc(v)
84185+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
84186+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
84187+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
84188+#define atomic_dec_unchecked(v) atomic_dec(v)
84189+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
84190+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
84191+
84192+#define atomic_long_read_unchecked(v) atomic_long_read(v)
84193+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
84194+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
84195+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
84196+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
84197+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
84198+#endif
84199+
84200 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
84201diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
84202index b18ce4f..2ee2843 100644
84203--- a/include/asm-generic/atomic64.h
84204+++ b/include/asm-generic/atomic64.h
84205@@ -16,6 +16,8 @@ typedef struct {
84206 long long counter;
84207 } atomic64_t;
84208
84209+typedef atomic64_t atomic64_unchecked_t;
84210+
84211 #define ATOMIC64_INIT(i) { (i) }
84212
84213 extern long long atomic64_read(const atomic64_t *v);
84214@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
84215 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
84216 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
84217
84218+#define atomic64_read_unchecked(v) atomic64_read(v)
84219+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
84220+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
84221+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
84222+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
84223+#define atomic64_inc_unchecked(v) atomic64_inc(v)
84224+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
84225+#define atomic64_dec_unchecked(v) atomic64_dec(v)
84226+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
84227+
84228 #endif /* _ASM_GENERIC_ATOMIC64_H */
84229diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
84230index d48ddf0..656a0ac 100644
84231--- a/include/asm-generic/bug.h
84232+++ b/include/asm-generic/bug.h
84233@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
84234
84235 #else /* !CONFIG_BUG */
84236 #ifndef HAVE_ARCH_BUG
84237-#define BUG() do {} while(0)
84238+#define BUG() do { for (;;) ; } while(0)
84239 #endif
84240
84241 #ifndef HAVE_ARCH_BUG_ON
84242-#define BUG_ON(condition) do { if (condition) ; } while(0)
84243+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
84244 #endif
84245
84246 #ifndef HAVE_ARCH_WARN_ON
84247diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
84248index 1bfcfe5..e04c5c9 100644
84249--- a/include/asm-generic/cache.h
84250+++ b/include/asm-generic/cache.h
84251@@ -6,7 +6,7 @@
84252 * cache lines need to provide their own cache.h.
84253 */
84254
84255-#define L1_CACHE_SHIFT 5
84256-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
84257+#define L1_CACHE_SHIFT 5UL
84258+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
84259
84260 #endif /* __ASM_GENERIC_CACHE_H */
84261diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
84262index 6920695..41038bc 100644
84263--- a/include/asm-generic/dma-mapping-common.h
84264+++ b/include/asm-generic/dma-mapping-common.h
84265@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
84266 enum dma_data_direction dir,
84267 struct dma_attrs *attrs)
84268 {
84269- struct dma_map_ops *ops = get_dma_ops(dev);
84270+ const struct dma_map_ops *ops = get_dma_ops(dev);
84271 dma_addr_t addr;
84272
84273 kmemcheck_mark_initialized(ptr, size);
84274@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
84275 enum dma_data_direction dir,
84276 struct dma_attrs *attrs)
84277 {
84278- struct dma_map_ops *ops = get_dma_ops(dev);
84279+ const struct dma_map_ops *ops = get_dma_ops(dev);
84280
84281 BUG_ON(!valid_dma_direction(dir));
84282 if (ops->unmap_page)
84283@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
84284 int nents, enum dma_data_direction dir,
84285 struct dma_attrs *attrs)
84286 {
84287- struct dma_map_ops *ops = get_dma_ops(dev);
84288+ const struct dma_map_ops *ops = get_dma_ops(dev);
84289 int i, ents;
84290 struct scatterlist *s;
84291
84292@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
84293 int nents, enum dma_data_direction dir,
84294 struct dma_attrs *attrs)
84295 {
84296- struct dma_map_ops *ops = get_dma_ops(dev);
84297+ const struct dma_map_ops *ops = get_dma_ops(dev);
84298
84299 BUG_ON(!valid_dma_direction(dir));
84300 debug_dma_unmap_sg(dev, sg, nents, dir);
84301@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84302 size_t offset, size_t size,
84303 enum dma_data_direction dir)
84304 {
84305- struct dma_map_ops *ops = get_dma_ops(dev);
84306+ const struct dma_map_ops *ops = get_dma_ops(dev);
84307 dma_addr_t addr;
84308
84309 kmemcheck_mark_initialized(page_address(page) + offset, size);
84310@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84311 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
84312 size_t size, enum dma_data_direction dir)
84313 {
84314- struct dma_map_ops *ops = get_dma_ops(dev);
84315+ const struct dma_map_ops *ops = get_dma_ops(dev);
84316
84317 BUG_ON(!valid_dma_direction(dir));
84318 if (ops->unmap_page)
84319@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
84320 size_t size,
84321 enum dma_data_direction dir)
84322 {
84323- struct dma_map_ops *ops = get_dma_ops(dev);
84324+ const struct dma_map_ops *ops = get_dma_ops(dev);
84325
84326 BUG_ON(!valid_dma_direction(dir));
84327 if (ops->sync_single_for_cpu)
84328@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
84329 dma_addr_t addr, size_t size,
84330 enum dma_data_direction dir)
84331 {
84332- struct dma_map_ops *ops = get_dma_ops(dev);
84333+ const struct dma_map_ops *ops = get_dma_ops(dev);
84334
84335 BUG_ON(!valid_dma_direction(dir));
84336 if (ops->sync_single_for_device)
84337@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
84338 size_t size,
84339 enum dma_data_direction dir)
84340 {
84341- struct dma_map_ops *ops = get_dma_ops(dev);
84342+ const struct dma_map_ops *ops = get_dma_ops(dev);
84343
84344 BUG_ON(!valid_dma_direction(dir));
84345 if (ops->sync_single_range_for_cpu) {
84346@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
84347 size_t size,
84348 enum dma_data_direction dir)
84349 {
84350- struct dma_map_ops *ops = get_dma_ops(dev);
84351+ const struct dma_map_ops *ops = get_dma_ops(dev);
84352
84353 BUG_ON(!valid_dma_direction(dir));
84354 if (ops->sync_single_range_for_device) {
84355@@ -155,7 +155,7 @@ static inline void
84356 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
84357 int nelems, enum dma_data_direction dir)
84358 {
84359- struct dma_map_ops *ops = get_dma_ops(dev);
84360+ const struct dma_map_ops *ops = get_dma_ops(dev);
84361
84362 BUG_ON(!valid_dma_direction(dir));
84363 if (ops->sync_sg_for_cpu)
84364@@ -167,7 +167,7 @@ static inline void
84365 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
84366 int nelems, enum dma_data_direction dir)
84367 {
84368- struct dma_map_ops *ops = get_dma_ops(dev);
84369+ const struct dma_map_ops *ops = get_dma_ops(dev);
84370
84371 BUG_ON(!valid_dma_direction(dir));
84372 if (ops->sync_sg_for_device)
84373diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
84374index 0d68a1e..b74a761 100644
84375--- a/include/asm-generic/emergency-restart.h
84376+++ b/include/asm-generic/emergency-restart.h
84377@@ -1,7 +1,7 @@
84378 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
84379 #define _ASM_GENERIC_EMERGENCY_RESTART_H
84380
84381-static inline void machine_emergency_restart(void)
84382+static inline __noreturn void machine_emergency_restart(void)
84383 {
84384 machine_restart(NULL);
84385 }
84386diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
84387index 3c2344f..4590a7d 100644
84388--- a/include/asm-generic/futex.h
84389+++ b/include/asm-generic/futex.h
84390@@ -6,7 +6,7 @@
84391 #include <asm/errno.h>
84392
84393 static inline int
84394-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84395+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
84396 {
84397 int op = (encoded_op >> 28) & 7;
84398 int cmp = (encoded_op >> 24) & 15;
84399@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84400 }
84401
84402 static inline int
84403-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
84404+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
84405 {
84406 return -ENOSYS;
84407 }
84408diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
84409index e5f234a..cdb16b3 100644
84410--- a/include/asm-generic/kmap_types.h
84411+++ b/include/asm-generic/kmap_types.h
84412@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
84413 KMAP_D(16) KM_IRQ_PTE,
84414 KMAP_D(17) KM_NMI,
84415 KMAP_D(18) KM_NMI_PTE,
84416-KMAP_D(19) KM_TYPE_NR
84417+KMAP_D(19) KM_CLEARPAGE,
84418+KMAP_D(20) KM_TYPE_NR
84419 };
84420
84421 #undef KMAP_D
84422diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
84423index fc21844..2ee9629 100644
84424--- a/include/asm-generic/local.h
84425+++ b/include/asm-generic/local.h
84426@@ -39,6 +39,7 @@ typedef struct
84427 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
84428 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
84429 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
84430+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
84431
84432 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
84433 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
84434diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
84435index 725612b..9cc513a 100644
84436--- a/include/asm-generic/pgtable-nopmd.h
84437+++ b/include/asm-generic/pgtable-nopmd.h
84438@@ -1,14 +1,19 @@
84439 #ifndef _PGTABLE_NOPMD_H
84440 #define _PGTABLE_NOPMD_H
84441
84442-#ifndef __ASSEMBLY__
84443-
84444 #include <asm-generic/pgtable-nopud.h>
84445
84446-struct mm_struct;
84447-
84448 #define __PAGETABLE_PMD_FOLDED
84449
84450+#define PMD_SHIFT PUD_SHIFT
84451+#define PTRS_PER_PMD 1
84452+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
84453+#define PMD_MASK (~(PMD_SIZE-1))
84454+
84455+#ifndef __ASSEMBLY__
84456+
84457+struct mm_struct;
84458+
84459 /*
84460 * Having the pmd type consist of a pud gets the size right, and allows
84461 * us to conceptually access the pud entry that this pmd is folded into
84462@@ -16,11 +21,6 @@ struct mm_struct;
84463 */
84464 typedef struct { pud_t pud; } pmd_t;
84465
84466-#define PMD_SHIFT PUD_SHIFT
84467-#define PTRS_PER_PMD 1
84468-#define PMD_SIZE (1UL << PMD_SHIFT)
84469-#define PMD_MASK (~(PMD_SIZE-1))
84470-
84471 /*
84472 * The "pud_xxx()" functions here are trivial for a folded two-level
84473 * setup: the pmd is never bad, and a pmd always exists (as it's folded
84474diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
84475index 810431d..ccc3638 100644
84476--- a/include/asm-generic/pgtable-nopud.h
84477+++ b/include/asm-generic/pgtable-nopud.h
84478@@ -1,10 +1,15 @@
84479 #ifndef _PGTABLE_NOPUD_H
84480 #define _PGTABLE_NOPUD_H
84481
84482-#ifndef __ASSEMBLY__
84483-
84484 #define __PAGETABLE_PUD_FOLDED
84485
84486+#define PUD_SHIFT PGDIR_SHIFT
84487+#define PTRS_PER_PUD 1
84488+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
84489+#define PUD_MASK (~(PUD_SIZE-1))
84490+
84491+#ifndef __ASSEMBLY__
84492+
84493 /*
84494 * Having the pud type consist of a pgd gets the size right, and allows
84495 * us to conceptually access the pgd entry that this pud is folded into
84496@@ -12,11 +17,6 @@
84497 */
84498 typedef struct { pgd_t pgd; } pud_t;
84499
84500-#define PUD_SHIFT PGDIR_SHIFT
84501-#define PTRS_PER_PUD 1
84502-#define PUD_SIZE (1UL << PUD_SHIFT)
84503-#define PUD_MASK (~(PUD_SIZE-1))
84504-
84505 /*
84506 * The "pgd_xxx()" functions here are trivial for a folded two-level
84507 * setup: the pud is never bad, and a pud always exists (as it's folded
84508diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
84509index e2bd73e..fea8ed3 100644
84510--- a/include/asm-generic/pgtable.h
84511+++ b/include/asm-generic/pgtable.h
84512@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
84513 unsigned long size);
84514 #endif
84515
84516+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
84517+static inline unsigned long pax_open_kernel(void) { return 0; }
84518+#endif
84519+
84520+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
84521+static inline unsigned long pax_close_kernel(void) { return 0; }
84522+#endif
84523+
84524 #endif /* !__ASSEMBLY__ */
84525
84526 #endif /* _ASM_GENERIC_PGTABLE_H */
84527diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
84528index b218b85..043ee5b 100644
84529--- a/include/asm-generic/uaccess.h
84530+++ b/include/asm-generic/uaccess.h
84531@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
84532 */
84533 #ifndef __copy_from_user
84534 static inline __must_check long __copy_from_user(void *to,
84535+ const void __user * from, unsigned long n) __size_overflow(3);
84536+static inline __must_check long __copy_from_user(void *to,
84537 const void __user * from, unsigned long n)
84538 {
84539 if (__builtin_constant_p(n)) {
84540@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
84541
84542 #ifndef __copy_to_user
84543 static inline __must_check long __copy_to_user(void __user *to,
84544+ const void *from, unsigned long n) __size_overflow(3);
84545+static inline __must_check long __copy_to_user(void __user *to,
84546 const void *from, unsigned long n)
84547 {
84548 if (__builtin_constant_p(n)) {
84549@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
84550 -EFAULT; \
84551 })
84552
84553+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
84554 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
84555 {
84556 size = __copy_from_user(x, ptr, size);
84557@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
84558 #define __copy_to_user_inatomic __copy_to_user
84559 #endif
84560
84561+static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
84562 static inline long copy_from_user(void *to,
84563 const void __user * from, unsigned long n)
84564 {
84565@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
84566 return n;
84567 }
84568
84569+static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
84570 static inline long copy_to_user(void __user *to,
84571 const void *from, unsigned long n)
84572 {
84573@@ -307,6 +314,8 @@ static inline long strlen_user(const char __user *src)
84574 */
84575 #ifndef __clear_user
84576 static inline __must_check unsigned long
84577+__clear_user(void __user *to, unsigned long n) __size_overflow(2);
84578+static inline __must_check unsigned long
84579 __clear_user(void __user *to, unsigned long n)
84580 {
84581 memset((void __force *)to, 0, n);
84582@@ -315,6 +324,8 @@ __clear_user(void __user *to, unsigned long n)
84583 #endif
84584
84585 static inline __must_check unsigned long
84586+clear_user(void __user *to, unsigned long n) __size_overflow(2);
84587+static inline __must_check unsigned long
84588 clear_user(void __user *to, unsigned long n)
84589 {
84590 might_sleep();
84591diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
84592index b6e818f..21aa58a 100644
84593--- a/include/asm-generic/vmlinux.lds.h
84594+++ b/include/asm-generic/vmlinux.lds.h
84595@@ -199,6 +199,7 @@
84596 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
84597 VMLINUX_SYMBOL(__start_rodata) = .; \
84598 *(.rodata) *(.rodata.*) \
84599+ *(.data.read_only) \
84600 *(__vermagic) /* Kernel version magic */ \
84601 *(__markers_strings) /* Markers: strings */ \
84602 *(__tracepoints_strings)/* Tracepoints: strings */ \
84603@@ -656,22 +657,24 @@
84604 * section in the linker script will go there too. @phdr should have
84605 * a leading colon.
84606 *
84607- * Note that this macros defines __per_cpu_load as an absolute symbol.
84608+ * Note that this macros defines per_cpu_load as an absolute symbol.
84609 * If there is no need to put the percpu section at a predetermined
84610 * address, use PERCPU().
84611 */
84612 #define PERCPU_VADDR(vaddr, phdr) \
84613- VMLINUX_SYMBOL(__per_cpu_load) = .; \
84614- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
84615+ per_cpu_load = .; \
84616+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
84617 - LOAD_OFFSET) { \
84618+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
84619 VMLINUX_SYMBOL(__per_cpu_start) = .; \
84620 *(.data.percpu.first) \
84621- *(.data.percpu.page_aligned) \
84622 *(.data.percpu) \
84623+ . = ALIGN(PAGE_SIZE); \
84624+ *(.data.percpu.page_aligned) \
84625 *(.data.percpu.shared_aligned) \
84626 VMLINUX_SYMBOL(__per_cpu_end) = .; \
84627 } phdr \
84628- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
84629+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
84630
84631 /**
84632 * PERCPU - define output section for percpu area, simple version
84633diff --git a/include/drm/drmP.h b/include/drm/drmP.h
84634index ebab6a6..351dba1 100644
84635--- a/include/drm/drmP.h
84636+++ b/include/drm/drmP.h
84637@@ -71,6 +71,7 @@
84638 #include <linux/workqueue.h>
84639 #include <linux/poll.h>
84640 #include <asm/pgalloc.h>
84641+#include <asm/local.h>
84642 #include "drm.h"
84643
84644 #include <linux/idr.h>
84645@@ -814,7 +815,7 @@ struct drm_driver {
84646 void (*vgaarb_irq)(struct drm_device *dev, bool state);
84647
84648 /* Driver private ops for this object */
84649- struct vm_operations_struct *gem_vm_ops;
84650+ const struct vm_operations_struct *gem_vm_ops;
84651
84652 int major;
84653 int minor;
84654@@ -917,7 +918,7 @@ struct drm_device {
84655
84656 /** \name Usage Counters */
84657 /*@{ */
84658- int open_count; /**< Outstanding files open */
84659+ local_t open_count; /**< Outstanding files open */
84660 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
84661 atomic_t vma_count; /**< Outstanding vma areas open */
84662 int buf_use; /**< Buffers in use -- cannot alloc */
84663@@ -928,7 +929,7 @@ struct drm_device {
84664 /*@{ */
84665 unsigned long counters;
84666 enum drm_stat_type types[15];
84667- atomic_t counts[15];
84668+ atomic_unchecked_t counts[15];
84669 /*@} */
84670
84671 struct list_head filelist;
84672@@ -1016,7 +1017,7 @@ struct drm_device {
84673 struct pci_controller *hose;
84674 #endif
84675 struct drm_sg_mem *sg; /**< Scatter gather memory */
84676- unsigned int num_crtcs; /**< Number of CRTCs on this device */
84677+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
84678 void *dev_private; /**< device private data */
84679 void *mm_private;
84680 struct address_space *dev_mapping;
84681@@ -1042,11 +1043,11 @@ struct drm_device {
84682 spinlock_t object_name_lock;
84683 struct idr object_name_idr;
84684 atomic_t object_count;
84685- atomic_t object_memory;
84686+ atomic_unchecked_t object_memory;
84687 atomic_t pin_count;
84688- atomic_t pin_memory;
84689+ atomic_unchecked_t pin_memory;
84690 atomic_t gtt_count;
84691- atomic_t gtt_memory;
84692+ atomic_unchecked_t gtt_memory;
84693 uint32_t gtt_total;
84694 uint32_t invalidate_domains; /* domains pending invalidation */
84695 uint32_t flush_domains; /* domains pending flush */
84696diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
84697index b29e201..3413cc9 100644
84698--- a/include/drm/drm_crtc_helper.h
84699+++ b/include/drm/drm_crtc_helper.h
84700@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
84701
84702 /* reload the current crtc LUT */
84703 void (*load_lut)(struct drm_crtc *crtc);
84704-};
84705+} __no_const;
84706
84707 struct drm_encoder_helper_funcs {
84708 void (*dpms)(struct drm_encoder *encoder, int mode);
84709@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
84710 struct drm_connector *connector);
84711 /* disable encoder when not in use - more explicit than dpms off */
84712 void (*disable)(struct drm_encoder *encoder);
84713-};
84714+} __no_const;
84715
84716 struct drm_connector_helper_funcs {
84717 int (*get_modes)(struct drm_connector *connector);
84718diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
84719index b199170..6f9e64c 100644
84720--- a/include/drm/ttm/ttm_memory.h
84721+++ b/include/drm/ttm/ttm_memory.h
84722@@ -47,7 +47,7 @@
84723
84724 struct ttm_mem_shrink {
84725 int (*do_shrink) (struct ttm_mem_shrink *);
84726-};
84727+} __no_const;
84728
84729 /**
84730 * struct ttm_mem_global - Global memory accounting structure.
84731diff --git a/include/linux/a.out.h b/include/linux/a.out.h
84732index e86dfca..40cc55f 100644
84733--- a/include/linux/a.out.h
84734+++ b/include/linux/a.out.h
84735@@ -39,6 +39,14 @@ enum machine_type {
84736 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
84737 };
84738
84739+/* Constants for the N_FLAGS field */
84740+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84741+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
84742+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
84743+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
84744+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84745+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84746+
84747 #if !defined (N_MAGIC)
84748 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
84749 #endif
84750diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
84751index 817b237..62c10bc 100644
84752--- a/include/linux/atmdev.h
84753+++ b/include/linux/atmdev.h
84754@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
84755 #endif
84756
84757 struct k_atm_aal_stats {
84758-#define __HANDLE_ITEM(i) atomic_t i
84759+#define __HANDLE_ITEM(i) atomic_unchecked_t i
84760 __AAL_STAT_ITEMS
84761 #undef __HANDLE_ITEM
84762 };
84763diff --git a/include/linux/backlight.h b/include/linux/backlight.h
84764index 0f5f578..8c4f884 100644
84765--- a/include/linux/backlight.h
84766+++ b/include/linux/backlight.h
84767@@ -36,18 +36,18 @@ struct backlight_device;
84768 struct fb_info;
84769
84770 struct backlight_ops {
84771- unsigned int options;
84772+ const unsigned int options;
84773
84774 #define BL_CORE_SUSPENDRESUME (1 << 0)
84775
84776 /* Notify the backlight driver some property has changed */
84777- int (*update_status)(struct backlight_device *);
84778+ int (* const update_status)(struct backlight_device *);
84779 /* Return the current backlight brightness (accounting for power,
84780 fb_blank etc.) */
84781- int (*get_brightness)(struct backlight_device *);
84782+ int (* const get_brightness)(struct backlight_device *);
84783 /* Check if given framebuffer device is the one bound to this backlight;
84784 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
84785- int (*check_fb)(struct fb_info *);
84786+ int (* const check_fb)(struct fb_info *);
84787 };
84788
84789 /* This structure defines all the properties of a backlight */
84790@@ -86,7 +86,7 @@ struct backlight_device {
84791 registered this device has been unloaded, and if class_get_devdata()
84792 points to something in the body of that driver, it is also invalid. */
84793 struct mutex ops_lock;
84794- struct backlight_ops *ops;
84795+ const struct backlight_ops *ops;
84796
84797 /* The framebuffer notifier block */
84798 struct notifier_block fb_notif;
84799@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
84800 }
84801
84802 extern struct backlight_device *backlight_device_register(const char *name,
84803- struct device *dev, void *devdata, struct backlight_ops *ops);
84804+ struct device *dev, void *devdata, const struct backlight_ops *ops);
84805 extern void backlight_device_unregister(struct backlight_device *bd);
84806 extern void backlight_force_update(struct backlight_device *bd,
84807 enum backlight_update_reason reason);
84808diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
84809index a3d802e..93a2ef4 100644
84810--- a/include/linux/binfmts.h
84811+++ b/include/linux/binfmts.h
84812@@ -18,7 +18,7 @@ struct pt_regs;
84813 #define BINPRM_BUF_SIZE 128
84814
84815 #ifdef __KERNEL__
84816-#include <linux/list.h>
84817+#include <linux/sched.h>
84818
84819 #define CORENAME_MAX_SIZE 128
84820
84821@@ -58,6 +58,7 @@ struct linux_binprm{
84822 unsigned interp_flags;
84823 unsigned interp_data;
84824 unsigned long loader, exec;
84825+ char tcomm[TASK_COMM_LEN];
84826 };
84827
84828 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
84829@@ -83,6 +84,7 @@ struct linux_binfmt {
84830 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
84831 int (*load_shlib)(struct file *);
84832 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
84833+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
84834 unsigned long min_coredump; /* minimal dump size */
84835 int hasvdso;
84836 };
84837diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
84838index 5eb6cb0..a2906d2 100644
84839--- a/include/linux/blkdev.h
84840+++ b/include/linux/blkdev.h
84841@@ -1281,7 +1281,7 @@ struct block_device_operations {
84842 int (*revalidate_disk) (struct gendisk *);
84843 int (*getgeo)(struct block_device *, struct hd_geometry *);
84844 struct module *owner;
84845-};
84846+} __do_const;
84847
84848 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
84849 unsigned long);
84850diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
84851index 3b73b99..629d21b 100644
84852--- a/include/linux/blktrace_api.h
84853+++ b/include/linux/blktrace_api.h
84854@@ -160,7 +160,7 @@ struct blk_trace {
84855 struct dentry *dir;
84856 struct dentry *dropped_file;
84857 struct dentry *msg_file;
84858- atomic_t dropped;
84859+ atomic_unchecked_t dropped;
84860 };
84861
84862 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
84863diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
84864index 83195fb..0b0f77d 100644
84865--- a/include/linux/byteorder/little_endian.h
84866+++ b/include/linux/byteorder/little_endian.h
84867@@ -42,51 +42,51 @@
84868
84869 static inline __le64 __cpu_to_le64p(const __u64 *p)
84870 {
84871- return (__force __le64)*p;
84872+ return (__force const __le64)*p;
84873 }
84874 static inline __u64 __le64_to_cpup(const __le64 *p)
84875 {
84876- return (__force __u64)*p;
84877+ return (__force const __u64)*p;
84878 }
84879 static inline __le32 __cpu_to_le32p(const __u32 *p)
84880 {
84881- return (__force __le32)*p;
84882+ return (__force const __le32)*p;
84883 }
84884 static inline __u32 __le32_to_cpup(const __le32 *p)
84885 {
84886- return (__force __u32)*p;
84887+ return (__force const __u32)*p;
84888 }
84889 static inline __le16 __cpu_to_le16p(const __u16 *p)
84890 {
84891- return (__force __le16)*p;
84892+ return (__force const __le16)*p;
84893 }
84894 static inline __u16 __le16_to_cpup(const __le16 *p)
84895 {
84896- return (__force __u16)*p;
84897+ return (__force const __u16)*p;
84898 }
84899 static inline __be64 __cpu_to_be64p(const __u64 *p)
84900 {
84901- return (__force __be64)__swab64p(p);
84902+ return (__force const __be64)__swab64p(p);
84903 }
84904 static inline __u64 __be64_to_cpup(const __be64 *p)
84905 {
84906- return __swab64p((__u64 *)p);
84907+ return __swab64p((const __u64 *)p);
84908 }
84909 static inline __be32 __cpu_to_be32p(const __u32 *p)
84910 {
84911- return (__force __be32)__swab32p(p);
84912+ return (__force const __be32)__swab32p(p);
84913 }
84914 static inline __u32 __be32_to_cpup(const __be32 *p)
84915 {
84916- return __swab32p((__u32 *)p);
84917+ return __swab32p((const __u32 *)p);
84918 }
84919 static inline __be16 __cpu_to_be16p(const __u16 *p)
84920 {
84921- return (__force __be16)__swab16p(p);
84922+ return (__force const __be16)__swab16p(p);
84923 }
84924 static inline __u16 __be16_to_cpup(const __be16 *p)
84925 {
84926- return __swab16p((__u16 *)p);
84927+ return __swab16p((const __u16 *)p);
84928 }
84929 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
84930 #define __le64_to_cpus(x) do { (void)(x); } while (0)
84931diff --git a/include/linux/cache.h b/include/linux/cache.h
84932index 97e2488..e7576b9 100644
84933--- a/include/linux/cache.h
84934+++ b/include/linux/cache.h
84935@@ -16,6 +16,10 @@
84936 #define __read_mostly
84937 #endif
84938
84939+#ifndef __read_only
84940+#define __read_only __read_mostly
84941+#endif
84942+
84943 #ifndef ____cacheline_aligned
84944 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
84945 #endif
84946diff --git a/include/linux/capability.h b/include/linux/capability.h
84947index c8f2a5f7..1618a5c 100644
84948--- a/include/linux/capability.h
84949+++ b/include/linux/capability.h
84950@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
84951 (security_real_capable_noaudit((t), (cap)) == 0)
84952
84953 extern int capable(int cap);
84954+int capable_nolog(int cap);
84955
84956 /* audit system wants to get cap info from files as well */
84957 struct dentry;
84958diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
84959index 450fa59..246fa19 100644
84960--- a/include/linux/compiler-gcc4.h
84961+++ b/include/linux/compiler-gcc4.h
84962@@ -14,6 +14,9 @@
84963 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
84964 #define __always_inline inline __attribute__((always_inline))
84965
84966+#ifdef SIZE_OVERFLOW_PLUGIN
84967+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
84968+#endif
84969 /*
84970 * A trick to suppress uninitialized variable warning without generating any
84971 * code
84972@@ -36,4 +39,16 @@
84973 the kernel context */
84974 #define __cold __attribute__((__cold__))
84975
84976+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
84977+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
84978+#define __bos0(ptr) __bos((ptr), 0)
84979+#define __bos1(ptr) __bos((ptr), 1)
84980+
84981+#if __GNUC_MINOR__ >= 5
84982+#ifdef CONSTIFY_PLUGIN
84983+#define __no_const __attribute__((no_const))
84984+#define __do_const __attribute__((do_const))
84985+#endif
84986+#endif
84987+
84988 #endif
84989diff --git a/include/linux/compiler.h b/include/linux/compiler.h
84990index 04fb513..6189f3b 100644
84991--- a/include/linux/compiler.h
84992+++ b/include/linux/compiler.h
84993@@ -5,11 +5,14 @@
84994
84995 #ifdef __CHECKER__
84996 # define __user __attribute__((noderef, address_space(1)))
84997+# define __force_user __force __user
84998 # define __kernel /* default address space */
84999+# define __force_kernel __force __kernel
85000 # define __safe __attribute__((safe))
85001 # define __force __attribute__((force))
85002 # define __nocast __attribute__((nocast))
85003 # define __iomem __attribute__((noderef, address_space(2)))
85004+# define __force_iomem __force __iomem
85005 # define __acquires(x) __attribute__((context(x,0,1)))
85006 # define __releases(x) __attribute__((context(x,1,0)))
85007 # define __acquire(x) __context__(x,1)
85008@@ -17,13 +20,34 @@
85009 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
85010 extern void __chk_user_ptr(const volatile void __user *);
85011 extern void __chk_io_ptr(const volatile void __iomem *);
85012+#elif defined(CHECKER_PLUGIN)
85013+//# define __user
85014+//# define __force_user
85015+//# define __kernel
85016+//# define __force_kernel
85017+# define __safe
85018+# define __force
85019+# define __nocast
85020+# define __iomem
85021+# define __force_iomem
85022+# define __chk_user_ptr(x) (void)0
85023+# define __chk_io_ptr(x) (void)0
85024+# define __builtin_warning(x, y...) (1)
85025+# define __acquires(x)
85026+# define __releases(x)
85027+# define __acquire(x) (void)0
85028+# define __release(x) (void)0
85029+# define __cond_lock(x,c) (c)
85030 #else
85031 # define __user
85032+# define __force_user
85033 # define __kernel
85034+# define __force_kernel
85035 # define __safe
85036 # define __force
85037 # define __nocast
85038 # define __iomem
85039+# define __force_iomem
85040 # define __chk_user_ptr(x) (void)0
85041 # define __chk_io_ptr(x) (void)0
85042 # define __builtin_warning(x, y...) (1)
85043@@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85044 # define __attribute_const__ /* unimplemented */
85045 #endif
85046
85047+#ifndef __no_const
85048+# define __no_const
85049+#endif
85050+
85051+#ifndef __do_const
85052+# define __do_const
85053+#endif
85054+
85055+#ifndef __size_overflow
85056+# define __size_overflow(...)
85057+#endif
85058 /*
85059 * Tell gcc if a function is cold. The compiler will assume any path
85060 * directly leading to the call is unlikely.
85061@@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85062 #define __cold
85063 #endif
85064
85065+#ifndef __alloc_size
85066+#define __alloc_size(...)
85067+#endif
85068+
85069+#ifndef __bos
85070+#define __bos(ptr, arg)
85071+#endif
85072+
85073+#ifndef __bos0
85074+#define __bos0(ptr)
85075+#endif
85076+
85077+#ifndef __bos1
85078+#define __bos1(ptr)
85079+#endif
85080+
85081 /* Simple shorthand for a section definition */
85082 #ifndef __section
85083 # define __section(S) __attribute__ ((__section__(#S)))
85084@@ -278,6 +329,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85085 * use is to mediate communication between process-level code and irq/NMI
85086 * handlers, all running on the same CPU.
85087 */
85088-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
85089+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
85090+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
85091
85092 #endif /* __LINUX_COMPILER_H */
85093diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
85094index 0026f26..6c237c5 100644
85095--- a/include/linux/crash_dump.h
85096+++ b/include/linux/crash_dump.h
85097@@ -12,7 +12,7 @@
85098 extern unsigned long long elfcorehdr_addr;
85099
85100 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
85101- unsigned long, int);
85102+ unsigned long, int) __size_overflow(3);
85103
85104 /* Architecture code defines this if there are other possible ELF
85105 * machine types, e.g. on bi-arch capable hardware. */
85106diff --git a/include/linux/crypto.h b/include/linux/crypto.h
85107index fd92988..a3164bd 100644
85108--- a/include/linux/crypto.h
85109+++ b/include/linux/crypto.h
85110@@ -394,7 +394,7 @@ struct cipher_tfm {
85111 const u8 *key, unsigned int keylen);
85112 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85113 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85114-};
85115+} __no_const;
85116
85117 struct hash_tfm {
85118 int (*init)(struct hash_desc *desc);
85119@@ -415,13 +415,13 @@ struct compress_tfm {
85120 int (*cot_decompress)(struct crypto_tfm *tfm,
85121 const u8 *src, unsigned int slen,
85122 u8 *dst, unsigned int *dlen);
85123-};
85124+} __no_const;
85125
85126 struct rng_tfm {
85127 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
85128 unsigned int dlen);
85129 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
85130-};
85131+} __no_const;
85132
85133 #define crt_ablkcipher crt_u.ablkcipher
85134 #define crt_aead crt_u.aead
85135diff --git a/include/linux/dcache.h b/include/linux/dcache.h
85136index 30b93b2..cd7a8db 100644
85137--- a/include/linux/dcache.h
85138+++ b/include/linux/dcache.h
85139@@ -119,6 +119,8 @@ struct dentry {
85140 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
85141 };
85142
85143+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
85144+
85145 /*
85146 * dentry->d_lock spinlock nesting subclasses:
85147 *
85148diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
85149index 3e9bd6a..f4e1aa0 100644
85150--- a/include/linux/decompress/mm.h
85151+++ b/include/linux/decompress/mm.h
85152@@ -78,7 +78,7 @@ static void free(void *where)
85153 * warnings when not needed (indeed large_malloc / large_free are not
85154 * needed by inflate */
85155
85156-#define malloc(a) kmalloc(a, GFP_KERNEL)
85157+#define malloc(a) kmalloc((a), GFP_KERNEL)
85158 #define free(a) kfree(a)
85159
85160 #define large_malloc(a) vmalloc(a)
85161diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
85162index 91b7618..92a93d32 100644
85163--- a/include/linux/dma-mapping.h
85164+++ b/include/linux/dma-mapping.h
85165@@ -16,51 +16,51 @@ enum dma_data_direction {
85166 };
85167
85168 struct dma_map_ops {
85169- void* (*alloc_coherent)(struct device *dev, size_t size,
85170+ void* (* const alloc_coherent)(struct device *dev, size_t size,
85171 dma_addr_t *dma_handle, gfp_t gfp);
85172- void (*free_coherent)(struct device *dev, size_t size,
85173+ void (* const free_coherent)(struct device *dev, size_t size,
85174 void *vaddr, dma_addr_t dma_handle);
85175- dma_addr_t (*map_page)(struct device *dev, struct page *page,
85176+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
85177 unsigned long offset, size_t size,
85178 enum dma_data_direction dir,
85179 struct dma_attrs *attrs);
85180- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
85181+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
85182 size_t size, enum dma_data_direction dir,
85183 struct dma_attrs *attrs);
85184- int (*map_sg)(struct device *dev, struct scatterlist *sg,
85185+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
85186 int nents, enum dma_data_direction dir,
85187 struct dma_attrs *attrs);
85188- void (*unmap_sg)(struct device *dev,
85189+ void (* const unmap_sg)(struct device *dev,
85190 struct scatterlist *sg, int nents,
85191 enum dma_data_direction dir,
85192 struct dma_attrs *attrs);
85193- void (*sync_single_for_cpu)(struct device *dev,
85194+ void (* const sync_single_for_cpu)(struct device *dev,
85195 dma_addr_t dma_handle, size_t size,
85196 enum dma_data_direction dir);
85197- void (*sync_single_for_device)(struct device *dev,
85198+ void (* const sync_single_for_device)(struct device *dev,
85199 dma_addr_t dma_handle, size_t size,
85200 enum dma_data_direction dir);
85201- void (*sync_single_range_for_cpu)(struct device *dev,
85202+ void (* const sync_single_range_for_cpu)(struct device *dev,
85203 dma_addr_t dma_handle,
85204 unsigned long offset,
85205 size_t size,
85206 enum dma_data_direction dir);
85207- void (*sync_single_range_for_device)(struct device *dev,
85208+ void (* const sync_single_range_for_device)(struct device *dev,
85209 dma_addr_t dma_handle,
85210 unsigned long offset,
85211 size_t size,
85212 enum dma_data_direction dir);
85213- void (*sync_sg_for_cpu)(struct device *dev,
85214+ void (* const sync_sg_for_cpu)(struct device *dev,
85215 struct scatterlist *sg, int nents,
85216 enum dma_data_direction dir);
85217- void (*sync_sg_for_device)(struct device *dev,
85218+ void (* const sync_sg_for_device)(struct device *dev,
85219 struct scatterlist *sg, int nents,
85220 enum dma_data_direction dir);
85221- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
85222- int (*dma_supported)(struct device *dev, u64 mask);
85223+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
85224+ int (* const dma_supported)(struct device *dev, u64 mask);
85225 int (*set_dma_mask)(struct device *dev, u64 mask);
85226 int is_phys;
85227-};
85228+} __do_const;
85229
85230 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
85231
85232diff --git a/include/linux/dst.h b/include/linux/dst.h
85233index e26fed8..b976d9f 100644
85234--- a/include/linux/dst.h
85235+++ b/include/linux/dst.h
85236@@ -380,7 +380,7 @@ struct dst_node
85237 struct thread_pool *pool;
85238
85239 /* Transaction IDs live here */
85240- atomic_long_t gen;
85241+ atomic_long_unchecked_t gen;
85242
85243 /*
85244 * How frequently and how many times transaction
85245diff --git a/include/linux/elf.h b/include/linux/elf.h
85246index 90a4ed0..d652617 100644
85247--- a/include/linux/elf.h
85248+++ b/include/linux/elf.h
85249@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
85250 #define PT_GNU_EH_FRAME 0x6474e550
85251
85252 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
85253+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
85254+
85255+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
85256+
85257+/* Constants for the e_flags field */
85258+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
85259+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
85260+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
85261+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
85262+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
85263+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
85264
85265 /* These constants define the different elf file types */
85266 #define ET_NONE 0
85267@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
85268 #define DT_DEBUG 21
85269 #define DT_TEXTREL 22
85270 #define DT_JMPREL 23
85271+#define DT_FLAGS 30
85272+ #define DF_TEXTREL 0x00000004
85273 #define DT_ENCODING 32
85274 #define OLD_DT_LOOS 0x60000000
85275 #define DT_LOOS 0x6000000d
85276@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
85277 #define PF_W 0x2
85278 #define PF_X 0x1
85279
85280+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
85281+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
85282+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
85283+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
85284+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
85285+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
85286+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
85287+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
85288+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
85289+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
85290+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
85291+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
85292+
85293 typedef struct elf32_phdr{
85294 Elf32_Word p_type;
85295 Elf32_Off p_offset;
85296@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
85297 #define EI_OSABI 7
85298 #define EI_PAD 8
85299
85300+#define EI_PAX 14
85301+
85302 #define ELFMAG0 0x7f /* EI_MAG */
85303 #define ELFMAG1 'E'
85304 #define ELFMAG2 'L'
85305@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
85306 #define elf_phdr elf32_phdr
85307 #define elf_note elf32_note
85308 #define elf_addr_t Elf32_Off
85309+#define elf_dyn Elf32_Dyn
85310
85311 #else
85312
85313@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
85314 #define elf_phdr elf64_phdr
85315 #define elf_note elf64_note
85316 #define elf_addr_t Elf64_Off
85317+#define elf_dyn Elf64_Dyn
85318
85319 #endif
85320
85321diff --git a/include/linux/fs.h b/include/linux/fs.h
85322index 1b9a47a..6fe2934 100644
85323--- a/include/linux/fs.h
85324+++ b/include/linux/fs.h
85325@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
85326 unsigned long, unsigned long);
85327
85328 struct address_space_operations {
85329- int (*writepage)(struct page *page, struct writeback_control *wbc);
85330- int (*readpage)(struct file *, struct page *);
85331- void (*sync_page)(struct page *);
85332+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
85333+ int (* const readpage)(struct file *, struct page *);
85334+ void (* const sync_page)(struct page *);
85335
85336 /* Write back some dirty pages from this mapping. */
85337- int (*writepages)(struct address_space *, struct writeback_control *);
85338+ int (* const writepages)(struct address_space *, struct writeback_control *);
85339
85340 /* Set a page dirty. Return true if this dirtied it */
85341- int (*set_page_dirty)(struct page *page);
85342+ int (* const set_page_dirty)(struct page *page);
85343
85344- int (*readpages)(struct file *filp, struct address_space *mapping,
85345+ int (* const readpages)(struct file *filp, struct address_space *mapping,
85346 struct list_head *pages, unsigned nr_pages);
85347
85348- int (*write_begin)(struct file *, struct address_space *mapping,
85349+ int (* const write_begin)(struct file *, struct address_space *mapping,
85350 loff_t pos, unsigned len, unsigned flags,
85351 struct page **pagep, void **fsdata);
85352- int (*write_end)(struct file *, struct address_space *mapping,
85353+ int (* const write_end)(struct file *, struct address_space *mapping,
85354 loff_t pos, unsigned len, unsigned copied,
85355 struct page *page, void *fsdata);
85356
85357 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
85358- sector_t (*bmap)(struct address_space *, sector_t);
85359- void (*invalidatepage) (struct page *, unsigned long);
85360- int (*releasepage) (struct page *, gfp_t);
85361- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
85362+ sector_t (* const bmap)(struct address_space *, sector_t);
85363+ void (* const invalidatepage) (struct page *, unsigned long);
85364+ int (* const releasepage) (struct page *, gfp_t);
85365+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
85366 loff_t offset, unsigned long nr_segs);
85367- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
85368+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
85369 void **, unsigned long *);
85370 /* migrate the contents of a page to the specified target */
85371- int (*migratepage) (struct address_space *,
85372+ int (* const migratepage) (struct address_space *,
85373 struct page *, struct page *);
85374- int (*launder_page) (struct page *);
85375- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
85376+ int (* const launder_page) (struct page *);
85377+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
85378 unsigned long);
85379- int (*error_remove_page)(struct address_space *, struct page *);
85380+ int (* const error_remove_page)(struct address_space *, struct page *);
85381 };
85382
85383 /*
85384@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
85385 typedef struct files_struct *fl_owner_t;
85386
85387 struct file_lock_operations {
85388- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85389- void (*fl_release_private)(struct file_lock *);
85390+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85391+ void (* const fl_release_private)(struct file_lock *);
85392 };
85393
85394 struct lock_manager_operations {
85395- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
85396- void (*fl_notify)(struct file_lock *); /* unblock callback */
85397- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
85398- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85399- void (*fl_release_private)(struct file_lock *);
85400- void (*fl_break)(struct file_lock *);
85401- int (*fl_mylease)(struct file_lock *, struct file_lock *);
85402- int (*fl_change)(struct file_lock **, int);
85403+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
85404+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
85405+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
85406+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85407+ void (* const fl_release_private)(struct file_lock *);
85408+ void (* const fl_break)(struct file_lock *);
85409+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
85410+ int (* const fl_change)(struct file_lock **, int);
85411 };
85412
85413 struct lock_manager {
85414@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
85415 unsigned int fi_flags; /* Flags as passed from user */
85416 unsigned int fi_extents_mapped; /* Number of mapped extents */
85417 unsigned int fi_extents_max; /* Size of fiemap_extent array */
85418- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
85419+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
85420 * array */
85421 };
85422 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
85423@@ -1512,7 +1512,8 @@ struct file_operations {
85424 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
85425 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
85426 int (*setlease)(struct file *, long, struct file_lock **);
85427-};
85428+} __do_const;
85429+typedef struct file_operations __no_const file_operations_no_const;
85430
85431 struct inode_operations {
85432 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
85433@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
85434 unsigned long, loff_t *);
85435
85436 struct super_operations {
85437- struct inode *(*alloc_inode)(struct super_block *sb);
85438- void (*destroy_inode)(struct inode *);
85439+ struct inode *(* const alloc_inode)(struct super_block *sb);
85440+ void (* const destroy_inode)(struct inode *);
85441
85442- void (*dirty_inode) (struct inode *);
85443- int (*write_inode) (struct inode *, int);
85444- void (*drop_inode) (struct inode *);
85445- void (*delete_inode) (struct inode *);
85446- void (*put_super) (struct super_block *);
85447- void (*write_super) (struct super_block *);
85448- int (*sync_fs)(struct super_block *sb, int wait);
85449- int (*freeze_fs) (struct super_block *);
85450- int (*unfreeze_fs) (struct super_block *);
85451- int (*statfs) (struct dentry *, struct kstatfs *);
85452- int (*remount_fs) (struct super_block *, int *, char *);
85453- void (*clear_inode) (struct inode *);
85454- void (*umount_begin) (struct super_block *);
85455+ void (* const dirty_inode) (struct inode *);
85456+ int (* const write_inode) (struct inode *, int);
85457+ void (* const drop_inode) (struct inode *);
85458+ void (* const delete_inode) (struct inode *);
85459+ void (* const put_super) (struct super_block *);
85460+ void (* const write_super) (struct super_block *);
85461+ int (* const sync_fs)(struct super_block *sb, int wait);
85462+ int (* const freeze_fs) (struct super_block *);
85463+ int (* const unfreeze_fs) (struct super_block *);
85464+ int (* const statfs) (struct dentry *, struct kstatfs *);
85465+ int (* const remount_fs) (struct super_block *, int *, char *);
85466+ void (* const clear_inode) (struct inode *);
85467+ void (* const umount_begin) (struct super_block *);
85468
85469- int (*show_options)(struct seq_file *, struct vfsmount *);
85470- int (*show_stats)(struct seq_file *, struct vfsmount *);
85471+ int (* const show_options)(struct seq_file *, struct vfsmount *);
85472+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
85473 #ifdef CONFIG_QUOTA
85474- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
85475- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85476+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
85477+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85478 #endif
85479- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85480+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85481 };
85482
85483 /*
85484diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
85485index 78a05bf..2a7d3e1 100644
85486--- a/include/linux/fs_struct.h
85487+++ b/include/linux/fs_struct.h
85488@@ -4,7 +4,7 @@
85489 #include <linux/path.h>
85490
85491 struct fs_struct {
85492- int users;
85493+ atomic_t users;
85494 rwlock_t lock;
85495 int umask;
85496 int in_exec;
85497diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
85498index 7be0c6f..2f63a2b 100644
85499--- a/include/linux/fscache-cache.h
85500+++ b/include/linux/fscache-cache.h
85501@@ -116,7 +116,7 @@ struct fscache_operation {
85502 #endif
85503 };
85504
85505-extern atomic_t fscache_op_debug_id;
85506+extern atomic_unchecked_t fscache_op_debug_id;
85507 extern const struct slow_work_ops fscache_op_slow_work_ops;
85508
85509 extern void fscache_enqueue_operation(struct fscache_operation *);
85510@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
85511 fscache_operation_release_t release)
85512 {
85513 atomic_set(&op->usage, 1);
85514- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
85515+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
85516 op->release = release;
85517 INIT_LIST_HEAD(&op->pend_link);
85518 fscache_set_op_state(op, "Init");
85519diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
85520index 4d6f47b..00bcedb 100644
85521--- a/include/linux/fsnotify_backend.h
85522+++ b/include/linux/fsnotify_backend.h
85523@@ -86,6 +86,7 @@ struct fsnotify_ops {
85524 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
85525 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
85526 };
85527+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
85528
85529 /*
85530 * A group is a "thing" that wants to receive notification about filesystem
85531diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
85532index 4ec5e67..42f1eb9 100644
85533--- a/include/linux/ftrace_event.h
85534+++ b/include/linux/ftrace_event.h
85535@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
85536 int filter_type);
85537 extern int trace_define_common_fields(struct ftrace_event_call *call);
85538
85539-#define is_signed_type(type) (((type)(-1)) < 0)
85540+#define is_signed_type(type) (((type)(-1)) < (type)1)
85541
85542 int trace_set_clr_event(const char *system, const char *event, int set);
85543
85544diff --git a/include/linux/genhd.h b/include/linux/genhd.h
85545index 297df45..b6a74ff 100644
85546--- a/include/linux/genhd.h
85547+++ b/include/linux/genhd.h
85548@@ -161,7 +161,7 @@ struct gendisk {
85549
85550 struct timer_rand_state *random;
85551
85552- atomic_t sync_io; /* RAID */
85553+ atomic_unchecked_t sync_io; /* RAID */
85554 struct work_struct async_notify;
85555 #ifdef CONFIG_BLK_DEV_INTEGRITY
85556 struct blk_integrity *integrity;
85557diff --git a/include/linux/gracl.h b/include/linux/gracl.h
85558new file mode 100644
85559index 0000000..af663cf
85560--- /dev/null
85561+++ b/include/linux/gracl.h
85562@@ -0,0 +1,319 @@
85563+#ifndef GR_ACL_H
85564+#define GR_ACL_H
85565+
85566+#include <linux/grdefs.h>
85567+#include <linux/resource.h>
85568+#include <linux/capability.h>
85569+#include <linux/dcache.h>
85570+#include <asm/resource.h>
85571+
85572+/* Major status information */
85573+
85574+#define GR_VERSION "grsecurity 2.9"
85575+#define GRSECURITY_VERSION 0x2900
85576+
85577+enum {
85578+ GR_SHUTDOWN = 0,
85579+ GR_ENABLE = 1,
85580+ GR_SPROLE = 2,
85581+ GR_RELOAD = 3,
85582+ GR_SEGVMOD = 4,
85583+ GR_STATUS = 5,
85584+ GR_UNSPROLE = 6,
85585+ GR_PASSSET = 7,
85586+ GR_SPROLEPAM = 8,
85587+};
85588+
85589+/* Password setup definitions
85590+ * kernel/grhash.c */
85591+enum {
85592+ GR_PW_LEN = 128,
85593+ GR_SALT_LEN = 16,
85594+ GR_SHA_LEN = 32,
85595+};
85596+
85597+enum {
85598+ GR_SPROLE_LEN = 64,
85599+};
85600+
85601+enum {
85602+ GR_NO_GLOB = 0,
85603+ GR_REG_GLOB,
85604+ GR_CREATE_GLOB
85605+};
85606+
85607+#define GR_NLIMITS 32
85608+
85609+/* Begin Data Structures */
85610+
85611+struct sprole_pw {
85612+ unsigned char *rolename;
85613+ unsigned char salt[GR_SALT_LEN];
85614+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
85615+};
85616+
85617+struct name_entry {
85618+ __u32 key;
85619+ ino_t inode;
85620+ dev_t device;
85621+ char *name;
85622+ __u16 len;
85623+ __u8 deleted;
85624+ struct name_entry *prev;
85625+ struct name_entry *next;
85626+};
85627+
85628+struct inodev_entry {
85629+ struct name_entry *nentry;
85630+ struct inodev_entry *prev;
85631+ struct inodev_entry *next;
85632+};
85633+
85634+struct acl_role_db {
85635+ struct acl_role_label **r_hash;
85636+ __u32 r_size;
85637+};
85638+
85639+struct inodev_db {
85640+ struct inodev_entry **i_hash;
85641+ __u32 i_size;
85642+};
85643+
85644+struct name_db {
85645+ struct name_entry **n_hash;
85646+ __u32 n_size;
85647+};
85648+
85649+struct crash_uid {
85650+ uid_t uid;
85651+ unsigned long expires;
85652+};
85653+
85654+struct gr_hash_struct {
85655+ void **table;
85656+ void **nametable;
85657+ void *first;
85658+ __u32 table_size;
85659+ __u32 used_size;
85660+ int type;
85661+};
85662+
85663+/* Userspace Grsecurity ACL data structures */
85664+
85665+struct acl_subject_label {
85666+ char *filename;
85667+ ino_t inode;
85668+ dev_t device;
85669+ __u32 mode;
85670+ kernel_cap_t cap_mask;
85671+ kernel_cap_t cap_lower;
85672+ kernel_cap_t cap_invert_audit;
85673+
85674+ struct rlimit res[GR_NLIMITS];
85675+ __u32 resmask;
85676+
85677+ __u8 user_trans_type;
85678+ __u8 group_trans_type;
85679+ uid_t *user_transitions;
85680+ gid_t *group_transitions;
85681+ __u16 user_trans_num;
85682+ __u16 group_trans_num;
85683+
85684+ __u32 sock_families[2];
85685+ __u32 ip_proto[8];
85686+ __u32 ip_type;
85687+ struct acl_ip_label **ips;
85688+ __u32 ip_num;
85689+ __u32 inaddr_any_override;
85690+
85691+ __u32 crashes;
85692+ unsigned long expires;
85693+
85694+ struct acl_subject_label *parent_subject;
85695+ struct gr_hash_struct *hash;
85696+ struct acl_subject_label *prev;
85697+ struct acl_subject_label *next;
85698+
85699+ struct acl_object_label **obj_hash;
85700+ __u32 obj_hash_size;
85701+ __u16 pax_flags;
85702+};
85703+
85704+struct role_allowed_ip {
85705+ __u32 addr;
85706+ __u32 netmask;
85707+
85708+ struct role_allowed_ip *prev;
85709+ struct role_allowed_ip *next;
85710+};
85711+
85712+struct role_transition {
85713+ char *rolename;
85714+
85715+ struct role_transition *prev;
85716+ struct role_transition *next;
85717+};
85718+
85719+struct acl_role_label {
85720+ char *rolename;
85721+ uid_t uidgid;
85722+ __u16 roletype;
85723+
85724+ __u16 auth_attempts;
85725+ unsigned long expires;
85726+
85727+ struct acl_subject_label *root_label;
85728+ struct gr_hash_struct *hash;
85729+
85730+ struct acl_role_label *prev;
85731+ struct acl_role_label *next;
85732+
85733+ struct role_transition *transitions;
85734+ struct role_allowed_ip *allowed_ips;
85735+ uid_t *domain_children;
85736+ __u16 domain_child_num;
85737+
85738+ mode_t umask;
85739+
85740+ struct acl_subject_label **subj_hash;
85741+ __u32 subj_hash_size;
85742+};
85743+
85744+struct user_acl_role_db {
85745+ struct acl_role_label **r_table;
85746+ __u32 num_pointers; /* Number of allocations to track */
85747+ __u32 num_roles; /* Number of roles */
85748+ __u32 num_domain_children; /* Number of domain children */
85749+ __u32 num_subjects; /* Number of subjects */
85750+ __u32 num_objects; /* Number of objects */
85751+};
85752+
85753+struct acl_object_label {
85754+ char *filename;
85755+ ino_t inode;
85756+ dev_t device;
85757+ __u32 mode;
85758+
85759+ struct acl_subject_label *nested;
85760+ struct acl_object_label *globbed;
85761+
85762+ /* next two structures not used */
85763+
85764+ struct acl_object_label *prev;
85765+ struct acl_object_label *next;
85766+};
85767+
85768+struct acl_ip_label {
85769+ char *iface;
85770+ __u32 addr;
85771+ __u32 netmask;
85772+ __u16 low, high;
85773+ __u8 mode;
85774+ __u32 type;
85775+ __u32 proto[8];
85776+
85777+ /* next two structures not used */
85778+
85779+ struct acl_ip_label *prev;
85780+ struct acl_ip_label *next;
85781+};
85782+
85783+struct gr_arg {
85784+ struct user_acl_role_db role_db;
85785+ unsigned char pw[GR_PW_LEN];
85786+ unsigned char salt[GR_SALT_LEN];
85787+ unsigned char sum[GR_SHA_LEN];
85788+ unsigned char sp_role[GR_SPROLE_LEN];
85789+ struct sprole_pw *sprole_pws;
85790+ dev_t segv_device;
85791+ ino_t segv_inode;
85792+ uid_t segv_uid;
85793+ __u16 num_sprole_pws;
85794+ __u16 mode;
85795+};
85796+
85797+struct gr_arg_wrapper {
85798+ struct gr_arg *arg;
85799+ __u32 version;
85800+ __u32 size;
85801+};
85802+
85803+struct subject_map {
85804+ struct acl_subject_label *user;
85805+ struct acl_subject_label *kernel;
85806+ struct subject_map *prev;
85807+ struct subject_map *next;
85808+};
85809+
85810+struct acl_subj_map_db {
85811+ struct subject_map **s_hash;
85812+ __u32 s_size;
85813+};
85814+
85815+/* End Data Structures Section */
85816+
85817+/* Hash functions generated by empirical testing by Brad Spengler
85818+ Makes good use of the low bits of the inode. Generally 0-1 times
85819+ in loop for successful match. 0-3 for unsuccessful match.
85820+ Shift/add algorithm with modulus of table size and an XOR*/
85821+
85822+static __inline__ unsigned int
85823+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
85824+{
85825+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
85826+}
85827+
85828+ static __inline__ unsigned int
85829+shash(const struct acl_subject_label *userp, const unsigned int sz)
85830+{
85831+ return ((const unsigned long)userp % sz);
85832+}
85833+
85834+static __inline__ unsigned int
85835+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
85836+{
85837+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
85838+}
85839+
85840+static __inline__ unsigned int
85841+nhash(const char *name, const __u16 len, const unsigned int sz)
85842+{
85843+ return full_name_hash((const unsigned char *)name, len) % sz;
85844+}
85845+
85846+#define FOR_EACH_ROLE_START(role) \
85847+ role = role_list; \
85848+ while (role) {
85849+
85850+#define FOR_EACH_ROLE_END(role) \
85851+ role = role->prev; \
85852+ }
85853+
85854+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
85855+ subj = NULL; \
85856+ iter = 0; \
85857+ while (iter < role->subj_hash_size) { \
85858+ if (subj == NULL) \
85859+ subj = role->subj_hash[iter]; \
85860+ if (subj == NULL) { \
85861+ iter++; \
85862+ continue; \
85863+ }
85864+
85865+#define FOR_EACH_SUBJECT_END(subj,iter) \
85866+ subj = subj->next; \
85867+ if (subj == NULL) \
85868+ iter++; \
85869+ }
85870+
85871+
85872+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
85873+ subj = role->hash->first; \
85874+ while (subj != NULL) {
85875+
85876+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
85877+ subj = subj->next; \
85878+ }
85879+
85880+#endif
85881+
85882diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
85883new file mode 100644
85884index 0000000..323ecf2
85885--- /dev/null
85886+++ b/include/linux/gralloc.h
85887@@ -0,0 +1,9 @@
85888+#ifndef __GRALLOC_H
85889+#define __GRALLOC_H
85890+
85891+void acl_free_all(void);
85892+int acl_alloc_stack_init(unsigned long size);
85893+void *acl_alloc(unsigned long len);
85894+void *acl_alloc_num(unsigned long num, unsigned long len);
85895+
85896+#endif
85897diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
85898new file mode 100644
85899index 0000000..70d6cd5
85900--- /dev/null
85901+++ b/include/linux/grdefs.h
85902@@ -0,0 +1,140 @@
85903+#ifndef GRDEFS_H
85904+#define GRDEFS_H
85905+
85906+/* Begin grsecurity status declarations */
85907+
85908+enum {
85909+ GR_READY = 0x01,
85910+ GR_STATUS_INIT = 0x00 // disabled state
85911+};
85912+
85913+/* Begin ACL declarations */
85914+
85915+/* Role flags */
85916+
85917+enum {
85918+ GR_ROLE_USER = 0x0001,
85919+ GR_ROLE_GROUP = 0x0002,
85920+ GR_ROLE_DEFAULT = 0x0004,
85921+ GR_ROLE_SPECIAL = 0x0008,
85922+ GR_ROLE_AUTH = 0x0010,
85923+ GR_ROLE_NOPW = 0x0020,
85924+ GR_ROLE_GOD = 0x0040,
85925+ GR_ROLE_LEARN = 0x0080,
85926+ GR_ROLE_TPE = 0x0100,
85927+ GR_ROLE_DOMAIN = 0x0200,
85928+ GR_ROLE_PAM = 0x0400,
85929+ GR_ROLE_PERSIST = 0x800
85930+};
85931+
85932+/* ACL Subject and Object mode flags */
85933+enum {
85934+ GR_DELETED = 0x80000000
85935+};
85936+
85937+/* ACL Object-only mode flags */
85938+enum {
85939+ GR_READ = 0x00000001,
85940+ GR_APPEND = 0x00000002,
85941+ GR_WRITE = 0x00000004,
85942+ GR_EXEC = 0x00000008,
85943+ GR_FIND = 0x00000010,
85944+ GR_INHERIT = 0x00000020,
85945+ GR_SETID = 0x00000040,
85946+ GR_CREATE = 0x00000080,
85947+ GR_DELETE = 0x00000100,
85948+ GR_LINK = 0x00000200,
85949+ GR_AUDIT_READ = 0x00000400,
85950+ GR_AUDIT_APPEND = 0x00000800,
85951+ GR_AUDIT_WRITE = 0x00001000,
85952+ GR_AUDIT_EXEC = 0x00002000,
85953+ GR_AUDIT_FIND = 0x00004000,
85954+ GR_AUDIT_INHERIT= 0x00008000,
85955+ GR_AUDIT_SETID = 0x00010000,
85956+ GR_AUDIT_CREATE = 0x00020000,
85957+ GR_AUDIT_DELETE = 0x00040000,
85958+ GR_AUDIT_LINK = 0x00080000,
85959+ GR_PTRACERD = 0x00100000,
85960+ GR_NOPTRACE = 0x00200000,
85961+ GR_SUPPRESS = 0x00400000,
85962+ GR_NOLEARN = 0x00800000,
85963+ GR_INIT_TRANSFER= 0x01000000
85964+};
85965+
85966+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
85967+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
85968+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
85969+
85970+/* ACL subject-only mode flags */
85971+enum {
85972+ GR_KILL = 0x00000001,
85973+ GR_VIEW = 0x00000002,
85974+ GR_PROTECTED = 0x00000004,
85975+ GR_LEARN = 0x00000008,
85976+ GR_OVERRIDE = 0x00000010,
85977+ /* just a placeholder, this mode is only used in userspace */
85978+ GR_DUMMY = 0x00000020,
85979+ GR_PROTSHM = 0x00000040,
85980+ GR_KILLPROC = 0x00000080,
85981+ GR_KILLIPPROC = 0x00000100,
85982+ /* just a placeholder, this mode is only used in userspace */
85983+ GR_NOTROJAN = 0x00000200,
85984+ GR_PROTPROCFD = 0x00000400,
85985+ GR_PROCACCT = 0x00000800,
85986+ GR_RELAXPTRACE = 0x00001000,
85987+ GR_NESTED = 0x00002000,
85988+ GR_INHERITLEARN = 0x00004000,
85989+ GR_PROCFIND = 0x00008000,
85990+ GR_POVERRIDE = 0x00010000,
85991+ GR_KERNELAUTH = 0x00020000,
85992+ GR_ATSECURE = 0x00040000,
85993+ GR_SHMEXEC = 0x00080000
85994+};
85995+
85996+enum {
85997+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
85998+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
85999+ GR_PAX_ENABLE_MPROTECT = 0x0004,
86000+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
86001+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
86002+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
86003+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
86004+ GR_PAX_DISABLE_MPROTECT = 0x0400,
86005+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
86006+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
86007+};
86008+
86009+enum {
86010+ GR_ID_USER = 0x01,
86011+ GR_ID_GROUP = 0x02,
86012+};
86013+
86014+enum {
86015+ GR_ID_ALLOW = 0x01,
86016+ GR_ID_DENY = 0x02,
86017+};
86018+
86019+#define GR_CRASH_RES 31
86020+#define GR_UIDTABLE_MAX 500
86021+
86022+/* begin resource learning section */
86023+enum {
86024+ GR_RLIM_CPU_BUMP = 60,
86025+ GR_RLIM_FSIZE_BUMP = 50000,
86026+ GR_RLIM_DATA_BUMP = 10000,
86027+ GR_RLIM_STACK_BUMP = 1000,
86028+ GR_RLIM_CORE_BUMP = 10000,
86029+ GR_RLIM_RSS_BUMP = 500000,
86030+ GR_RLIM_NPROC_BUMP = 1,
86031+ GR_RLIM_NOFILE_BUMP = 5,
86032+ GR_RLIM_MEMLOCK_BUMP = 50000,
86033+ GR_RLIM_AS_BUMP = 500000,
86034+ GR_RLIM_LOCKS_BUMP = 2,
86035+ GR_RLIM_SIGPENDING_BUMP = 5,
86036+ GR_RLIM_MSGQUEUE_BUMP = 10000,
86037+ GR_RLIM_NICE_BUMP = 1,
86038+ GR_RLIM_RTPRIO_BUMP = 1,
86039+ GR_RLIM_RTTIME_BUMP = 1000000
86040+};
86041+
86042+#endif
86043diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
86044new file mode 100644
86045index 0000000..3826b91
86046--- /dev/null
86047+++ b/include/linux/grinternal.h
86048@@ -0,0 +1,219 @@
86049+#ifndef __GRINTERNAL_H
86050+#define __GRINTERNAL_H
86051+
86052+#ifdef CONFIG_GRKERNSEC
86053+
86054+#include <linux/fs.h>
86055+#include <linux/mnt_namespace.h>
86056+#include <linux/nsproxy.h>
86057+#include <linux/gracl.h>
86058+#include <linux/grdefs.h>
86059+#include <linux/grmsg.h>
86060+
86061+void gr_add_learn_entry(const char *fmt, ...)
86062+ __attribute__ ((format (printf, 1, 2)));
86063+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
86064+ const struct vfsmount *mnt);
86065+__u32 gr_check_create(const struct dentry *new_dentry,
86066+ const struct dentry *parent,
86067+ const struct vfsmount *mnt, const __u32 mode);
86068+int gr_check_protected_task(const struct task_struct *task);
86069+__u32 to_gr_audit(const __u32 reqmode);
86070+int gr_set_acls(const int type);
86071+int gr_apply_subject_to_task(struct task_struct *task);
86072+int gr_acl_is_enabled(void);
86073+char gr_roletype_to_char(void);
86074+
86075+void gr_handle_alertkill(struct task_struct *task);
86076+char *gr_to_filename(const struct dentry *dentry,
86077+ const struct vfsmount *mnt);
86078+char *gr_to_filename1(const struct dentry *dentry,
86079+ const struct vfsmount *mnt);
86080+char *gr_to_filename2(const struct dentry *dentry,
86081+ const struct vfsmount *mnt);
86082+char *gr_to_filename3(const struct dentry *dentry,
86083+ const struct vfsmount *mnt);
86084+
86085+extern int grsec_enable_ptrace_readexec;
86086+extern int grsec_enable_harden_ptrace;
86087+extern int grsec_enable_link;
86088+extern int grsec_enable_fifo;
86089+extern int grsec_enable_shm;
86090+extern int grsec_enable_execlog;
86091+extern int grsec_enable_signal;
86092+extern int grsec_enable_audit_ptrace;
86093+extern int grsec_enable_forkfail;
86094+extern int grsec_enable_time;
86095+extern int grsec_enable_rofs;
86096+extern int grsec_enable_chroot_shmat;
86097+extern int grsec_enable_chroot_mount;
86098+extern int grsec_enable_chroot_double;
86099+extern int grsec_enable_chroot_pivot;
86100+extern int grsec_enable_chroot_chdir;
86101+extern int grsec_enable_chroot_chmod;
86102+extern int grsec_enable_chroot_mknod;
86103+extern int grsec_enable_chroot_fchdir;
86104+extern int grsec_enable_chroot_nice;
86105+extern int grsec_enable_chroot_execlog;
86106+extern int grsec_enable_chroot_caps;
86107+extern int grsec_enable_chroot_sysctl;
86108+extern int grsec_enable_chroot_unix;
86109+extern int grsec_enable_tpe;
86110+extern int grsec_tpe_gid;
86111+extern int grsec_enable_tpe_all;
86112+extern int grsec_enable_tpe_invert;
86113+extern int grsec_enable_socket_all;
86114+extern int grsec_socket_all_gid;
86115+extern int grsec_enable_socket_client;
86116+extern int grsec_socket_client_gid;
86117+extern int grsec_enable_socket_server;
86118+extern int grsec_socket_server_gid;
86119+extern int grsec_audit_gid;
86120+extern int grsec_enable_group;
86121+extern int grsec_enable_audit_textrel;
86122+extern int grsec_enable_log_rwxmaps;
86123+extern int grsec_enable_mount;
86124+extern int grsec_enable_chdir;
86125+extern int grsec_resource_logging;
86126+extern int grsec_enable_blackhole;
86127+extern int grsec_lastack_retries;
86128+extern int grsec_enable_brute;
86129+extern int grsec_lock;
86130+
86131+extern spinlock_t grsec_alert_lock;
86132+extern unsigned long grsec_alert_wtime;
86133+extern unsigned long grsec_alert_fyet;
86134+
86135+extern spinlock_t grsec_audit_lock;
86136+
86137+extern rwlock_t grsec_exec_file_lock;
86138+
86139+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
86140+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
86141+ (tsk)->exec_file->f_vfsmnt) : "/")
86142+
86143+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
86144+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
86145+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86146+
86147+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
86148+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
86149+ (tsk)->exec_file->f_vfsmnt) : "/")
86150+
86151+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
86152+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
86153+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86154+
86155+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
86156+
86157+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
86158+
86159+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
86160+ (task)->pid, (cred)->uid, \
86161+ (cred)->euid, (cred)->gid, (cred)->egid, \
86162+ gr_parent_task_fullpath(task), \
86163+ (task)->real_parent->comm, (task)->real_parent->pid, \
86164+ (pcred)->uid, (pcred)->euid, \
86165+ (pcred)->gid, (pcred)->egid
86166+
86167+#define GR_CHROOT_CAPS {{ \
86168+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
86169+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
86170+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
86171+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
86172+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
86173+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
86174+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
86175+
86176+#define security_learn(normal_msg,args...) \
86177+({ \
86178+ read_lock(&grsec_exec_file_lock); \
86179+ gr_add_learn_entry(normal_msg "\n", ## args); \
86180+ read_unlock(&grsec_exec_file_lock); \
86181+})
86182+
86183+enum {
86184+ GR_DO_AUDIT,
86185+ GR_DONT_AUDIT,
86186+ GR_DONT_AUDIT_GOOD
86187+};
86188+
86189+enum {
86190+ GR_TTYSNIFF,
86191+ GR_RBAC,
86192+ GR_RBAC_STR,
86193+ GR_STR_RBAC,
86194+ GR_RBAC_MODE2,
86195+ GR_RBAC_MODE3,
86196+ GR_FILENAME,
86197+ GR_SYSCTL_HIDDEN,
86198+ GR_NOARGS,
86199+ GR_ONE_INT,
86200+ GR_ONE_INT_TWO_STR,
86201+ GR_ONE_STR,
86202+ GR_STR_INT,
86203+ GR_TWO_STR_INT,
86204+ GR_TWO_INT,
86205+ GR_TWO_U64,
86206+ GR_THREE_INT,
86207+ GR_FIVE_INT_TWO_STR,
86208+ GR_TWO_STR,
86209+ GR_THREE_STR,
86210+ GR_FOUR_STR,
86211+ GR_STR_FILENAME,
86212+ GR_FILENAME_STR,
86213+ GR_FILENAME_TWO_INT,
86214+ GR_FILENAME_TWO_INT_STR,
86215+ GR_TEXTREL,
86216+ GR_PTRACE,
86217+ GR_RESOURCE,
86218+ GR_CAP,
86219+ GR_SIG,
86220+ GR_SIG2,
86221+ GR_CRASH1,
86222+ GR_CRASH2,
86223+ GR_PSACCT,
86224+ GR_RWXMAP
86225+};
86226+
86227+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
86228+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
86229+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
86230+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
86231+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
86232+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
86233+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
86234+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
86235+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
86236+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
86237+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
86238+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
86239+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
86240+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
86241+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
86242+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
86243+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
86244+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
86245+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
86246+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
86247+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
86248+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
86249+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
86250+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
86251+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
86252+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
86253+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
86254+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
86255+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
86256+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
86257+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
86258+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
86259+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
86260+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
86261+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
86262+
86263+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
86264+
86265+#endif
86266+
86267+#endif
86268diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
86269new file mode 100644
86270index 0000000..f885406
86271--- /dev/null
86272+++ b/include/linux/grmsg.h
86273@@ -0,0 +1,109 @@
86274+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
86275+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
86276+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
86277+#define GR_STOPMOD_MSG "denied modification of module state by "
86278+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
86279+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
86280+#define GR_IOPERM_MSG "denied use of ioperm() by "
86281+#define GR_IOPL_MSG "denied use of iopl() by "
86282+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
86283+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
86284+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
86285+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
86286+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
86287+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
86288+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
86289+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
86290+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
86291+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
86292+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
86293+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
86294+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
86295+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
86296+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
86297+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
86298+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
86299+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
86300+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
86301+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
86302+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
86303+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
86304+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
86305+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
86306+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
86307+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
86308+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
86309+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
86310+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
86311+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
86312+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
86313+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
86314+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
86315+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
86316+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
86317+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
86318+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
86319+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
86320+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
86321+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
86322+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
86323+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
86324+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
86325+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
86326+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
86327+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
86328+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
86329+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
86330+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
86331+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
86332+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
86333+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
86334+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
86335+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
86336+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
86337+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
86338+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
86339+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
86340+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
86341+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
86342+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
86343+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
86344+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
86345+#define GR_FAILFORK_MSG "failed fork with errno %s by "
86346+#define GR_NICE_CHROOT_MSG "denied priority change by "
86347+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
86348+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
86349+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
86350+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
86351+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
86352+#define GR_TIME_MSG "time set by "
86353+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
86354+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
86355+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
86356+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
86357+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
86358+#define GR_BIND_MSG "denied bind() by "
86359+#define GR_CONNECT_MSG "denied connect() by "
86360+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
86361+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
86362+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
86363+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
86364+#define GR_CAP_ACL_MSG "use of %s denied for "
86365+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
86366+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
86367+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
86368+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
86369+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
86370+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
86371+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
86372+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
86373+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
86374+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
86375+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
86376+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
86377+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
86378+#define GR_VM86_MSG "denied use of vm86 by "
86379+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
86380+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
86381+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
86382+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
86383diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
86384new file mode 100644
86385index 0000000..c1793ae
86386--- /dev/null
86387+++ b/include/linux/grsecurity.h
86388@@ -0,0 +1,219 @@
86389+#ifndef GR_SECURITY_H
86390+#define GR_SECURITY_H
86391+#include <linux/fs.h>
86392+#include <linux/fs_struct.h>
86393+#include <linux/binfmts.h>
86394+#include <linux/gracl.h>
86395+#include <linux/compat.h>
86396+
86397+/* notify of brain-dead configs */
86398+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86399+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
86400+#endif
86401+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
86402+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
86403+#endif
86404+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
86405+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
86406+#endif
86407+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
86408+#error "CONFIG_PAX enabled, but no PaX options are enabled."
86409+#endif
86410+
86411+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
86412+void gr_handle_brute_check(void);
86413+void gr_handle_kernel_exploit(void);
86414+int gr_process_user_ban(void);
86415+
86416+char gr_roletype_to_char(void);
86417+
86418+int gr_acl_enable_at_secure(void);
86419+
86420+int gr_check_user_change(int real, int effective, int fs);
86421+int gr_check_group_change(int real, int effective, int fs);
86422+
86423+void gr_del_task_from_ip_table(struct task_struct *p);
86424+
86425+int gr_pid_is_chrooted(struct task_struct *p);
86426+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
86427+int gr_handle_chroot_nice(void);
86428+int gr_handle_chroot_sysctl(const int op);
86429+int gr_handle_chroot_setpriority(struct task_struct *p,
86430+ const int niceval);
86431+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
86432+int gr_handle_chroot_chroot(const struct dentry *dentry,
86433+ const struct vfsmount *mnt);
86434+void gr_handle_chroot_chdir(struct path *path);
86435+int gr_handle_chroot_chmod(const struct dentry *dentry,
86436+ const struct vfsmount *mnt, const int mode);
86437+int gr_handle_chroot_mknod(const struct dentry *dentry,
86438+ const struct vfsmount *mnt, const int mode);
86439+int gr_handle_chroot_mount(const struct dentry *dentry,
86440+ const struct vfsmount *mnt,
86441+ const char *dev_name);
86442+int gr_handle_chroot_pivot(void);
86443+int gr_handle_chroot_unix(const pid_t pid);
86444+
86445+int gr_handle_rawio(const struct inode *inode);
86446+
86447+void gr_handle_ioperm(void);
86448+void gr_handle_iopl(void);
86449+
86450+umode_t gr_acl_umask(void);
86451+
86452+int gr_tpe_allow(const struct file *file);
86453+
86454+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
86455+void gr_clear_chroot_entries(struct task_struct *task);
86456+
86457+void gr_log_forkfail(const int retval);
86458+void gr_log_timechange(void);
86459+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
86460+void gr_log_chdir(const struct dentry *dentry,
86461+ const struct vfsmount *mnt);
86462+void gr_log_chroot_exec(const struct dentry *dentry,
86463+ const struct vfsmount *mnt);
86464+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
86465+#ifdef CONFIG_COMPAT
86466+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
86467+#endif
86468+void gr_log_remount(const char *devname, const int retval);
86469+void gr_log_unmount(const char *devname, const int retval);
86470+void gr_log_mount(const char *from, const char *to, const int retval);
86471+void gr_log_textrel(struct vm_area_struct *vma);
86472+void gr_log_rwxmmap(struct file *file);
86473+void gr_log_rwxmprotect(struct file *file);
86474+
86475+int gr_handle_follow_link(const struct inode *parent,
86476+ const struct inode *inode,
86477+ const struct dentry *dentry,
86478+ const struct vfsmount *mnt);
86479+int gr_handle_fifo(const struct dentry *dentry,
86480+ const struct vfsmount *mnt,
86481+ const struct dentry *dir, const int flag,
86482+ const int acc_mode);
86483+int gr_handle_hardlink(const struct dentry *dentry,
86484+ const struct vfsmount *mnt,
86485+ struct inode *inode,
86486+ const int mode, const char *to);
86487+
86488+int gr_is_capable(const int cap);
86489+int gr_is_capable_nolog(const int cap);
86490+void gr_learn_resource(const struct task_struct *task, const int limit,
86491+ const unsigned long wanted, const int gt);
86492+void gr_copy_label(struct task_struct *tsk);
86493+void gr_handle_crash(struct task_struct *task, const int sig);
86494+int gr_handle_signal(const struct task_struct *p, const int sig);
86495+int gr_check_crash_uid(const uid_t uid);
86496+int gr_check_protected_task(const struct task_struct *task);
86497+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
86498+int gr_acl_handle_mmap(const struct file *file,
86499+ const unsigned long prot);
86500+int gr_acl_handle_mprotect(const struct file *file,
86501+ const unsigned long prot);
86502+int gr_check_hidden_task(const struct task_struct *tsk);
86503+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
86504+ const struct vfsmount *mnt);
86505+__u32 gr_acl_handle_utime(const struct dentry *dentry,
86506+ const struct vfsmount *mnt);
86507+__u32 gr_acl_handle_access(const struct dentry *dentry,
86508+ const struct vfsmount *mnt, const int fmode);
86509+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
86510+ const struct vfsmount *mnt, umode_t *mode);
86511+__u32 gr_acl_handle_chown(const struct dentry *dentry,
86512+ const struct vfsmount *mnt);
86513+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
86514+ const struct vfsmount *mnt);
86515+int gr_handle_ptrace(struct task_struct *task, const long request);
86516+int gr_handle_proc_ptrace(struct task_struct *task);
86517+__u32 gr_acl_handle_execve(const struct dentry *dentry,
86518+ const struct vfsmount *mnt);
86519+int gr_check_crash_exec(const struct file *filp);
86520+int gr_acl_is_enabled(void);
86521+void gr_set_kernel_label(struct task_struct *task);
86522+void gr_set_role_label(struct task_struct *task, const uid_t uid,
86523+ const gid_t gid);
86524+int gr_set_proc_label(const struct dentry *dentry,
86525+ const struct vfsmount *mnt,
86526+ const int unsafe_flags);
86527+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
86528+ const struct vfsmount *mnt);
86529+__u32 gr_acl_handle_open(const struct dentry *dentry,
86530+ const struct vfsmount *mnt, int acc_mode);
86531+__u32 gr_acl_handle_creat(const struct dentry *dentry,
86532+ const struct dentry *p_dentry,
86533+ const struct vfsmount *p_mnt,
86534+ int open_flags, int acc_mode, const int imode);
86535+void gr_handle_create(const struct dentry *dentry,
86536+ const struct vfsmount *mnt);
86537+void gr_handle_proc_create(const struct dentry *dentry,
86538+ const struct inode *inode);
86539+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
86540+ const struct dentry *parent_dentry,
86541+ const struct vfsmount *parent_mnt,
86542+ const int mode);
86543+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
86544+ const struct dentry *parent_dentry,
86545+ const struct vfsmount *parent_mnt);
86546+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
86547+ const struct vfsmount *mnt);
86548+void gr_handle_delete(const ino_t ino, const dev_t dev);
86549+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
86550+ const struct vfsmount *mnt);
86551+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
86552+ const struct dentry *parent_dentry,
86553+ const struct vfsmount *parent_mnt,
86554+ const char *from);
86555+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
86556+ const struct dentry *parent_dentry,
86557+ const struct vfsmount *parent_mnt,
86558+ const struct dentry *old_dentry,
86559+ const struct vfsmount *old_mnt, const char *to);
86560+int gr_acl_handle_rename(struct dentry *new_dentry,
86561+ struct dentry *parent_dentry,
86562+ const struct vfsmount *parent_mnt,
86563+ struct dentry *old_dentry,
86564+ struct inode *old_parent_inode,
86565+ struct vfsmount *old_mnt, const char *newname);
86566+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
86567+ struct dentry *old_dentry,
86568+ struct dentry *new_dentry,
86569+ struct vfsmount *mnt, const __u8 replace);
86570+__u32 gr_check_link(const struct dentry *new_dentry,
86571+ const struct dentry *parent_dentry,
86572+ const struct vfsmount *parent_mnt,
86573+ const struct dentry *old_dentry,
86574+ const struct vfsmount *old_mnt);
86575+int gr_acl_handle_filldir(const struct file *file, const char *name,
86576+ const unsigned int namelen, const ino_t ino);
86577+
86578+__u32 gr_acl_handle_unix(const struct dentry *dentry,
86579+ const struct vfsmount *mnt);
86580+void gr_acl_handle_exit(void);
86581+void gr_acl_handle_psacct(struct task_struct *task, const long code);
86582+int gr_acl_handle_procpidmem(const struct task_struct *task);
86583+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
86584+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
86585+void gr_audit_ptrace(struct task_struct *task);
86586+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
86587+
86588+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
86589+
86590+#ifdef CONFIG_GRKERNSEC
86591+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
86592+void gr_handle_vm86(void);
86593+void gr_handle_mem_readwrite(u64 from, u64 to);
86594+
86595+void gr_log_badprocpid(const char *entry);
86596+
86597+extern int grsec_enable_dmesg;
86598+extern int grsec_disable_privio;
86599+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
86600+extern int grsec_enable_chroot_findtask;
86601+#endif
86602+#ifdef CONFIG_GRKERNSEC_SETXID
86603+extern int grsec_enable_setxid;
86604+#endif
86605+#endif
86606+
86607+#endif
86608diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
86609index 6a87154..a3ce57b 100644
86610--- a/include/linux/hdpu_features.h
86611+++ b/include/linux/hdpu_features.h
86612@@ -3,7 +3,7 @@
86613 struct cpustate_t {
86614 spinlock_t lock;
86615 int excl;
86616- int open_count;
86617+ atomic_t open_count;
86618 unsigned char cached_val;
86619 int inited;
86620 unsigned long *set_addr;
86621diff --git a/include/linux/highmem.h b/include/linux/highmem.h
86622index 211ff44..00ab6d7 100644
86623--- a/include/linux/highmem.h
86624+++ b/include/linux/highmem.h
86625@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
86626 kunmap_atomic(kaddr, KM_USER0);
86627 }
86628
86629+static inline void sanitize_highpage(struct page *page)
86630+{
86631+ void *kaddr;
86632+ unsigned long flags;
86633+
86634+ local_irq_save(flags);
86635+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
86636+ clear_page(kaddr);
86637+ kunmap_atomic(kaddr, KM_CLEARPAGE);
86638+ local_irq_restore(flags);
86639+}
86640+
86641 static inline void zero_user_segments(struct page *page,
86642 unsigned start1, unsigned end1,
86643 unsigned start2, unsigned end2)
86644diff --git a/include/linux/i2c.h b/include/linux/i2c.h
86645index 7b40cda..24eb44e 100644
86646--- a/include/linux/i2c.h
86647+++ b/include/linux/i2c.h
86648@@ -325,6 +325,7 @@ struct i2c_algorithm {
86649 /* To determine what the adapter supports */
86650 u32 (*functionality) (struct i2c_adapter *);
86651 };
86652+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
86653
86654 /*
86655 * i2c_adapter is the structure used to identify a physical i2c bus along
86656diff --git a/include/linux/i2o.h b/include/linux/i2o.h
86657index 4c4e57d..f3c5303 100644
86658--- a/include/linux/i2o.h
86659+++ b/include/linux/i2o.h
86660@@ -564,7 +564,7 @@ struct i2o_controller {
86661 struct i2o_device *exec; /* Executive */
86662 #if BITS_PER_LONG == 64
86663 spinlock_t context_list_lock; /* lock for context_list */
86664- atomic_t context_list_counter; /* needed for unique contexts */
86665+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
86666 struct list_head context_list; /* list of context id's
86667 and pointers */
86668 #endif
86669diff --git a/include/linux/init_task.h b/include/linux/init_task.h
86670index 21a6f5d..dc42eab 100644
86671--- a/include/linux/init_task.h
86672+++ b/include/linux/init_task.h
86673@@ -83,6 +83,12 @@ extern struct group_info init_groups;
86674 #define INIT_IDS
86675 #endif
86676
86677+#ifdef CONFIG_X86
86678+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
86679+#else
86680+#define INIT_TASK_THREAD_INFO
86681+#endif
86682+
86683 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
86684 /*
86685 * Because of the reduced scope of CAP_SETPCAP when filesystem
86686@@ -156,6 +162,7 @@ extern struct cred init_cred;
86687 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
86688 .comm = "swapper", \
86689 .thread = INIT_THREAD, \
86690+ INIT_TASK_THREAD_INFO \
86691 .fs = &init_fs, \
86692 .files = &init_files, \
86693 .signal = &init_signals, \
86694diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
86695index 4f0a72a..a849599 100644
86696--- a/include/linux/intel-iommu.h
86697+++ b/include/linux/intel-iommu.h
86698@@ -296,7 +296,7 @@ struct iommu_flush {
86699 u8 fm, u64 type);
86700 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
86701 unsigned int size_order, u64 type);
86702-};
86703+} __no_const;
86704
86705 enum {
86706 SR_DMAR_FECTL_REG,
86707diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
86708index c739150..be577b5 100644
86709--- a/include/linux/interrupt.h
86710+++ b/include/linux/interrupt.h
86711@@ -369,7 +369,7 @@ enum
86712 /* map softirq index to softirq name. update 'softirq_to_name' in
86713 * kernel/softirq.c when adding a new softirq.
86714 */
86715-extern char *softirq_to_name[NR_SOFTIRQS];
86716+extern const char * const softirq_to_name[NR_SOFTIRQS];
86717
86718 /* softirq mask and active fields moved to irq_cpustat_t in
86719 * asm/hardirq.h to get better cache usage. KAO
86720@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
86721
86722 struct softirq_action
86723 {
86724- void (*action)(struct softirq_action *);
86725+ void (*action)(void);
86726 };
86727
86728 asmlinkage void do_softirq(void);
86729 asmlinkage void __do_softirq(void);
86730-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
86731+extern void open_softirq(int nr, void (*action)(void));
86732 extern void softirq_init(void);
86733 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
86734 extern void raise_softirq_irqoff(unsigned int nr);
86735diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
86736index eb73632..19abfc1 100644
86737--- a/include/linux/iocontext.h
86738+++ b/include/linux/iocontext.h
86739@@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
86740 return NULL;
86741 }
86742
86743+struct task_struct;
86744 #ifdef CONFIG_BLOCK
86745 int put_io_context(struct io_context *ioc);
86746-void exit_io_context(void);
86747+void exit_io_context(struct task_struct *task);
86748 struct io_context *get_io_context(gfp_t gfp_flags, int node);
86749 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
86750 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
86751 #else
86752-static inline void exit_io_context(void)
86753+static inline void exit_io_context(struct task_struct *task)
86754 {
86755 }
86756
86757diff --git a/include/linux/irq.h b/include/linux/irq.h
86758index 9e5f45a..025865b 100644
86759--- a/include/linux/irq.h
86760+++ b/include/linux/irq.h
86761@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
86762 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
86763 bool boot)
86764 {
86765+#ifdef CONFIG_CPUMASK_OFFSTACK
86766 gfp_t gfp = GFP_ATOMIC;
86767
86768 if (boot)
86769 gfp = GFP_NOWAIT;
86770
86771-#ifdef CONFIG_CPUMASK_OFFSTACK
86772 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
86773 return false;
86774
86775diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
86776index 7922742..27306a2 100644
86777--- a/include/linux/kallsyms.h
86778+++ b/include/linux/kallsyms.h
86779@@ -15,7 +15,8 @@
86780
86781 struct module;
86782
86783-#ifdef CONFIG_KALLSYMS
86784+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
86785+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86786 /* Lookup the address for a symbol. Returns 0 if not found. */
86787 unsigned long kallsyms_lookup_name(const char *name);
86788
86789@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
86790 /* Stupid that this does nothing, but I didn't create this mess. */
86791 #define __print_symbol(fmt, addr)
86792 #endif /*CONFIG_KALLSYMS*/
86793+#else /* when included by kallsyms.c, vsnprintf.c, or
86794+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
86795+extern void __print_symbol(const char *fmt, unsigned long address);
86796+extern int sprint_symbol(char *buffer, unsigned long address);
86797+const char *kallsyms_lookup(unsigned long addr,
86798+ unsigned long *symbolsize,
86799+ unsigned long *offset,
86800+ char **modname, char *namebuf);
86801+#endif
86802
86803 /* This macro allows us to keep printk typechecking */
86804 static void __check_printsym_format(const char *fmt, ...)
86805diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
86806index 6adcc29..13369e8 100644
86807--- a/include/linux/kgdb.h
86808+++ b/include/linux/kgdb.h
86809@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
86810
86811 extern int kgdb_connected;
86812
86813-extern atomic_t kgdb_setting_breakpoint;
86814-extern atomic_t kgdb_cpu_doing_single_step;
86815+extern atomic_unchecked_t kgdb_setting_breakpoint;
86816+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
86817
86818 extern struct task_struct *kgdb_usethread;
86819 extern struct task_struct *kgdb_contthread;
86820@@ -235,7 +235,7 @@ struct kgdb_arch {
86821 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
86822 void (*remove_all_hw_break)(void);
86823 void (*correct_hw_break)(void);
86824-};
86825+} __do_const;
86826
86827 /**
86828 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
86829@@ -257,14 +257,14 @@ struct kgdb_io {
86830 int (*init) (void);
86831 void (*pre_exception) (void);
86832 void (*post_exception) (void);
86833-};
86834+} __do_const;
86835
86836-extern struct kgdb_arch arch_kgdb_ops;
86837+extern const struct kgdb_arch arch_kgdb_ops;
86838
86839 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
86840
86841-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
86842-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
86843+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
86844+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
86845
86846 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
86847 extern int kgdb_mem2hex(char *mem, char *buf, int count);
86848diff --git a/include/linux/kmod.h b/include/linux/kmod.h
86849index 0546fe7..2a22bc1 100644
86850--- a/include/linux/kmod.h
86851+++ b/include/linux/kmod.h
86852@@ -31,6 +31,8 @@
86853 * usually useless though. */
86854 extern int __request_module(bool wait, const char *name, ...) \
86855 __attribute__((format(printf, 2, 3)));
86856+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
86857+ __attribute__((format(printf, 3, 4)));
86858 #define request_module(mod...) __request_module(true, mod)
86859 #define request_module_nowait(mod...) __request_module(false, mod)
86860 #define try_then_request_module(x, mod...) \
86861diff --git a/include/linux/kobject.h b/include/linux/kobject.h
86862index 58ae8e0..3950d3c 100644
86863--- a/include/linux/kobject.h
86864+++ b/include/linux/kobject.h
86865@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
86866
86867 struct kobj_type {
86868 void (*release)(struct kobject *kobj);
86869- struct sysfs_ops *sysfs_ops;
86870+ const struct sysfs_ops *sysfs_ops;
86871 struct attribute **default_attrs;
86872 };
86873
86874@@ -118,9 +118,9 @@ struct kobj_uevent_env {
86875 };
86876
86877 struct kset_uevent_ops {
86878- int (*filter)(struct kset *kset, struct kobject *kobj);
86879- const char *(*name)(struct kset *kset, struct kobject *kobj);
86880- int (*uevent)(struct kset *kset, struct kobject *kobj,
86881+ int (* const filter)(struct kset *kset, struct kobject *kobj);
86882+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
86883+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
86884 struct kobj_uevent_env *env);
86885 };
86886
86887@@ -132,7 +132,7 @@ struct kobj_attribute {
86888 const char *buf, size_t count);
86889 };
86890
86891-extern struct sysfs_ops kobj_sysfs_ops;
86892+extern const struct sysfs_ops kobj_sysfs_ops;
86893
86894 /**
86895 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
86896@@ -155,14 +155,14 @@ struct kset {
86897 struct list_head list;
86898 spinlock_t list_lock;
86899 struct kobject kobj;
86900- struct kset_uevent_ops *uevent_ops;
86901+ const struct kset_uevent_ops *uevent_ops;
86902 };
86903
86904 extern void kset_init(struct kset *kset);
86905 extern int __must_check kset_register(struct kset *kset);
86906 extern void kset_unregister(struct kset *kset);
86907 extern struct kset * __must_check kset_create_and_add(const char *name,
86908- struct kset_uevent_ops *u,
86909+ const struct kset_uevent_ops *u,
86910 struct kobject *parent_kobj);
86911
86912 static inline struct kset *to_kset(struct kobject *kobj)
86913diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
86914index c728a50..762821f 100644
86915--- a/include/linux/kvm_host.h
86916+++ b/include/linux/kvm_host.h
86917@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
86918 void vcpu_load(struct kvm_vcpu *vcpu);
86919 void vcpu_put(struct kvm_vcpu *vcpu);
86920
86921-int kvm_init(void *opaque, unsigned int vcpu_size,
86922+int kvm_init(const void *opaque, unsigned int vcpu_size,
86923 struct module *module);
86924 void kvm_exit(void);
86925
86926@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
86927 struct kvm_guest_debug *dbg);
86928 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
86929
86930-int kvm_arch_init(void *opaque);
86931+int kvm_arch_init(const void *opaque);
86932 void kvm_arch_exit(void);
86933
86934 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
86935@@ -519,7 +519,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
86936 int kvm_set_irq_routing(struct kvm *kvm,
86937 const struct kvm_irq_routing_entry *entries,
86938 unsigned nr,
86939- unsigned flags);
86940+ unsigned flags) __size_overflow(3);
86941 void kvm_free_irq_routing(struct kvm *kvm);
86942
86943 #else
86944diff --git a/include/linux/libata.h b/include/linux/libata.h
86945index a069916..223edde 100644
86946--- a/include/linux/libata.h
86947+++ b/include/linux/libata.h
86948@@ -525,11 +525,11 @@ struct ata_ioports {
86949
86950 struct ata_host {
86951 spinlock_t lock;
86952- struct device *dev;
86953+ struct device *dev;
86954 void __iomem * const *iomap;
86955 unsigned int n_ports;
86956 void *private_data;
86957- struct ata_port_operations *ops;
86958+ const struct ata_port_operations *ops;
86959 unsigned long flags;
86960 #ifdef CONFIG_ATA_ACPI
86961 acpi_handle acpi_handle;
86962@@ -710,7 +710,7 @@ struct ata_link {
86963
86964 struct ata_port {
86965 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
86966- struct ata_port_operations *ops;
86967+ const struct ata_port_operations *ops;
86968 spinlock_t *lock;
86969 /* Flags owned by the EH context. Only EH should touch these once the
86970 port is active */
86971@@ -884,7 +884,7 @@ struct ata_port_operations {
86972 * fields must be pointers.
86973 */
86974 const struct ata_port_operations *inherits;
86975-};
86976+} __do_const;
86977
86978 struct ata_port_info {
86979 unsigned long flags;
86980@@ -892,7 +892,7 @@ struct ata_port_info {
86981 unsigned long pio_mask;
86982 unsigned long mwdma_mask;
86983 unsigned long udma_mask;
86984- struct ata_port_operations *port_ops;
86985+ const struct ata_port_operations *port_ops;
86986 void *private_data;
86987 };
86988
86989@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
86990 extern const unsigned long sata_deb_timing_hotplug[];
86991 extern const unsigned long sata_deb_timing_long[];
86992
86993-extern struct ata_port_operations ata_dummy_port_ops;
86994+extern const struct ata_port_operations ata_dummy_port_ops;
86995 extern const struct ata_port_info ata_dummy_port_info;
86996
86997 static inline const unsigned long *
86998@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
86999 struct scsi_host_template *sht);
87000 extern void ata_host_detach(struct ata_host *host);
87001 extern void ata_host_init(struct ata_host *, struct device *,
87002- unsigned long, struct ata_port_operations *);
87003+ unsigned long, const struct ata_port_operations *);
87004 extern int ata_scsi_detect(struct scsi_host_template *sht);
87005 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
87006 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
87007diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
87008index fbc48f8..0886e57 100644
87009--- a/include/linux/lockd/bind.h
87010+++ b/include/linux/lockd/bind.h
87011@@ -23,13 +23,13 @@ struct svc_rqst;
87012 * This is the set of functions for lockd->nfsd communication
87013 */
87014 struct nlmsvc_binding {
87015- __be32 (*fopen)(struct svc_rqst *,
87016+ __be32 (* const fopen)(struct svc_rqst *,
87017 struct nfs_fh *,
87018 struct file **);
87019- void (*fclose)(struct file *);
87020+ void (* const fclose)(struct file *);
87021 };
87022
87023-extern struct nlmsvc_binding * nlmsvc_ops;
87024+extern const struct nlmsvc_binding * nlmsvc_ops;
87025
87026 /*
87027 * Similar to nfs_client_initdata, but without the NFS-specific
87028diff --git a/include/linux/mca.h b/include/linux/mca.h
87029index 3797270..7765ede 100644
87030--- a/include/linux/mca.h
87031+++ b/include/linux/mca.h
87032@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
87033 int region);
87034 void * (*mca_transform_memory)(struct mca_device *,
87035 void *memory);
87036-};
87037+} __no_const;
87038
87039 struct mca_bus {
87040 u64 default_dma_mask;
87041diff --git a/include/linux/memory.h b/include/linux/memory.h
87042index 37fa19b..b597c85 100644
87043--- a/include/linux/memory.h
87044+++ b/include/linux/memory.h
87045@@ -108,7 +108,7 @@ struct memory_accessor {
87046 size_t count);
87047 ssize_t (*write)(struct memory_accessor *, const char *buf,
87048 off_t offset, size_t count);
87049-};
87050+} __no_const;
87051
87052 /*
87053 * Kernel text modification mutex, used for code patching. Users of this lock
87054diff --git a/include/linux/mm.h b/include/linux/mm.h
87055index 11e5be6..1ff2423 100644
87056--- a/include/linux/mm.h
87057+++ b/include/linux/mm.h
87058@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
87059
87060 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
87061 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
87062+
87063+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
87064+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
87065+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
87066+#else
87067 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
87068+#endif
87069+
87070 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
87071 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
87072
87073@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
87074 int set_page_dirty_lock(struct page *page);
87075 int clear_page_dirty_for_io(struct page *page);
87076
87077-/* Is the vma a continuation of the stack vma above it? */
87078-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
87079-{
87080- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
87081-}
87082-
87083 extern unsigned long move_page_tables(struct vm_area_struct *vma,
87084 unsigned long old_addr, struct vm_area_struct *new_vma,
87085 unsigned long new_addr, unsigned long len);
87086@@ -890,6 +891,8 @@ struct shrinker {
87087 extern void register_shrinker(struct shrinker *);
87088 extern void unregister_shrinker(struct shrinker *);
87089
87090+pgprot_t vm_get_page_prot(unsigned long vm_flags);
87091+
87092 int vma_wants_writenotify(struct vm_area_struct *vma);
87093
87094 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
87095@@ -1162,6 +1165,7 @@ out:
87096 }
87097
87098 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
87099+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
87100
87101 extern unsigned long do_brk(unsigned long, unsigned long);
87102
87103@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
87104 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
87105 struct vm_area_struct **pprev);
87106
87107+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
87108+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
87109+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
87110+
87111 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
87112 NULL if none. Assume start_addr < end_addr. */
87113 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
87114@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
87115 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
87116 }
87117
87118-pgprot_t vm_get_page_prot(unsigned long vm_flags);
87119 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
87120 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
87121 unsigned long pfn, unsigned long size, pgprot_t);
87122@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
87123 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
87124 extern int sysctl_memory_failure_early_kill;
87125 extern int sysctl_memory_failure_recovery;
87126-extern atomic_long_t mce_bad_pages;
87127+extern atomic_long_unchecked_t mce_bad_pages;
87128+
87129+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87130+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
87131+#else
87132+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
87133+#endif
87134
87135 #endif /* __KERNEL__ */
87136 #endif /* _LINUX_MM_H */
87137diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
87138index 9d12ed5..6d9707a 100644
87139--- a/include/linux/mm_types.h
87140+++ b/include/linux/mm_types.h
87141@@ -186,6 +186,8 @@ struct vm_area_struct {
87142 #ifdef CONFIG_NUMA
87143 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
87144 #endif
87145+
87146+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
87147 };
87148
87149 struct core_thread {
87150@@ -287,6 +289,24 @@ struct mm_struct {
87151 #ifdef CONFIG_MMU_NOTIFIER
87152 struct mmu_notifier_mm *mmu_notifier_mm;
87153 #endif
87154+
87155+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87156+ unsigned long pax_flags;
87157+#endif
87158+
87159+#ifdef CONFIG_PAX_DLRESOLVE
87160+ unsigned long call_dl_resolve;
87161+#endif
87162+
87163+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
87164+ unsigned long call_syscall;
87165+#endif
87166+
87167+#ifdef CONFIG_PAX_ASLR
87168+ unsigned long delta_mmap; /* randomized offset */
87169+ unsigned long delta_stack; /* randomized offset */
87170+#endif
87171+
87172 };
87173
87174 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
87175diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
87176index 4e02ee2..afb159e 100644
87177--- a/include/linux/mmu_notifier.h
87178+++ b/include/linux/mmu_notifier.h
87179@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
87180 */
87181 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
87182 ({ \
87183- pte_t __pte; \
87184+ pte_t ___pte; \
87185 struct vm_area_struct *___vma = __vma; \
87186 unsigned long ___address = __address; \
87187- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
87188+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
87189 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
87190- __pte; \
87191+ ___pte; \
87192 })
87193
87194 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
87195diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
87196index 6c31a2a..4b0e930 100644
87197--- a/include/linux/mmzone.h
87198+++ b/include/linux/mmzone.h
87199@@ -350,7 +350,7 @@ struct zone {
87200 unsigned long flags; /* zone flags, see below */
87201
87202 /* Zone statistics */
87203- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87204+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87205
87206 /*
87207 * prev_priority holds the scanning priority for this zone. It is
87208diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
87209index f58e9d8..3503935 100644
87210--- a/include/linux/mod_devicetable.h
87211+++ b/include/linux/mod_devicetable.h
87212@@ -12,7 +12,7 @@
87213 typedef unsigned long kernel_ulong_t;
87214 #endif
87215
87216-#define PCI_ANY_ID (~0)
87217+#define PCI_ANY_ID ((__u16)~0)
87218
87219 struct pci_device_id {
87220 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
87221@@ -131,7 +131,7 @@ struct usb_device_id {
87222 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
87223 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
87224
87225-#define HID_ANY_ID (~0)
87226+#define HID_ANY_ID (~0U)
87227
87228 struct hid_device_id {
87229 __u16 bus;
87230diff --git a/include/linux/module.h b/include/linux/module.h
87231index 482efc8..642032b 100644
87232--- a/include/linux/module.h
87233+++ b/include/linux/module.h
87234@@ -16,6 +16,7 @@
87235 #include <linux/kobject.h>
87236 #include <linux/moduleparam.h>
87237 #include <linux/tracepoint.h>
87238+#include <linux/fs.h>
87239
87240 #include <asm/local.h>
87241 #include <asm/module.h>
87242@@ -287,16 +288,16 @@ struct module
87243 int (*init)(void);
87244
87245 /* If this is non-NULL, vfree after init() returns */
87246- void *module_init;
87247+ void *module_init_rx, *module_init_rw;
87248
87249 /* Here is the actual code + data, vfree'd on unload. */
87250- void *module_core;
87251+ void *module_core_rx, *module_core_rw;
87252
87253 /* Here are the sizes of the init and core sections */
87254- unsigned int init_size, core_size;
87255+ unsigned int init_size_rw, core_size_rw;
87256
87257 /* The size of the executable code in each section. */
87258- unsigned int init_text_size, core_text_size;
87259+ unsigned int init_size_rx, core_size_rx;
87260
87261 /* Arch-specific module values */
87262 struct mod_arch_specific arch;
87263@@ -345,6 +346,10 @@ struct module
87264 #ifdef CONFIG_EVENT_TRACING
87265 struct ftrace_event_call *trace_events;
87266 unsigned int num_trace_events;
87267+ struct file_operations trace_id;
87268+ struct file_operations trace_enable;
87269+ struct file_operations trace_format;
87270+ struct file_operations trace_filter;
87271 #endif
87272 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
87273 unsigned long *ftrace_callsites;
87274@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
87275 bool is_module_address(unsigned long addr);
87276 bool is_module_text_address(unsigned long addr);
87277
87278+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
87279+{
87280+
87281+#ifdef CONFIG_PAX_KERNEXEC
87282+ if (ktla_ktva(addr) >= (unsigned long)start &&
87283+ ktla_ktva(addr) < (unsigned long)start + size)
87284+ return 1;
87285+#endif
87286+
87287+ return ((void *)addr >= start && (void *)addr < start + size);
87288+}
87289+
87290+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
87291+{
87292+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
87293+}
87294+
87295+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
87296+{
87297+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
87298+}
87299+
87300+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
87301+{
87302+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
87303+}
87304+
87305+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
87306+{
87307+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
87308+}
87309+
87310 static inline int within_module_core(unsigned long addr, struct module *mod)
87311 {
87312- return (unsigned long)mod->module_core <= addr &&
87313- addr < (unsigned long)mod->module_core + mod->core_size;
87314+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
87315 }
87316
87317 static inline int within_module_init(unsigned long addr, struct module *mod)
87318 {
87319- return (unsigned long)mod->module_init <= addr &&
87320- addr < (unsigned long)mod->module_init + mod->init_size;
87321+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
87322 }
87323
87324 /* Search for module by name: must hold module_mutex. */
87325diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
87326index c1f40c2..e875ff4 100644
87327--- a/include/linux/moduleloader.h
87328+++ b/include/linux/moduleloader.h
87329@@ -18,11 +18,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
87330
87331 /* Allocator used for allocating struct module, core sections and init
87332 sections. Returns NULL on failure. */
87333-void *module_alloc(unsigned long size);
87334+void *module_alloc(unsigned long size) __size_overflow(1);
87335+
87336+#ifdef CONFIG_PAX_KERNEXEC
87337+void *module_alloc_exec(unsigned long size);
87338+#else
87339+#define module_alloc_exec(x) module_alloc(x)
87340+#endif
87341
87342 /* Free memory returned from module_alloc. */
87343 void module_free(struct module *mod, void *module_region);
87344
87345+#ifdef CONFIG_PAX_KERNEXEC
87346+void module_free_exec(struct module *mod, void *module_region);
87347+#else
87348+#define module_free_exec(x, y) module_free((x), (y))
87349+#endif
87350+
87351 /* Apply the given relocation to the (simplified) ELF. Return -error
87352 or 0. */
87353 int apply_relocate(Elf_Shdr *sechdrs,
87354diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
87355index 82a9124..8a5f622 100644
87356--- a/include/linux/moduleparam.h
87357+++ b/include/linux/moduleparam.h
87358@@ -132,7 +132,7 @@ struct kparam_array
87359
87360 /* Actually copy string: maxlen param is usually sizeof(string). */
87361 #define module_param_string(name, string, len, perm) \
87362- static const struct kparam_string __param_string_##name \
87363+ static const struct kparam_string __param_string_##name __used \
87364 = { len, string }; \
87365 __module_param_call(MODULE_PARAM_PREFIX, name, \
87366 param_set_copystring, param_get_string, \
87367@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
87368
87369 /* Comma-separated array: *nump is set to number they actually specified. */
87370 #define module_param_array_named(name, array, type, nump, perm) \
87371- static const struct kparam_array __param_arr_##name \
87372+ static const struct kparam_array __param_arr_##name __used \
87373 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
87374 sizeof(array[0]), array }; \
87375 __module_param_call(MODULE_PARAM_PREFIX, name, \
87376diff --git a/include/linux/mutex.h b/include/linux/mutex.h
87377index 878cab4..c92cb3e 100644
87378--- a/include/linux/mutex.h
87379+++ b/include/linux/mutex.h
87380@@ -51,7 +51,7 @@ struct mutex {
87381 spinlock_t wait_lock;
87382 struct list_head wait_list;
87383 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
87384- struct thread_info *owner;
87385+ struct task_struct *owner;
87386 #endif
87387 #ifdef CONFIG_DEBUG_MUTEXES
87388 const char *name;
87389diff --git a/include/linux/namei.h b/include/linux/namei.h
87390index ec0f607..d19e675 100644
87391--- a/include/linux/namei.h
87392+++ b/include/linux/namei.h
87393@@ -22,7 +22,7 @@ struct nameidata {
87394 unsigned int flags;
87395 int last_type;
87396 unsigned depth;
87397- char *saved_names[MAX_NESTED_LINKS + 1];
87398+ const char *saved_names[MAX_NESTED_LINKS + 1];
87399
87400 /* Intent data */
87401 union {
87402@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
87403 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
87404 extern void unlock_rename(struct dentry *, struct dentry *);
87405
87406-static inline void nd_set_link(struct nameidata *nd, char *path)
87407+static inline void nd_set_link(struct nameidata *nd, const char *path)
87408 {
87409 nd->saved_names[nd->depth] = path;
87410 }
87411
87412-static inline char *nd_get_link(struct nameidata *nd)
87413+static inline const char *nd_get_link(const struct nameidata *nd)
87414 {
87415 return nd->saved_names[nd->depth];
87416 }
87417diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
87418index 9d7e8f7..04428c5 100644
87419--- a/include/linux/netdevice.h
87420+++ b/include/linux/netdevice.h
87421@@ -637,6 +637,7 @@ struct net_device_ops {
87422 u16 xid);
87423 #endif
87424 };
87425+typedef struct net_device_ops __no_const net_device_ops_no_const;
87426
87427 /*
87428 * The DEVICE structure.
87429diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
87430new file mode 100644
87431index 0000000..33f4af8
87432--- /dev/null
87433+++ b/include/linux/netfilter/xt_gradm.h
87434@@ -0,0 +1,9 @@
87435+#ifndef _LINUX_NETFILTER_XT_GRADM_H
87436+#define _LINUX_NETFILTER_XT_GRADM_H 1
87437+
87438+struct xt_gradm_mtinfo {
87439+ __u16 flags;
87440+ __u16 invflags;
87441+};
87442+
87443+#endif
87444diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
87445index b359c4a..c08b334 100644
87446--- a/include/linux/nodemask.h
87447+++ b/include/linux/nodemask.h
87448@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
87449
87450 #define any_online_node(mask) \
87451 ({ \
87452- int node; \
87453- for_each_node_mask(node, (mask)) \
87454- if (node_online(node)) \
87455+ int __node; \
87456+ for_each_node_mask(__node, (mask)) \
87457+ if (node_online(__node)) \
87458 break; \
87459- node; \
87460+ __node; \
87461 })
87462
87463 #define num_online_nodes() num_node_state(N_ONLINE)
87464diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
87465index 5171639..81f30d3 100644
87466--- a/include/linux/oprofile.h
87467+++ b/include/linux/oprofile.h
87468@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
87469 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
87470 char const * name, ulong * val);
87471
87472-/** Create a file for read-only access to an atomic_t. */
87473+/** Create a file for read-only access to an atomic_unchecked_t. */
87474 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
87475- char const * name, atomic_t * val);
87476+ char const * name, atomic_unchecked_t * val);
87477
87478 /** create a directory */
87479 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
87480@@ -153,7 +153,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
87481 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
87482 * Returns 0 on success, < 0 on error.
87483 */
87484-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
87485+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
87486
87487 /** lock for read/write safety */
87488 extern spinlock_t oprofilefs_lock;
87489diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
87490index 3c62ed4..8924c7c 100644
87491--- a/include/linux/pagemap.h
87492+++ b/include/linux/pagemap.h
87493@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
87494 if (((unsigned long)uaddr & PAGE_MASK) !=
87495 ((unsigned long)end & PAGE_MASK))
87496 ret = __get_user(c, end);
87497+ (void)c;
87498 }
87499+ (void)c;
87500 return ret;
87501 }
87502
87503diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
87504index 81c9689..a567a55 100644
87505--- a/include/linux/perf_event.h
87506+++ b/include/linux/perf_event.h
87507@@ -476,7 +476,7 @@ struct hw_perf_event {
87508 struct hrtimer hrtimer;
87509 };
87510 };
87511- atomic64_t prev_count;
87512+ atomic64_unchecked_t prev_count;
87513 u64 sample_period;
87514 u64 last_period;
87515 atomic64_t period_left;
87516@@ -557,7 +557,7 @@ struct perf_event {
87517 const struct pmu *pmu;
87518
87519 enum perf_event_active_state state;
87520- atomic64_t count;
87521+ atomic64_unchecked_t count;
87522
87523 /*
87524 * These are the total time in nanoseconds that the event
87525@@ -595,8 +595,8 @@ struct perf_event {
87526 * These accumulate total time (in nanoseconds) that children
87527 * events have been enabled and running, respectively.
87528 */
87529- atomic64_t child_total_time_enabled;
87530- atomic64_t child_total_time_running;
87531+ atomic64_unchecked_t child_total_time_enabled;
87532+ atomic64_unchecked_t child_total_time_running;
87533
87534 /*
87535 * Protect attach/detach and child_list:
87536diff --git a/include/linux/personality.h b/include/linux/personality.h
87537index 1261208..ddef96f 100644
87538--- a/include/linux/personality.h
87539+++ b/include/linux/personality.h
87540@@ -43,6 +43,7 @@ enum {
87541 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87542 ADDR_NO_RANDOMIZE | \
87543 ADDR_COMPAT_LAYOUT | \
87544+ ADDR_LIMIT_3GB | \
87545 MMAP_PAGE_ZERO)
87546
87547 /*
87548diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
87549index b43a9e0..b77d869 100644
87550--- a/include/linux/pipe_fs_i.h
87551+++ b/include/linux/pipe_fs_i.h
87552@@ -46,9 +46,9 @@ struct pipe_inode_info {
87553 wait_queue_head_t wait;
87554 unsigned int nrbufs, curbuf;
87555 struct page *tmp_page;
87556- unsigned int readers;
87557- unsigned int writers;
87558- unsigned int waiting_writers;
87559+ atomic_t readers;
87560+ atomic_t writers;
87561+ atomic_t waiting_writers;
87562 unsigned int r_counter;
87563 unsigned int w_counter;
87564 struct fasync_struct *fasync_readers;
87565diff --git a/include/linux/poison.h b/include/linux/poison.h
87566index 34066ff..e95d744 100644
87567--- a/include/linux/poison.h
87568+++ b/include/linux/poison.h
87569@@ -19,8 +19,8 @@
87570 * under normal circumstances, used to verify that nobody uses
87571 * non-initialized list entries.
87572 */
87573-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
87574-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
87575+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
87576+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
87577
87578 /********** include/linux/timer.h **********/
87579 /*
87580diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
87581index 4f71bf4..cd2f68e 100644
87582--- a/include/linux/posix-timers.h
87583+++ b/include/linux/posix-timers.h
87584@@ -82,7 +82,8 @@ struct k_clock {
87585 #define TIMER_RETRY 1
87586 void (*timer_get) (struct k_itimer * timr,
87587 struct itimerspec * cur_setting);
87588-};
87589+} __do_const;
87590+typedef struct k_clock __no_const k_clock_no_const;
87591
87592 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
87593
87594diff --git a/include/linux/preempt.h b/include/linux/preempt.h
87595index 72b1a10..13303a9 100644
87596--- a/include/linux/preempt.h
87597+++ b/include/linux/preempt.h
87598@@ -110,7 +110,7 @@ struct preempt_ops {
87599 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
87600 void (*sched_out)(struct preempt_notifier *notifier,
87601 struct task_struct *next);
87602-};
87603+} __no_const;
87604
87605 /**
87606 * preempt_notifier - key for installing preemption notifiers
87607diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
87608index af7c36a..a93005c 100644
87609--- a/include/linux/prefetch.h
87610+++ b/include/linux/prefetch.h
87611@@ -11,6 +11,7 @@
87612 #define _LINUX_PREFETCH_H
87613
87614 #include <linux/types.h>
87615+#include <linux/const.h>
87616 #include <asm/processor.h>
87617 #include <asm/cache.h>
87618
87619diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
87620index 379eaed..1bf73e3 100644
87621--- a/include/linux/proc_fs.h
87622+++ b/include/linux/proc_fs.h
87623@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
87624 return proc_create_data(name, mode, parent, proc_fops, NULL);
87625 }
87626
87627+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
87628+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
87629+{
87630+#ifdef CONFIG_GRKERNSEC_PROC_USER
87631+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
87632+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87633+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
87634+#else
87635+ return proc_create_data(name, mode, parent, proc_fops, NULL);
87636+#endif
87637+}
87638+
87639+
87640 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
87641 mode_t mode, struct proc_dir_entry *base,
87642 read_proc_t *read_proc, void * data)
87643@@ -256,7 +269,7 @@ union proc_op {
87644 int (*proc_show)(struct seq_file *m,
87645 struct pid_namespace *ns, struct pid *pid,
87646 struct task_struct *task);
87647-};
87648+} __no_const;
87649
87650 struct ctl_table_header;
87651 struct ctl_table;
87652diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
87653index 7456d7d..6c1cfc9 100644
87654--- a/include/linux/ptrace.h
87655+++ b/include/linux/ptrace.h
87656@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
87657 extern void exit_ptrace(struct task_struct *tracer);
87658 #define PTRACE_MODE_READ 1
87659 #define PTRACE_MODE_ATTACH 2
87660-/* Returns 0 on success, -errno on denial. */
87661-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
87662 /* Returns true on success, false on denial. */
87663 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
87664+/* Returns true on success, false on denial. */
87665+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
87666
87667 static inline int ptrace_reparented(struct task_struct *child)
87668 {
87669diff --git a/include/linux/random.h b/include/linux/random.h
87670index 2948046..3262567 100644
87671--- a/include/linux/random.h
87672+++ b/include/linux/random.h
87673@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
87674 u32 random32(void);
87675 void srandom32(u32 seed);
87676
87677+static inline unsigned long pax_get_random_long(void)
87678+{
87679+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
87680+}
87681+
87682 #endif /* __KERNEL___ */
87683
87684 #endif /* _LINUX_RANDOM_H */
87685diff --git a/include/linux/reboot.h b/include/linux/reboot.h
87686index 988e55f..17cb4ef 100644
87687--- a/include/linux/reboot.h
87688+++ b/include/linux/reboot.h
87689@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
87690 * Architecture-specific implementations of sys_reboot commands.
87691 */
87692
87693-extern void machine_restart(char *cmd);
87694-extern void machine_halt(void);
87695-extern void machine_power_off(void);
87696+extern void machine_restart(char *cmd) __noreturn;
87697+extern void machine_halt(void) __noreturn;
87698+extern void machine_power_off(void) __noreturn;
87699
87700 extern void machine_shutdown(void);
87701 struct pt_regs;
87702@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
87703 */
87704
87705 extern void kernel_restart_prepare(char *cmd);
87706-extern void kernel_restart(char *cmd);
87707-extern void kernel_halt(void);
87708-extern void kernel_power_off(void);
87709+extern void kernel_restart(char *cmd) __noreturn;
87710+extern void kernel_halt(void) __noreturn;
87711+extern void kernel_power_off(void) __noreturn;
87712
87713 void ctrl_alt_del(void);
87714
87715@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
87716 * Emergency restart, callable from an interrupt handler.
87717 */
87718
87719-extern void emergency_restart(void);
87720+extern void emergency_restart(void) __noreturn;
87721 #include <asm/emergency-restart.h>
87722
87723 #endif
87724diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
87725index dd31e7b..5b03c5c 100644
87726--- a/include/linux/reiserfs_fs.h
87727+++ b/include/linux/reiserfs_fs.h
87728@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
87729 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
87730
87731 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
87732-#define get_generation(s) atomic_read (&fs_generation(s))
87733+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
87734 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
87735 #define __fs_changed(gen,s) (gen != get_generation (s))
87736 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
87737@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
87738 */
87739
87740 struct item_operations {
87741- int (*bytes_number) (struct item_head * ih, int block_size);
87742- void (*decrement_key) (struct cpu_key *);
87743- int (*is_left_mergeable) (struct reiserfs_key * ih,
87744+ int (* const bytes_number) (struct item_head * ih, int block_size);
87745+ void (* const decrement_key) (struct cpu_key *);
87746+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
87747 unsigned long bsize);
87748- void (*print_item) (struct item_head *, char *item);
87749- void (*check_item) (struct item_head *, char *item);
87750+ void (* const print_item) (struct item_head *, char *item);
87751+ void (* const check_item) (struct item_head *, char *item);
87752
87753- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87754+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87755 int is_affected, int insert_size);
87756- int (*check_left) (struct virtual_item * vi, int free,
87757+ int (* const check_left) (struct virtual_item * vi, int free,
87758 int start_skip, int end_skip);
87759- int (*check_right) (struct virtual_item * vi, int free);
87760- int (*part_size) (struct virtual_item * vi, int from, int to);
87761- int (*unit_num) (struct virtual_item * vi);
87762- void (*print_vi) (struct virtual_item * vi);
87763+ int (* const check_right) (struct virtual_item * vi, int free);
87764+ int (* const part_size) (struct virtual_item * vi, int from, int to);
87765+ int (* const unit_num) (struct virtual_item * vi);
87766+ void (* const print_vi) (struct virtual_item * vi);
87767 };
87768
87769-extern struct item_operations *item_ops[TYPE_ANY + 1];
87770+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
87771
87772 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
87773 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
87774diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
87775index dab68bb..0688727 100644
87776--- a/include/linux/reiserfs_fs_sb.h
87777+++ b/include/linux/reiserfs_fs_sb.h
87778@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
87779 /* Comment? -Hans */
87780 wait_queue_head_t s_wait;
87781 /* To be obsoleted soon by per buffer seals.. -Hans */
87782- atomic_t s_generation_counter; // increased by one every time the
87783+ atomic_unchecked_t s_generation_counter; // increased by one every time the
87784 // tree gets re-balanced
87785 unsigned long s_properties; /* File system properties. Currently holds
87786 on-disk FS format */
87787diff --git a/include/linux/relay.h b/include/linux/relay.h
87788index 14a86bc..17d0700 100644
87789--- a/include/linux/relay.h
87790+++ b/include/linux/relay.h
87791@@ -159,7 +159,7 @@ struct rchan_callbacks
87792 * The callback should return 0 if successful, negative if not.
87793 */
87794 int (*remove_buf_file)(struct dentry *dentry);
87795-};
87796+} __no_const;
87797
87798 /*
87799 * CONFIG_RELAY kernel API, kernel/relay.c
87800diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
87801index 3392c59..a746428 100644
87802--- a/include/linux/rfkill.h
87803+++ b/include/linux/rfkill.h
87804@@ -144,6 +144,7 @@ struct rfkill_ops {
87805 void (*query)(struct rfkill *rfkill, void *data);
87806 int (*set_block)(void *data, bool blocked);
87807 };
87808+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
87809
87810 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
87811 /**
87812diff --git a/include/linux/sched.h b/include/linux/sched.h
87813index 71849bf..8cf9dd2 100644
87814--- a/include/linux/sched.h
87815+++ b/include/linux/sched.h
87816@@ -101,6 +101,7 @@ struct bio;
87817 struct fs_struct;
87818 struct bts_context;
87819 struct perf_event_context;
87820+struct linux_binprm;
87821
87822 /*
87823 * List of flags we want to share for kernel threads,
87824@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
87825 extern signed long schedule_timeout_uninterruptible(signed long timeout);
87826 asmlinkage void __schedule(void);
87827 asmlinkage void schedule(void);
87828-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
87829+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
87830
87831 struct nsproxy;
87832 struct user_namespace;
87833@@ -371,9 +372,12 @@ struct user_namespace;
87834 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
87835
87836 extern int sysctl_max_map_count;
87837+extern unsigned long sysctl_heap_stack_gap;
87838
87839 #include <linux/aio.h>
87840
87841+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
87842+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
87843 extern unsigned long
87844 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
87845 unsigned long, unsigned long);
87846@@ -666,6 +670,16 @@ struct signal_struct {
87847 struct tty_audit_buf *tty_audit_buf;
87848 #endif
87849
87850+#ifdef CONFIG_GRKERNSEC
87851+ u32 curr_ip;
87852+ u32 saved_ip;
87853+ u32 gr_saddr;
87854+ u32 gr_daddr;
87855+ u16 gr_sport;
87856+ u16 gr_dport;
87857+ u8 used_accept:1;
87858+#endif
87859+
87860 int oom_adj; /* OOM kill score adjustment (bit shift) */
87861 };
87862
87863@@ -723,6 +737,11 @@ struct user_struct {
87864 struct key *session_keyring; /* UID's default session keyring */
87865 #endif
87866
87867+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
87868+ unsigned int banned;
87869+ unsigned long ban_expires;
87870+#endif
87871+
87872 /* Hash table maintenance information */
87873 struct hlist_node uidhash_node;
87874 uid_t uid;
87875@@ -1328,8 +1347,8 @@ struct task_struct {
87876 struct list_head thread_group;
87877
87878 struct completion *vfork_done; /* for vfork() */
87879- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
87880- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87881+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
87882+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87883
87884 cputime_t utime, stime, utimescaled, stimescaled;
87885 cputime_t gtime;
87886@@ -1343,16 +1362,6 @@ struct task_struct {
87887 struct task_cputime cputime_expires;
87888 struct list_head cpu_timers[3];
87889
87890-/* process credentials */
87891- const struct cred *real_cred; /* objective and real subjective task
87892- * credentials (COW) */
87893- const struct cred *cred; /* effective (overridable) subjective task
87894- * credentials (COW) */
87895- struct mutex cred_guard_mutex; /* guard against foreign influences on
87896- * credential calculations
87897- * (notably. ptrace) */
87898- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87899-
87900 char comm[TASK_COMM_LEN]; /* executable name excluding path
87901 - access with [gs]et_task_comm (which lock
87902 it with task_lock())
87903@@ -1369,6 +1378,10 @@ struct task_struct {
87904 #endif
87905 /* CPU-specific state of this task */
87906 struct thread_struct thread;
87907+/* thread_info moved to task_struct */
87908+#ifdef CONFIG_X86
87909+ struct thread_info tinfo;
87910+#endif
87911 /* filesystem information */
87912 struct fs_struct *fs;
87913 /* open file information */
87914@@ -1436,6 +1449,15 @@ struct task_struct {
87915 int hardirq_context;
87916 int softirq_context;
87917 #endif
87918+
87919+/* process credentials */
87920+ const struct cred *real_cred; /* objective and real subjective task
87921+ * credentials (COW) */
87922+ struct mutex cred_guard_mutex; /* guard against foreign influences on
87923+ * credential calculations
87924+ * (notably. ptrace) */
87925+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87926+
87927 #ifdef CONFIG_LOCKDEP
87928 # define MAX_LOCK_DEPTH 48UL
87929 u64 curr_chain_key;
87930@@ -1456,6 +1478,9 @@ struct task_struct {
87931
87932 struct backing_dev_info *backing_dev_info;
87933
87934+ const struct cred *cred; /* effective (overridable) subjective task
87935+ * credentials (COW) */
87936+
87937 struct io_context *io_context;
87938
87939 unsigned long ptrace_message;
87940@@ -1519,6 +1544,27 @@ struct task_struct {
87941 unsigned long default_timer_slack_ns;
87942
87943 struct list_head *scm_work_list;
87944+
87945+#ifdef CONFIG_GRKERNSEC
87946+ /* grsecurity */
87947+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87948+ u64 exec_id;
87949+#endif
87950+#ifdef CONFIG_GRKERNSEC_SETXID
87951+ const struct cred *delayed_cred;
87952+#endif
87953+ struct dentry *gr_chroot_dentry;
87954+ struct acl_subject_label *acl;
87955+ struct acl_role_label *role;
87956+ struct file *exec_file;
87957+ u16 acl_role_id;
87958+ /* is this the task that authenticated to the special role */
87959+ u8 acl_sp_role;
87960+ u8 is_writable;
87961+ u8 brute;
87962+ u8 gr_is_chrooted;
87963+#endif
87964+
87965 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
87966 /* Index of current stored adress in ret_stack */
87967 int curr_ret_stack;
87968@@ -1542,6 +1588,57 @@ struct task_struct {
87969 #endif /* CONFIG_TRACING */
87970 };
87971
87972+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
87973+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
87974+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
87975+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
87976+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
87977+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
87978+
87979+#ifdef CONFIG_PAX_SOFTMODE
87980+extern int pax_softmode;
87981+#endif
87982+
87983+extern int pax_check_flags(unsigned long *);
87984+
87985+/* if tsk != current then task_lock must be held on it */
87986+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87987+static inline unsigned long pax_get_flags(struct task_struct *tsk)
87988+{
87989+ if (likely(tsk->mm))
87990+ return tsk->mm->pax_flags;
87991+ else
87992+ return 0UL;
87993+}
87994+
87995+/* if tsk != current then task_lock must be held on it */
87996+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
87997+{
87998+ if (likely(tsk->mm)) {
87999+ tsk->mm->pax_flags = flags;
88000+ return 0;
88001+ }
88002+ return -EINVAL;
88003+}
88004+#endif
88005+
88006+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
88007+extern void pax_set_initial_flags(struct linux_binprm *bprm);
88008+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
88009+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
88010+#endif
88011+
88012+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
88013+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
88014+extern void pax_report_refcount_overflow(struct pt_regs *regs);
88015+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
88016+
88017+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
88018+extern void pax_track_stack(void);
88019+#else
88020+static inline void pax_track_stack(void) {}
88021+#endif
88022+
88023 /* Future-safe accessor for struct task_struct's cpus_allowed. */
88024 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
88025
88026@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
88027 #define PF_DUMPCORE 0x00000200 /* dumped core */
88028 #define PF_SIGNALED 0x00000400 /* killed by a signal */
88029 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
88030-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
88031+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
88032 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
88033 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
88034 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
88035@@ -1978,7 +2075,9 @@ void yield(void);
88036 extern struct exec_domain default_exec_domain;
88037
88038 union thread_union {
88039+#ifndef CONFIG_X86
88040 struct thread_info thread_info;
88041+#endif
88042 unsigned long stack[THREAD_SIZE/sizeof(long)];
88043 };
88044
88045@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
88046 */
88047
88048 extern struct task_struct *find_task_by_vpid(pid_t nr);
88049+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
88050 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
88051 struct pid_namespace *ns);
88052
88053@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
88054 extern void exit_itimers(struct signal_struct *);
88055 extern void flush_itimer_signals(void);
88056
88057-extern NORET_TYPE void do_group_exit(int);
88058+extern __noreturn void do_group_exit(int);
88059
88060 extern void daemonize(const char *, ...);
88061 extern int allow_signal(int);
88062@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
88063
88064 #endif
88065
88066-static inline int object_is_on_stack(void *obj)
88067+static inline int object_starts_on_stack(void *obj)
88068 {
88069- void *stack = task_stack_page(current);
88070+ const void *stack = task_stack_page(current);
88071
88072 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
88073 }
88074
88075+#ifdef CONFIG_PAX_USERCOPY
88076+extern int object_is_on_stack(const void *obj, unsigned long len);
88077+#endif
88078+
88079 extern void thread_info_cache_init(void);
88080
88081 #ifdef CONFIG_DEBUG_STACK_USAGE
88082@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
88083 return task_rlimit_max(current, limit);
88084 }
88085
88086+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88087+DECLARE_PER_CPU(u64, exec_counter);
88088+static inline void increment_exec_counter(void)
88089+{
88090+ unsigned int cpu;
88091+ u64 *exec_id_ptr;
88092+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
88093+ cpu = get_cpu();
88094+ exec_id_ptr = &per_cpu(exec_counter, cpu);
88095+ *exec_id_ptr += 1ULL << 16;
88096+ current->exec_id = *exec_id_ptr;
88097+ put_cpu();
88098+}
88099+#else
88100+static inline void increment_exec_counter(void) {}
88101+#endif
88102+
88103 #endif /* __KERNEL__ */
88104
88105 #endif
88106diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
88107index 1ee2c05..81b7ec4 100644
88108--- a/include/linux/screen_info.h
88109+++ b/include/linux/screen_info.h
88110@@ -42,7 +42,8 @@ struct screen_info {
88111 __u16 pages; /* 0x32 */
88112 __u16 vesa_attributes; /* 0x34 */
88113 __u32 capabilities; /* 0x36 */
88114- __u8 _reserved[6]; /* 0x3a */
88115+ __u16 vesapm_size; /* 0x3a */
88116+ __u8 _reserved[4]; /* 0x3c */
88117 } __attribute__((packed));
88118
88119 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88120diff --git a/include/linux/security.h b/include/linux/security.h
88121index d40d23f..d739b08 100644
88122--- a/include/linux/security.h
88123+++ b/include/linux/security.h
88124@@ -34,6 +34,7 @@
88125 #include <linux/key.h>
88126 #include <linux/xfrm.h>
88127 #include <linux/gfp.h>
88128+#include <linux/grsecurity.h>
88129 #include <net/flow.h>
88130
88131 /* Maximum number of letters for an LSM name string */
88132@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
88133 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
88134 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
88135 extern int cap_task_setnice(struct task_struct *p, int nice);
88136-extern int cap_syslog(int type);
88137+extern int cap_syslog(int type, bool from_file);
88138 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
88139
88140 struct msghdr;
88141@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
88142 * logging to the console.
88143 * See the syslog(2) manual page for an explanation of the @type values.
88144 * @type contains the type of action.
88145+ * @from_file indicates the context of action (if it came from /proc).
88146 * Return 0 if permission is granted.
88147 * @settime:
88148 * Check permission to change the system time.
88149@@ -1445,7 +1447,7 @@ struct security_operations {
88150 int (*sysctl) (struct ctl_table *table, int op);
88151 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
88152 int (*quota_on) (struct dentry *dentry);
88153- int (*syslog) (int type);
88154+ int (*syslog) (int type, bool from_file);
88155 int (*settime) (struct timespec *ts, struct timezone *tz);
88156 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
88157
88158@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
88159 int security_sysctl(struct ctl_table *table, int op);
88160 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
88161 int security_quota_on(struct dentry *dentry);
88162-int security_syslog(int type);
88163+int security_syslog(int type, bool from_file);
88164 int security_settime(struct timespec *ts, struct timezone *tz);
88165 int security_vm_enough_memory(long pages);
88166 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
88167@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
88168 return 0;
88169 }
88170
88171-static inline int security_syslog(int type)
88172+static inline int security_syslog(int type, bool from_file)
88173 {
88174- return cap_syslog(type);
88175+ return cap_syslog(type, from_file);
88176 }
88177
88178 static inline int security_settime(struct timespec *ts, struct timezone *tz)
88179diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
88180index 8366d8f..cc5f9d6 100644
88181--- a/include/linux/seq_file.h
88182+++ b/include/linux/seq_file.h
88183@@ -23,6 +23,9 @@ struct seq_file {
88184 u64 version;
88185 struct mutex lock;
88186 const struct seq_operations *op;
88187+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88188+ u64 exec_id;
88189+#endif
88190 void *private;
88191 };
88192
88193@@ -32,6 +35,7 @@ struct seq_operations {
88194 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
88195 int (*show) (struct seq_file *m, void *v);
88196 };
88197+typedef struct seq_operations __no_const seq_operations_no_const;
88198
88199 #define SEQ_SKIP 1
88200
88201diff --git a/include/linux/shm.h b/include/linux/shm.h
88202index eca6235..c7417ed 100644
88203--- a/include/linux/shm.h
88204+++ b/include/linux/shm.h
88205@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
88206 pid_t shm_cprid;
88207 pid_t shm_lprid;
88208 struct user_struct *mlock_user;
88209+#ifdef CONFIG_GRKERNSEC
88210+ time_t shm_createtime;
88211+ pid_t shm_lapid;
88212+#endif
88213 };
88214
88215 /* shm_mode upper byte flags */
88216diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
88217index bcdd660..fd2e332 100644
88218--- a/include/linux/skbuff.h
88219+++ b/include/linux/skbuff.h
88220@@ -14,6 +14,7 @@
88221 #ifndef _LINUX_SKBUFF_H
88222 #define _LINUX_SKBUFF_H
88223
88224+#include <linux/const.h>
88225 #include <linux/kernel.h>
88226 #include <linux/kmemcheck.h>
88227 #include <linux/compiler.h>
88228@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
88229 */
88230 static inline int skb_queue_empty(const struct sk_buff_head *list)
88231 {
88232- return list->next == (struct sk_buff *)list;
88233+ return list->next == (const struct sk_buff *)list;
88234 }
88235
88236 /**
88237@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
88238 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88239 const struct sk_buff *skb)
88240 {
88241- return (skb->next == (struct sk_buff *) list);
88242+ return (skb->next == (const struct sk_buff *) list);
88243 }
88244
88245 /**
88246@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88247 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
88248 const struct sk_buff *skb)
88249 {
88250- return (skb->prev == (struct sk_buff *) list);
88251+ return (skb->prev == (const struct sk_buff *) list);
88252 }
88253
88254 /**
88255@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
88256 * headroom, you should not reduce this.
88257 */
88258 #ifndef NET_SKB_PAD
88259-#define NET_SKB_PAD 32
88260+#define NET_SKB_PAD (_AC(32,UL))
88261 #endif
88262
88263 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
88264@@ -1489,6 +1490,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
88265 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
88266 }
88267
88268+static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
88269+ unsigned int length, gfp_t gfp)
88270+{
88271+ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
88272+
88273+ if (NET_IP_ALIGN && skb)
88274+ skb_reserve(skb, NET_IP_ALIGN);
88275+ return skb;
88276+}
88277+
88278+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
88279+ unsigned int length)
88280+{
88281+ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
88282+}
88283+
88284 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
88285
88286 /**
88287diff --git a/include/linux/slab.h b/include/linux/slab.h
88288index 2da8372..9e01add 100644
88289--- a/include/linux/slab.h
88290+++ b/include/linux/slab.h
88291@@ -11,12 +11,20 @@
88292
88293 #include <linux/gfp.h>
88294 #include <linux/types.h>
88295+#include <linux/err.h>
88296
88297 /*
88298 * Flags to pass to kmem_cache_create().
88299 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
88300 */
88301 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
88302+
88303+#ifdef CONFIG_PAX_USERCOPY
88304+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
88305+#else
88306+#define SLAB_USERCOPY 0x00000000UL
88307+#endif
88308+
88309 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
88310 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
88311 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
88312@@ -82,10 +90,13 @@
88313 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
88314 * Both make kfree a no-op.
88315 */
88316-#define ZERO_SIZE_PTR ((void *)16)
88317+#define ZERO_SIZE_PTR \
88318+({ \
88319+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
88320+ (void *)(-MAX_ERRNO-1L); \
88321+})
88322
88323-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
88324- (unsigned long)ZERO_SIZE_PTR)
88325+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
88326
88327 /*
88328 * struct kmem_cache related prototypes
88329@@ -133,11 +144,12 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
88330 /*
88331 * Common kmalloc functions provided by all allocators
88332 */
88333-void * __must_check __krealloc(const void *, size_t, gfp_t);
88334-void * __must_check krealloc(const void *, size_t, gfp_t);
88335+void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88336+void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88337 void kfree(const void *);
88338 void kzfree(const void *);
88339 size_t ksize(const void *);
88340+void check_object_size(const void *ptr, unsigned long n, bool to);
88341
88342 /*
88343 * Allocator specific definitions. These are mainly used to establish optimized
88344@@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
88345 * request comes from.
88346 */
88347 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88348-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88349+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
88350 #define kmalloc_track_caller(size, flags) \
88351 __kmalloc_track_caller(size, flags, _RET_IP_)
88352 #else
88353@@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88354 * allocation request comes from.
88355 */
88356 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88357-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
88358+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
88359 #define kmalloc_node_track_caller(size, flags, node) \
88360 __kmalloc_node_track_caller(size, flags, node, \
88361 _RET_IP_)
88362diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
88363index 850d057..33bad48 100644
88364--- a/include/linux/slab_def.h
88365+++ b/include/linux/slab_def.h
88366@@ -69,10 +69,10 @@ struct kmem_cache {
88367 unsigned long node_allocs;
88368 unsigned long node_frees;
88369 unsigned long node_overflow;
88370- atomic_t allochit;
88371- atomic_t allocmiss;
88372- atomic_t freehit;
88373- atomic_t freemiss;
88374+ atomic_unchecked_t allochit;
88375+ atomic_unchecked_t allocmiss;
88376+ atomic_unchecked_t freehit;
88377+ atomic_unchecked_t freemiss;
88378
88379 /*
88380 * If debugging is enabled, then the allocator can add additional
88381@@ -108,7 +108,7 @@ struct cache_sizes {
88382 extern struct cache_sizes malloc_sizes[];
88383
88384 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88385-void *__kmalloc(size_t size, gfp_t flags);
88386+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88387
88388 #ifdef CONFIG_KMEMTRACE
88389 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
88390@@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
88391 }
88392 #endif
88393
88394+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88395 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88396 {
88397 struct kmem_cache *cachep;
88398@@ -163,7 +164,7 @@ found:
88399 }
88400
88401 #ifdef CONFIG_NUMA
88402-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
88403+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88404 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88405
88406 #ifdef CONFIG_KMEMTRACE
88407@@ -180,6 +181,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
88408 }
88409 #endif
88410
88411+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88412 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88413 {
88414 struct kmem_cache *cachep;
88415diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
88416index 0ec00b3..65e7e0e 100644
88417--- a/include/linux/slob_def.h
88418+++ b/include/linux/slob_def.h
88419@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
88420 return kmem_cache_alloc_node(cachep, flags, -1);
88421 }
88422
88423-void *__kmalloc_node(size_t size, gfp_t flags, int node);
88424+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88425
88426+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88427 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88428 {
88429 return __kmalloc_node(size, flags, node);
88430@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88431 * kmalloc is the normal method of allocating memory
88432 * in the kernel.
88433 */
88434+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88435 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88436 {
88437 return __kmalloc_node(size, flags, -1);
88438 }
88439
88440+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88441 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
88442 {
88443 return kmalloc(size, flags);
88444diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
88445index 5ad70a6..8f0e2c8 100644
88446--- a/include/linux/slub_def.h
88447+++ b/include/linux/slub_def.h
88448@@ -86,7 +86,7 @@ struct kmem_cache {
88449 struct kmem_cache_order_objects max;
88450 struct kmem_cache_order_objects min;
88451 gfp_t allocflags; /* gfp flags to use on each alloc */
88452- int refcount; /* Refcount for slab cache destroy */
88453+ atomic_t refcount; /* Refcount for slab cache destroy */
88454 void (*ctor)(void *);
88455 int inuse; /* Offset to metadata */
88456 int align; /* Alignment */
88457@@ -197,6 +197,7 @@ static __always_inline int kmalloc_index(size_t size)
88458 * This ought to end up with a global pointer to the right cache
88459 * in kmalloc_caches.
88460 */
88461+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
88462 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88463 {
88464 int index = kmalloc_index(size);
88465@@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88466 #endif
88467
88468 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88469-void *__kmalloc(size_t size, gfp_t flags);
88470+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
88471
88472 #ifdef CONFIG_KMEMTRACE
88473 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
88474@@ -227,6 +228,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
88475 }
88476 #endif
88477
88478+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
88479 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88480 {
88481 unsigned int order = get_order(size);
88482@@ -238,6 +240,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88483 return ret;
88484 }
88485
88486+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88487 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88488 {
88489 void *ret;
88490@@ -263,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
88491 }
88492
88493 #ifdef CONFIG_NUMA
88494-void *__kmalloc_node(size_t size, gfp_t flags, int node);
88495+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88496 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88497
88498 #ifdef CONFIG_KMEMTRACE
88499@@ -280,6 +283,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
88500 }
88501 #endif
88502
88503+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88504 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88505 {
88506 void *ret;
88507diff --git a/include/linux/sonet.h b/include/linux/sonet.h
88508index 67ad11f..0bbd8af 100644
88509--- a/include/linux/sonet.h
88510+++ b/include/linux/sonet.h
88511@@ -61,7 +61,7 @@ struct sonet_stats {
88512 #include <asm/atomic.h>
88513
88514 struct k_sonet_stats {
88515-#define __HANDLE_ITEM(i) atomic_t i
88516+#define __HANDLE_ITEM(i) atomic_unchecked_t i
88517 __SONET_ITEMS
88518 #undef __HANDLE_ITEM
88519 };
88520diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
88521index 6f52b4d..5500323 100644
88522--- a/include/linux/sunrpc/cache.h
88523+++ b/include/linux/sunrpc/cache.h
88524@@ -125,7 +125,7 @@ struct cache_detail {
88525 */
88526 struct cache_req {
88527 struct cache_deferred_req *(*defer)(struct cache_req *req);
88528-};
88529+} __no_const;
88530 /* this must be embedded in a deferred_request that is being
88531 * delayed awaiting cache-fill
88532 */
88533diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
88534index 8ed9642..101ceab 100644
88535--- a/include/linux/sunrpc/clnt.h
88536+++ b/include/linux/sunrpc/clnt.h
88537@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
88538 {
88539 switch (sap->sa_family) {
88540 case AF_INET:
88541- return ntohs(((struct sockaddr_in *)sap)->sin_port);
88542+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
88543 case AF_INET6:
88544- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
88545+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
88546 }
88547 return 0;
88548 }
88549@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
88550 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
88551 const struct sockaddr *src)
88552 {
88553- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
88554+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
88555 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
88556
88557 dsin->sin_family = ssin->sin_family;
88558@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
88559 if (sa->sa_family != AF_INET6)
88560 return 0;
88561
88562- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
88563+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
88564 }
88565
88566 #endif /* __KERNEL__ */
88567diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
88568index c14fe86..393245e 100644
88569--- a/include/linux/sunrpc/svc_rdma.h
88570+++ b/include/linux/sunrpc/svc_rdma.h
88571@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
88572 extern unsigned int svcrdma_max_requests;
88573 extern unsigned int svcrdma_max_req_size;
88574
88575-extern atomic_t rdma_stat_recv;
88576-extern atomic_t rdma_stat_read;
88577-extern atomic_t rdma_stat_write;
88578-extern atomic_t rdma_stat_sq_starve;
88579-extern atomic_t rdma_stat_rq_starve;
88580-extern atomic_t rdma_stat_rq_poll;
88581-extern atomic_t rdma_stat_rq_prod;
88582-extern atomic_t rdma_stat_sq_poll;
88583-extern atomic_t rdma_stat_sq_prod;
88584+extern atomic_unchecked_t rdma_stat_recv;
88585+extern atomic_unchecked_t rdma_stat_read;
88586+extern atomic_unchecked_t rdma_stat_write;
88587+extern atomic_unchecked_t rdma_stat_sq_starve;
88588+extern atomic_unchecked_t rdma_stat_rq_starve;
88589+extern atomic_unchecked_t rdma_stat_rq_poll;
88590+extern atomic_unchecked_t rdma_stat_rq_prod;
88591+extern atomic_unchecked_t rdma_stat_sq_poll;
88592+extern atomic_unchecked_t rdma_stat_sq_prod;
88593
88594 #define RPCRDMA_VERSION 1
88595
88596diff --git a/include/linux/suspend.h b/include/linux/suspend.h
88597index 5e781d8..1e62818 100644
88598--- a/include/linux/suspend.h
88599+++ b/include/linux/suspend.h
88600@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
88601 * which require special recovery actions in that situation.
88602 */
88603 struct platform_suspend_ops {
88604- int (*valid)(suspend_state_t state);
88605- int (*begin)(suspend_state_t state);
88606- int (*prepare)(void);
88607- int (*prepare_late)(void);
88608- int (*enter)(suspend_state_t state);
88609- void (*wake)(void);
88610- void (*finish)(void);
88611- void (*end)(void);
88612- void (*recover)(void);
88613+ int (* const valid)(suspend_state_t state);
88614+ int (* const begin)(suspend_state_t state);
88615+ int (* const prepare)(void);
88616+ int (* const prepare_late)(void);
88617+ int (* const enter)(suspend_state_t state);
88618+ void (* const wake)(void);
88619+ void (* const finish)(void);
88620+ void (* const end)(void);
88621+ void (* const recover)(void);
88622 };
88623
88624 #ifdef CONFIG_SUSPEND
88625@@ -120,7 +120,7 @@ struct platform_suspend_ops {
88626 * suspend_set_ops - set platform dependent suspend operations
88627 * @ops: The new suspend operations to set.
88628 */
88629-extern void suspend_set_ops(struct platform_suspend_ops *ops);
88630+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
88631 extern int suspend_valid_only_mem(suspend_state_t state);
88632
88633 /**
88634@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
88635 #else /* !CONFIG_SUSPEND */
88636 #define suspend_valid_only_mem NULL
88637
88638-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
88639+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
88640 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
88641 #endif /* !CONFIG_SUSPEND */
88642
88643@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
88644 * platforms which require special recovery actions in that situation.
88645 */
88646 struct platform_hibernation_ops {
88647- int (*begin)(void);
88648- void (*end)(void);
88649- int (*pre_snapshot)(void);
88650- void (*finish)(void);
88651- int (*prepare)(void);
88652- int (*enter)(void);
88653- void (*leave)(void);
88654- int (*pre_restore)(void);
88655- void (*restore_cleanup)(void);
88656- void (*recover)(void);
88657+ int (* const begin)(void);
88658+ void (* const end)(void);
88659+ int (* const pre_snapshot)(void);
88660+ void (* const finish)(void);
88661+ int (* const prepare)(void);
88662+ int (* const enter)(void);
88663+ void (* const leave)(void);
88664+ int (* const pre_restore)(void);
88665+ void (* const restore_cleanup)(void);
88666+ void (* const recover)(void);
88667 };
88668
88669 #ifdef CONFIG_HIBERNATION
88670@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
88671 extern void swsusp_unset_page_free(struct page *);
88672 extern unsigned long get_safe_page(gfp_t gfp_mask);
88673
88674-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
88675+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
88676 extern int hibernate(void);
88677 extern bool system_entering_hibernation(void);
88678 #else /* CONFIG_HIBERNATION */
88679@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
88680 static inline void swsusp_set_page_free(struct page *p) {}
88681 static inline void swsusp_unset_page_free(struct page *p) {}
88682
88683-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
88684+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
88685 static inline int hibernate(void) { return -ENOSYS; }
88686 static inline bool system_entering_hibernation(void) { return false; }
88687 #endif /* CONFIG_HIBERNATION */
88688diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
88689index 0eb6942..a805cb6 100644
88690--- a/include/linux/sysctl.h
88691+++ b/include/linux/sysctl.h
88692@@ -164,7 +164,11 @@ enum
88693 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88694 };
88695
88696-
88697+#ifdef CONFIG_PAX_SOFTMODE
88698+enum {
88699+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
88700+};
88701+#endif
88702
88703 /* CTL_VM names: */
88704 enum
88705@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
88706
88707 extern int proc_dostring(struct ctl_table *, int,
88708 void __user *, size_t *, loff_t *);
88709+extern int proc_dostring_modpriv(struct ctl_table *, int,
88710+ void __user *, size_t *, loff_t *);
88711 extern int proc_dointvec(struct ctl_table *, int,
88712 void __user *, size_t *, loff_t *);
88713 extern int proc_dointvec_minmax(struct ctl_table *, int,
88714@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
88715
88716 extern ctl_handler sysctl_data;
88717 extern ctl_handler sysctl_string;
88718+extern ctl_handler sysctl_string_modpriv;
88719 extern ctl_handler sysctl_intvec;
88720 extern ctl_handler sysctl_jiffies;
88721 extern ctl_handler sysctl_ms_jiffies;
88722diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
88723index 9d68fed..71f02cc 100644
88724--- a/include/linux/sysfs.h
88725+++ b/include/linux/sysfs.h
88726@@ -75,8 +75,8 @@ struct bin_attribute {
88727 };
88728
88729 struct sysfs_ops {
88730- ssize_t (*show)(struct kobject *, struct attribute *,char *);
88731- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
88732+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
88733+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
88734 };
88735
88736 struct sysfs_dirent;
88737diff --git a/include/linux/syslog.h b/include/linux/syslog.h
88738new file mode 100644
88739index 0000000..3891139
88740--- /dev/null
88741+++ b/include/linux/syslog.h
88742@@ -0,0 +1,52 @@
88743+/* Syslog internals
88744+ *
88745+ * Copyright 2010 Canonical, Ltd.
88746+ * Author: Kees Cook <kees.cook@canonical.com>
88747+ *
88748+ * This program is free software; you can redistribute it and/or modify
88749+ * it under the terms of the GNU General Public License as published by
88750+ * the Free Software Foundation; either version 2, or (at your option)
88751+ * any later version.
88752+ *
88753+ * This program is distributed in the hope that it will be useful,
88754+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
88755+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
88756+ * GNU General Public License for more details.
88757+ *
88758+ * You should have received a copy of the GNU General Public License
88759+ * along with this program; see the file COPYING. If not, write to
88760+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
88761+ */
88762+
88763+#ifndef _LINUX_SYSLOG_H
88764+#define _LINUX_SYSLOG_H
88765+
88766+/* Close the log. Currently a NOP. */
88767+#define SYSLOG_ACTION_CLOSE 0
88768+/* Open the log. Currently a NOP. */
88769+#define SYSLOG_ACTION_OPEN 1
88770+/* Read from the log. */
88771+#define SYSLOG_ACTION_READ 2
88772+/* Read all messages remaining in the ring buffer. */
88773+#define SYSLOG_ACTION_READ_ALL 3
88774+/* Read and clear all messages remaining in the ring buffer */
88775+#define SYSLOG_ACTION_READ_CLEAR 4
88776+/* Clear ring buffer. */
88777+#define SYSLOG_ACTION_CLEAR 5
88778+/* Disable printk's to console */
88779+#define SYSLOG_ACTION_CONSOLE_OFF 6
88780+/* Enable printk's to console */
88781+#define SYSLOG_ACTION_CONSOLE_ON 7
88782+/* Set level of messages printed to console */
88783+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
88784+/* Return number of unread characters in the log buffer */
88785+#define SYSLOG_ACTION_SIZE_UNREAD 9
88786+/* Return size of the log buffer */
88787+#define SYSLOG_ACTION_SIZE_BUFFER 10
88788+
88789+#define SYSLOG_FROM_CALL 0
88790+#define SYSLOG_FROM_FILE 1
88791+
88792+int do_syslog(int type, char __user *buf, int count, bool from_file);
88793+
88794+#endif /* _LINUX_SYSLOG_H */
88795diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
88796index a8cc4e1..98d3b85 100644
88797--- a/include/linux/thread_info.h
88798+++ b/include/linux/thread_info.h
88799@@ -23,7 +23,7 @@ struct restart_block {
88800 };
88801 /* For futex_wait and futex_wait_requeue_pi */
88802 struct {
88803- u32 *uaddr;
88804+ u32 __user *uaddr;
88805 u32 val;
88806 u32 flags;
88807 u32 bitset;
88808diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
88809index 1eb44a9..f582df3 100644
88810--- a/include/linux/tracehook.h
88811+++ b/include/linux/tracehook.h
88812@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
88813 /*
88814 * ptrace report for syscall entry and exit looks identical.
88815 */
88816-static inline void ptrace_report_syscall(struct pt_regs *regs)
88817+static inline int ptrace_report_syscall(struct pt_regs *regs)
88818 {
88819 int ptrace = task_ptrace(current);
88820
88821 if (!(ptrace & PT_PTRACED))
88822- return;
88823+ return 0;
88824
88825 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
88826
88827@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88828 send_sig(current->exit_code, current, 1);
88829 current->exit_code = 0;
88830 }
88831+
88832+ return fatal_signal_pending(current);
88833 }
88834
88835 /**
88836@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88837 static inline __must_check int tracehook_report_syscall_entry(
88838 struct pt_regs *regs)
88839 {
88840- ptrace_report_syscall(regs);
88841- return 0;
88842+ return ptrace_report_syscall(regs);
88843 }
88844
88845 /**
88846diff --git a/include/linux/tty.h b/include/linux/tty.h
88847index e9c57e9..ee6d489 100644
88848--- a/include/linux/tty.h
88849+++ b/include/linux/tty.h
88850@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
88851 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
88852 extern void tty_ldisc_enable(struct tty_struct *tty);
88853
88854-
88855 /* n_tty.c */
88856 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
88857
88858diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
88859index 0c4ee9b..9f7c426 100644
88860--- a/include/linux/tty_ldisc.h
88861+++ b/include/linux/tty_ldisc.h
88862@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
88863
88864 struct module *owner;
88865
88866- int refcount;
88867+ atomic_t refcount;
88868 };
88869
88870 struct tty_ldisc {
88871diff --git a/include/linux/types.h b/include/linux/types.h
88872index c42724f..d190eee 100644
88873--- a/include/linux/types.h
88874+++ b/include/linux/types.h
88875@@ -191,10 +191,26 @@ typedef struct {
88876 volatile int counter;
88877 } atomic_t;
88878
88879+#ifdef CONFIG_PAX_REFCOUNT
88880+typedef struct {
88881+ volatile int counter;
88882+} atomic_unchecked_t;
88883+#else
88884+typedef atomic_t atomic_unchecked_t;
88885+#endif
88886+
88887 #ifdef CONFIG_64BIT
88888 typedef struct {
88889 volatile long counter;
88890 } atomic64_t;
88891+
88892+#ifdef CONFIG_PAX_REFCOUNT
88893+typedef struct {
88894+ volatile long counter;
88895+} atomic64_unchecked_t;
88896+#else
88897+typedef atomic64_t atomic64_unchecked_t;
88898+#endif
88899 #endif
88900
88901 struct ustat {
88902diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
88903index 6b58367..57b150e 100644
88904--- a/include/linux/uaccess.h
88905+++ b/include/linux/uaccess.h
88906@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88907 long ret; \
88908 mm_segment_t old_fs = get_fs(); \
88909 \
88910- set_fs(KERNEL_DS); \
88911 pagefault_disable(); \
88912- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
88913- pagefault_enable(); \
88914+ set_fs(KERNEL_DS); \
88915+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
88916 set_fs(old_fs); \
88917+ pagefault_enable(); \
88918 ret; \
88919 })
88920
88921@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88922 * Safely read from address @src to the buffer at @dst. If a kernel fault
88923 * happens, handle that and return -EFAULT.
88924 */
88925-extern long probe_kernel_read(void *dst, void *src, size_t size);
88926+extern long probe_kernel_read(void *dst, const void *src, size_t size);
88927
88928 /*
88929 * probe_kernel_write(): safely attempt to write to a location
88930@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
88931 * Safely write to address @dst from the buffer at @src. If a kernel fault
88932 * happens, handle that and return -EFAULT.
88933 */
88934-extern long probe_kernel_write(void *dst, void *src, size_t size);
88935+extern long probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
88936
88937 #endif /* __LINUX_UACCESS_H__ */
88938diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
88939index 99c1b4d..bb94261 100644
88940--- a/include/linux/unaligned/access_ok.h
88941+++ b/include/linux/unaligned/access_ok.h
88942@@ -6,32 +6,32 @@
88943
88944 static inline u16 get_unaligned_le16(const void *p)
88945 {
88946- return le16_to_cpup((__le16 *)p);
88947+ return le16_to_cpup((const __le16 *)p);
88948 }
88949
88950 static inline u32 get_unaligned_le32(const void *p)
88951 {
88952- return le32_to_cpup((__le32 *)p);
88953+ return le32_to_cpup((const __le32 *)p);
88954 }
88955
88956 static inline u64 get_unaligned_le64(const void *p)
88957 {
88958- return le64_to_cpup((__le64 *)p);
88959+ return le64_to_cpup((const __le64 *)p);
88960 }
88961
88962 static inline u16 get_unaligned_be16(const void *p)
88963 {
88964- return be16_to_cpup((__be16 *)p);
88965+ return be16_to_cpup((const __be16 *)p);
88966 }
88967
88968 static inline u32 get_unaligned_be32(const void *p)
88969 {
88970- return be32_to_cpup((__be32 *)p);
88971+ return be32_to_cpup((const __be32 *)p);
88972 }
88973
88974 static inline u64 get_unaligned_be64(const void *p)
88975 {
88976- return be64_to_cpup((__be64 *)p);
88977+ return be64_to_cpup((const __be64 *)p);
88978 }
88979
88980 static inline void put_unaligned_le16(u16 val, void *p)
88981diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
88982index 79b9837..b5a56f9 100644
88983--- a/include/linux/vermagic.h
88984+++ b/include/linux/vermagic.h
88985@@ -26,9 +26,35 @@
88986 #define MODULE_ARCH_VERMAGIC ""
88987 #endif
88988
88989+#ifdef CONFIG_PAX_REFCOUNT
88990+#define MODULE_PAX_REFCOUNT "REFCOUNT "
88991+#else
88992+#define MODULE_PAX_REFCOUNT ""
88993+#endif
88994+
88995+#ifdef CONSTIFY_PLUGIN
88996+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
88997+#else
88998+#define MODULE_CONSTIFY_PLUGIN ""
88999+#endif
89000+
89001+#ifdef STACKLEAK_PLUGIN
89002+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
89003+#else
89004+#define MODULE_STACKLEAK_PLUGIN ""
89005+#endif
89006+
89007+#ifdef CONFIG_GRKERNSEC
89008+#define MODULE_GRSEC "GRSEC "
89009+#else
89010+#define MODULE_GRSEC ""
89011+#endif
89012+
89013 #define VERMAGIC_STRING \
89014 UTS_RELEASE " " \
89015 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
89016 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
89017- MODULE_ARCH_VERMAGIC
89018+ MODULE_ARCH_VERMAGIC \
89019+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
89020+ MODULE_GRSEC
89021
89022diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
89023index 819a634..b99e71b 100644
89024--- a/include/linux/vmalloc.h
89025+++ b/include/linux/vmalloc.h
89026@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
89027 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
89028 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
89029 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
89030+
89031+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
89032+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
89033+#endif
89034+
89035 /* bits [20..32] reserved for arch specific ioremap internals */
89036
89037 /*
89038@@ -51,13 +56,13 @@ static inline void vmalloc_init(void)
89039 }
89040 #endif
89041
89042-extern void *vmalloc(unsigned long size);
89043-extern void *vmalloc_user(unsigned long size);
89044-extern void *vmalloc_node(unsigned long size, int node);
89045-extern void *vmalloc_exec(unsigned long size);
89046-extern void *vmalloc_32(unsigned long size);
89047-extern void *vmalloc_32_user(unsigned long size);
89048-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
89049+extern void *vmalloc(unsigned long size) __size_overflow(1);
89050+extern void *vmalloc_user(unsigned long size) __size_overflow(1);
89051+extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
89052+extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
89053+extern void *vmalloc_32(unsigned long size) __size_overflow(1);
89054+extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
89055+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
89056 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
89057 pgprot_t prot);
89058 extern void vfree(const void *addr);
89059@@ -106,8 +111,8 @@ extern struct vm_struct *alloc_vm_area(size_t size);
89060 extern void free_vm_area(struct vm_struct *area);
89061
89062 /* for /dev/kmem */
89063-extern long vread(char *buf, char *addr, unsigned long count);
89064-extern long vwrite(char *buf, char *addr, unsigned long count);
89065+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
89066+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
89067
89068 /*
89069 * Internals. Dont't use..
89070diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
89071index 13070d6..aa4159a 100644
89072--- a/include/linux/vmstat.h
89073+++ b/include/linux/vmstat.h
89074@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
89075 /*
89076 * Zone based page accounting with per cpu differentials.
89077 */
89078-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89079+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89080
89081 static inline void zone_page_state_add(long x, struct zone *zone,
89082 enum zone_stat_item item)
89083 {
89084- atomic_long_add(x, &zone->vm_stat[item]);
89085- atomic_long_add(x, &vm_stat[item]);
89086+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
89087+ atomic_long_add_unchecked(x, &vm_stat[item]);
89088 }
89089
89090 static inline unsigned long global_page_state(enum zone_stat_item item)
89091 {
89092- long x = atomic_long_read(&vm_stat[item]);
89093+ long x = atomic_long_read_unchecked(&vm_stat[item]);
89094 #ifdef CONFIG_SMP
89095 if (x < 0)
89096 x = 0;
89097@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
89098 static inline unsigned long zone_page_state(struct zone *zone,
89099 enum zone_stat_item item)
89100 {
89101- long x = atomic_long_read(&zone->vm_stat[item]);
89102+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89103 #ifdef CONFIG_SMP
89104 if (x < 0)
89105 x = 0;
89106@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
89107 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
89108 enum zone_stat_item item)
89109 {
89110- long x = atomic_long_read(&zone->vm_stat[item]);
89111+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89112
89113 #ifdef CONFIG_SMP
89114 int cpu;
89115@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
89116
89117 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
89118 {
89119- atomic_long_inc(&zone->vm_stat[item]);
89120- atomic_long_inc(&vm_stat[item]);
89121+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
89122+ atomic_long_inc_unchecked(&vm_stat[item]);
89123 }
89124
89125 static inline void __inc_zone_page_state(struct page *page,
89126@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
89127
89128 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
89129 {
89130- atomic_long_dec(&zone->vm_stat[item]);
89131- atomic_long_dec(&vm_stat[item]);
89132+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
89133+ atomic_long_dec_unchecked(&vm_stat[item]);
89134 }
89135
89136 static inline void __dec_zone_page_state(struct page *page,
89137diff --git a/include/linux/xattr.h b/include/linux/xattr.h
89138index 5c84af8..1a3b6e2 100644
89139--- a/include/linux/xattr.h
89140+++ b/include/linux/xattr.h
89141@@ -33,6 +33,11 @@
89142 #define XATTR_USER_PREFIX "user."
89143 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
89144
89145+/* User namespace */
89146+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89147+#define XATTR_PAX_FLAGS_SUFFIX "flags"
89148+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89149+
89150 struct inode;
89151 struct dentry;
89152
89153diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
89154index eed5fcc..5080d24 100644
89155--- a/include/media/saa7146_vv.h
89156+++ b/include/media/saa7146_vv.h
89157@@ -167,7 +167,7 @@ struct saa7146_ext_vv
89158 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
89159
89160 /* the extension can override this */
89161- struct v4l2_ioctl_ops ops;
89162+ v4l2_ioctl_ops_no_const ops;
89163 /* pointer to the saa7146 core ops */
89164 const struct v4l2_ioctl_ops *core_ops;
89165
89166diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
89167index 73c9867..2da8837 100644
89168--- a/include/media/v4l2-dev.h
89169+++ b/include/media/v4l2-dev.h
89170@@ -34,7 +34,7 @@ struct v4l2_device;
89171 #define V4L2_FL_UNREGISTERED (0)
89172
89173 struct v4l2_file_operations {
89174- struct module *owner;
89175+ struct module * const owner;
89176 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
89177 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
89178 unsigned int (*poll) (struct file *, struct poll_table_struct *);
89179@@ -46,6 +46,7 @@ struct v4l2_file_operations {
89180 int (*open) (struct file *);
89181 int (*release) (struct file *);
89182 };
89183+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
89184
89185 /*
89186 * Newer version of video_device, handled by videodev2.c
89187diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
89188index 5d5d550..f559ef1 100644
89189--- a/include/media/v4l2-device.h
89190+++ b/include/media/v4l2-device.h
89191@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
89192 this function returns 0. If the name ends with a digit (e.g. cx18),
89193 then the name will be set to cx18-0 since cx180 looks really odd. */
89194 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
89195- atomic_t *instance);
89196+ atomic_unchecked_t *instance);
89197
89198 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
89199 Since the parent disappears this ensures that v4l2_dev doesn't have an
89200diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
89201index 7a4529d..7244290 100644
89202--- a/include/media/v4l2-ioctl.h
89203+++ b/include/media/v4l2-ioctl.h
89204@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
89205 long (*vidioc_default) (struct file *file, void *fh,
89206 int cmd, void *arg);
89207 };
89208+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
89209
89210
89211 /* v4l debugging and diagnostics */
89212diff --git a/include/net/flow.h b/include/net/flow.h
89213index 809970b..c3df4f3 100644
89214--- a/include/net/flow.h
89215+++ b/include/net/flow.h
89216@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
89217 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
89218 u8 dir, flow_resolve_t resolver);
89219 extern void flow_cache_flush(void);
89220-extern atomic_t flow_cache_genid;
89221+extern atomic_unchecked_t flow_cache_genid;
89222
89223 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
89224 {
89225diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
89226index 15e1f8fe..668837c 100644
89227--- a/include/net/inetpeer.h
89228+++ b/include/net/inetpeer.h
89229@@ -24,7 +24,7 @@ struct inet_peer
89230 __u32 dtime; /* the time of last use of not
89231 * referenced entries */
89232 atomic_t refcnt;
89233- atomic_t rid; /* Frag reception counter */
89234+ atomic_unchecked_t rid; /* Frag reception counter */
89235 __u32 tcp_ts;
89236 unsigned long tcp_ts_stamp;
89237 };
89238diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
89239index 98978e7..2243a3d 100644
89240--- a/include/net/ip_vs.h
89241+++ b/include/net/ip_vs.h
89242@@ -365,7 +365,7 @@ struct ip_vs_conn {
89243 struct ip_vs_conn *control; /* Master control connection */
89244 atomic_t n_control; /* Number of controlled ones */
89245 struct ip_vs_dest *dest; /* real server */
89246- atomic_t in_pkts; /* incoming packet counter */
89247+ atomic_unchecked_t in_pkts; /* incoming packet counter */
89248
89249 /* packet transmitter for different forwarding methods. If it
89250 mangles the packet, it must return NF_DROP or better NF_STOLEN,
89251@@ -466,7 +466,7 @@ struct ip_vs_dest {
89252 union nf_inet_addr addr; /* IP address of the server */
89253 __be16 port; /* port number of the server */
89254 volatile unsigned flags; /* dest status flags */
89255- atomic_t conn_flags; /* flags to copy to conn */
89256+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
89257 atomic_t weight; /* server weight */
89258
89259 atomic_t refcnt; /* reference counter */
89260diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
89261index 69b610a..fe3962c 100644
89262--- a/include/net/irda/ircomm_core.h
89263+++ b/include/net/irda/ircomm_core.h
89264@@ -51,7 +51,7 @@ typedef struct {
89265 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
89266 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
89267 struct ircomm_info *);
89268-} call_t;
89269+} __no_const call_t;
89270
89271 struct ircomm_cb {
89272 irda_queue_t queue;
89273diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
89274index eea2e61..08c692d 100644
89275--- a/include/net/irda/ircomm_tty.h
89276+++ b/include/net/irda/ircomm_tty.h
89277@@ -35,6 +35,7 @@
89278 #include <linux/termios.h>
89279 #include <linux/timer.h>
89280 #include <linux/tty.h> /* struct tty_struct */
89281+#include <asm/local.h>
89282
89283 #include <net/irda/irias_object.h>
89284 #include <net/irda/ircomm_core.h>
89285@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
89286 unsigned short close_delay;
89287 unsigned short closing_wait; /* time to wait before closing */
89288
89289- int open_count;
89290- int blocked_open; /* # of blocked opens */
89291+ local_t open_count;
89292+ local_t blocked_open; /* # of blocked opens */
89293
89294 /* Protect concurent access to :
89295 * o self->open_count
89296diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
89297index f82a1e8..82d81e8 100644
89298--- a/include/net/iucv/af_iucv.h
89299+++ b/include/net/iucv/af_iucv.h
89300@@ -87,7 +87,7 @@ struct iucv_sock {
89301 struct iucv_sock_list {
89302 struct hlist_head head;
89303 rwlock_t lock;
89304- atomic_t autobind_name;
89305+ atomic_unchecked_t autobind_name;
89306 };
89307
89308 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
89309diff --git a/include/net/lapb.h b/include/net/lapb.h
89310index 96cb5dd..25e8d4f 100644
89311--- a/include/net/lapb.h
89312+++ b/include/net/lapb.h
89313@@ -95,7 +95,7 @@ struct lapb_cb {
89314 struct sk_buff_head write_queue;
89315 struct sk_buff_head ack_queue;
89316 unsigned char window;
89317- struct lapb_register_struct callbacks;
89318+ struct lapb_register_struct *callbacks;
89319
89320 /* FRMR control information */
89321 struct lapb_frame frmr_data;
89322diff --git a/include/net/neighbour.h b/include/net/neighbour.h
89323index 3817fda..cdb2343 100644
89324--- a/include/net/neighbour.h
89325+++ b/include/net/neighbour.h
89326@@ -131,7 +131,7 @@ struct neigh_ops
89327 int (*connected_output)(struct sk_buff*);
89328 int (*hh_output)(struct sk_buff*);
89329 int (*queue_xmit)(struct sk_buff*);
89330-};
89331+} __do_const;
89332
89333 struct pneigh_entry
89334 {
89335diff --git a/include/net/netlink.h b/include/net/netlink.h
89336index c344646..4778c71 100644
89337--- a/include/net/netlink.h
89338+++ b/include/net/netlink.h
89339@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
89340 {
89341 return (remaining >= (int) sizeof(struct nlmsghdr) &&
89342 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
89343- nlh->nlmsg_len <= remaining);
89344+ nlh->nlmsg_len <= (unsigned int)remaining);
89345 }
89346
89347 /**
89348@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
89349 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
89350 {
89351 if (mark)
89352- skb_trim(skb, (unsigned char *) mark - skb->data);
89353+ skb_trim(skb, (const unsigned char *) mark - skb->data);
89354 }
89355
89356 /**
89357diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
89358index 9a4b8b7..e49e077 100644
89359--- a/include/net/netns/ipv4.h
89360+++ b/include/net/netns/ipv4.h
89361@@ -54,7 +54,7 @@ struct netns_ipv4 {
89362 int current_rt_cache_rebuild_count;
89363
89364 struct timer_list rt_secret_timer;
89365- atomic_t rt_genid;
89366+ atomic_unchecked_t rt_genid;
89367
89368 #ifdef CONFIG_IP_MROUTE
89369 struct sock *mroute_sk;
89370diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
89371index 8a6d529..171f401 100644
89372--- a/include/net/sctp/sctp.h
89373+++ b/include/net/sctp/sctp.h
89374@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
89375
89376 #else /* SCTP_DEBUG */
89377
89378-#define SCTP_DEBUG_PRINTK(whatever...)
89379-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
89380+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
89381+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
89382 #define SCTP_ENABLE_DEBUG
89383 #define SCTP_DISABLE_DEBUG
89384 #define SCTP_ASSERT(expr, str, func)
89385diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
89386index d97f689..f3b90ab 100644
89387--- a/include/net/secure_seq.h
89388+++ b/include/net/secure_seq.h
89389@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
89390 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
89391 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
89392 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
89393- __be16 dport);
89394+ __be16 dport);
89395 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
89396 __be16 sport, __be16 dport);
89397 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89398- __be16 sport, __be16 dport);
89399+ __be16 sport, __be16 dport);
89400 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
89401- __be16 sport, __be16 dport);
89402+ __be16 sport, __be16 dport);
89403 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89404- __be16 sport, __be16 dport);
89405+ __be16 sport, __be16 dport);
89406
89407 #endif /* _NET_SECURE_SEQ */
89408diff --git a/include/net/sock.h b/include/net/sock.h
89409index 78adf52..99afd29 100644
89410--- a/include/net/sock.h
89411+++ b/include/net/sock.h
89412@@ -272,7 +272,7 @@ struct sock {
89413 rwlock_t sk_callback_lock;
89414 int sk_err,
89415 sk_err_soft;
89416- atomic_t sk_drops;
89417+ atomic_unchecked_t sk_drops;
89418 unsigned short sk_ack_backlog;
89419 unsigned short sk_max_ack_backlog;
89420 __u32 sk_priority;
89421@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
89422 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
89423 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
89424 #else
89425-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
89426+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
89427 int inc)
89428 {
89429 }
89430diff --git a/include/net/tcp.h b/include/net/tcp.h
89431index 6cfe18b..dd21acb 100644
89432--- a/include/net/tcp.h
89433+++ b/include/net/tcp.h
89434@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
89435 struct tcp_seq_afinfo {
89436 char *name;
89437 sa_family_t family;
89438- struct file_operations seq_fops;
89439- struct seq_operations seq_ops;
89440+ file_operations_no_const seq_fops;
89441+ seq_operations_no_const seq_ops;
89442 };
89443
89444 struct tcp_iter_state {
89445diff --git a/include/net/udp.h b/include/net/udp.h
89446index f98abd2..b4b042f 100644
89447--- a/include/net/udp.h
89448+++ b/include/net/udp.h
89449@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
89450 char *name;
89451 sa_family_t family;
89452 struct udp_table *udp_table;
89453- struct file_operations seq_fops;
89454- struct seq_operations seq_ops;
89455+ file_operations_no_const seq_fops;
89456+ seq_operations_no_const seq_ops;
89457 };
89458
89459 struct udp_iter_state {
89460diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
89461index cbb822e..e9c1cbe 100644
89462--- a/include/rdma/iw_cm.h
89463+++ b/include/rdma/iw_cm.h
89464@@ -129,7 +129,7 @@ struct iw_cm_verbs {
89465 int backlog);
89466
89467 int (*destroy_listen)(struct iw_cm_id *cm_id);
89468-};
89469+} __no_const;
89470
89471 /**
89472 * iw_create_cm_id - Create an IW CM identifier.
89473diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
89474index 09a124b..caa8ca8 100644
89475--- a/include/scsi/libfc.h
89476+++ b/include/scsi/libfc.h
89477@@ -675,6 +675,7 @@ struct libfc_function_template {
89478 */
89479 void (*disc_stop_final) (struct fc_lport *);
89480 };
89481+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
89482
89483 /* information used by the discovery layer */
89484 struct fc_disc {
89485@@ -707,7 +708,7 @@ struct fc_lport {
89486 struct fc_disc disc;
89487
89488 /* Operational Information */
89489- struct libfc_function_template tt;
89490+ libfc_function_template_no_const tt;
89491 u8 link_up;
89492 u8 qfull;
89493 enum fc_lport_state state;
89494diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
89495index de8e180..f15e0d7 100644
89496--- a/include/scsi/scsi_device.h
89497+++ b/include/scsi/scsi_device.h
89498@@ -156,9 +156,9 @@ struct scsi_device {
89499 unsigned int max_device_blocked; /* what device_blocked counts down from */
89500 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
89501
89502- atomic_t iorequest_cnt;
89503- atomic_t iodone_cnt;
89504- atomic_t ioerr_cnt;
89505+ atomic_unchecked_t iorequest_cnt;
89506+ atomic_unchecked_t iodone_cnt;
89507+ atomic_unchecked_t ioerr_cnt;
89508
89509 struct device sdev_gendev,
89510 sdev_dev;
89511diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
89512index 0b4baba..0106e9e 100644
89513--- a/include/scsi/scsi_host.h
89514+++ b/include/scsi/scsi_host.h
89515@@ -43,6 +43,12 @@ struct blk_queue_tags;
89516 #define DISABLE_CLUSTERING 0
89517 #define ENABLE_CLUSTERING 1
89518
89519+enum {
89520+ SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
89521+ SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
89522+ SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshhold event */
89523+};
89524+
89525 struct scsi_host_template {
89526 struct module *module;
89527 const char *name;
89528diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
89529index fc50bd6..81ba9cb 100644
89530--- a/include/scsi/scsi_transport_fc.h
89531+++ b/include/scsi/scsi_transport_fc.h
89532@@ -708,7 +708,7 @@ struct fc_function_template {
89533 unsigned long show_host_system_hostname:1;
89534
89535 unsigned long disable_target_scan:1;
89536-};
89537+} __do_const;
89538
89539
89540 /**
89541diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
89542index 3dae3f7..8440d6f 100644
89543--- a/include/sound/ac97_codec.h
89544+++ b/include/sound/ac97_codec.h
89545@@ -419,15 +419,15 @@
89546 struct snd_ac97;
89547
89548 struct snd_ac97_build_ops {
89549- int (*build_3d) (struct snd_ac97 *ac97);
89550- int (*build_specific) (struct snd_ac97 *ac97);
89551- int (*build_spdif) (struct snd_ac97 *ac97);
89552- int (*build_post_spdif) (struct snd_ac97 *ac97);
89553+ int (* const build_3d) (struct snd_ac97 *ac97);
89554+ int (* const build_specific) (struct snd_ac97 *ac97);
89555+ int (* const build_spdif) (struct snd_ac97 *ac97);
89556+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
89557 #ifdef CONFIG_PM
89558- void (*suspend) (struct snd_ac97 *ac97);
89559- void (*resume) (struct snd_ac97 *ac97);
89560+ void (* const suspend) (struct snd_ac97 *ac97);
89561+ void (* const resume) (struct snd_ac97 *ac97);
89562 #endif
89563- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89564+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89565 };
89566
89567 struct snd_ac97_bus_ops {
89568@@ -477,7 +477,7 @@ struct snd_ac97_template {
89569
89570 struct snd_ac97 {
89571 /* -- lowlevel (hardware) driver specific -- */
89572- struct snd_ac97_build_ops * build_ops;
89573+ const struct snd_ac97_build_ops * build_ops;
89574 void *private_data;
89575 void (*private_free) (struct snd_ac97 *ac97);
89576 /* --- */
89577diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
89578index 891cf1a..a94ba2b 100644
89579--- a/include/sound/ak4xxx-adda.h
89580+++ b/include/sound/ak4xxx-adda.h
89581@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
89582 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
89583 unsigned char val);
89584 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
89585-};
89586+} __no_const;
89587
89588 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
89589
89590diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
89591index 8c05e47..2b5df97 100644
89592--- a/include/sound/hwdep.h
89593+++ b/include/sound/hwdep.h
89594@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
89595 struct snd_hwdep_dsp_status *status);
89596 int (*dsp_load)(struct snd_hwdep *hw,
89597 struct snd_hwdep_dsp_image *image);
89598-};
89599+} __no_const;
89600
89601 struct snd_hwdep {
89602 struct snd_card *card;
89603diff --git a/include/sound/info.h b/include/sound/info.h
89604index 112e894..6fda5b5 100644
89605--- a/include/sound/info.h
89606+++ b/include/sound/info.h
89607@@ -44,7 +44,7 @@ struct snd_info_entry_text {
89608 struct snd_info_buffer *buffer);
89609 void (*write)(struct snd_info_entry *entry,
89610 struct snd_info_buffer *buffer);
89611-};
89612+} __no_const;
89613
89614 struct snd_info_entry_ops {
89615 int (*open)(struct snd_info_entry *entry,
89616diff --git a/include/sound/pcm.h b/include/sound/pcm.h
89617index de6d981..590a550 100644
89618--- a/include/sound/pcm.h
89619+++ b/include/sound/pcm.h
89620@@ -80,6 +80,7 @@ struct snd_pcm_ops {
89621 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
89622 int (*ack)(struct snd_pcm_substream *substream);
89623 };
89624+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
89625
89626 /*
89627 *
89628diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
89629index 736eac7..fe8a80f 100644
89630--- a/include/sound/sb16_csp.h
89631+++ b/include/sound/sb16_csp.h
89632@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
89633 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
89634 int (*csp_stop) (struct snd_sb_csp * p);
89635 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
89636-};
89637+} __no_const;
89638
89639 /*
89640 * CSP private data
89641diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
89642index 444cd6b..3327cc5 100644
89643--- a/include/sound/ymfpci.h
89644+++ b/include/sound/ymfpci.h
89645@@ -358,7 +358,7 @@ struct snd_ymfpci {
89646 spinlock_t reg_lock;
89647 spinlock_t voice_lock;
89648 wait_queue_head_t interrupt_sleep;
89649- atomic_t interrupt_sleep_count;
89650+ atomic_unchecked_t interrupt_sleep_count;
89651 struct snd_info_entry *proc_entry;
89652 const struct firmware *dsp_microcode;
89653 const struct firmware *controller_microcode;
89654diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89655index b89f9db..f097b38 100644
89656--- a/include/trace/events/irq.h
89657+++ b/include/trace/events/irq.h
89658@@ -34,7 +34,7 @@
89659 */
89660 TRACE_EVENT(irq_handler_entry,
89661
89662- TP_PROTO(int irq, struct irqaction *action),
89663+ TP_PROTO(int irq, const struct irqaction *action),
89664
89665 TP_ARGS(irq, action),
89666
89667@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
89668 */
89669 TRACE_EVENT(irq_handler_exit,
89670
89671- TP_PROTO(int irq, struct irqaction *action, int ret),
89672+ TP_PROTO(int irq, const struct irqaction *action, int ret),
89673
89674 TP_ARGS(irq, action, ret),
89675
89676@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
89677 */
89678 TRACE_EVENT(softirq_entry,
89679
89680- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89681+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89682
89683 TP_ARGS(h, vec),
89684
89685@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
89686 */
89687 TRACE_EVENT(softirq_exit,
89688
89689- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89690+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89691
89692 TP_ARGS(h, vec),
89693
89694diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89695index 0993a22..32ba2fe 100644
89696--- a/include/video/uvesafb.h
89697+++ b/include/video/uvesafb.h
89698@@ -177,6 +177,7 @@ struct uvesafb_par {
89699 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89700 u8 pmi_setpal; /* PMI for palette changes */
89701 u16 *pmi_base; /* protected mode interface location */
89702+ u8 *pmi_code; /* protected mode code location */
89703 void *pmi_start;
89704 void *pmi_pal;
89705 u8 *vbe_state_orig; /*
89706diff --git a/init/Kconfig b/init/Kconfig
89707index d72691b..3996e54 100644
89708--- a/init/Kconfig
89709+++ b/init/Kconfig
89710@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
89711
89712 config COMPAT_BRK
89713 bool "Disable heap randomization"
89714- default y
89715+ default n
89716 help
89717 Randomizing heap placement makes heap exploits harder, but it
89718 also breaks ancient binaries (including anything libc5 based).
89719diff --git a/init/do_mounts.c b/init/do_mounts.c
89720index bb008d0..4fa3933 100644
89721--- a/init/do_mounts.c
89722+++ b/init/do_mounts.c
89723@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
89724
89725 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89726 {
89727- int err = sys_mount(name, "/root", fs, flags, data);
89728+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
89729 if (err)
89730 return err;
89731
89732- sys_chdir("/root");
89733+ sys_chdir((__force const char __user *)"/root");
89734 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
89735 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
89736 current->fs->pwd.mnt->mnt_sb->s_type->name,
89737@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
89738 va_start(args, fmt);
89739 vsprintf(buf, fmt, args);
89740 va_end(args);
89741- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89742+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89743 if (fd >= 0) {
89744 sys_ioctl(fd, FDEJECT, 0);
89745 sys_close(fd);
89746 }
89747 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89748- fd = sys_open("/dev/console", O_RDWR, 0);
89749+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
89750 if (fd >= 0) {
89751 sys_ioctl(fd, TCGETS, (long)&termios);
89752 termios.c_lflag &= ~ICANON;
89753 sys_ioctl(fd, TCSETSF, (long)&termios);
89754- sys_read(fd, &c, 1);
89755+ sys_read(fd, (char __user *)&c, 1);
89756 termios.c_lflag |= ICANON;
89757 sys_ioctl(fd, TCSETSF, (long)&termios);
89758 sys_close(fd);
89759@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
89760 mount_root();
89761 out:
89762 devtmpfs_mount("dev");
89763- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89764- sys_chroot(".");
89765+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
89766+ sys_chroot((__force char __user *)".");
89767 }
89768diff --git a/init/do_mounts.h b/init/do_mounts.h
89769index f5b978a..69dbfe8 100644
89770--- a/init/do_mounts.h
89771+++ b/init/do_mounts.h
89772@@ -15,15 +15,15 @@ extern int root_mountflags;
89773
89774 static inline int create_dev(char *name, dev_t dev)
89775 {
89776- sys_unlink(name);
89777- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89778+ sys_unlink((char __force_user *)name);
89779+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89780 }
89781
89782 #if BITS_PER_LONG == 32
89783 static inline u32 bstat(char *name)
89784 {
89785 struct stat64 stat;
89786- if (sys_stat64(name, &stat) != 0)
89787+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89788 return 0;
89789 if (!S_ISBLK(stat.st_mode))
89790 return 0;
89791@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89792 static inline u32 bstat(char *name)
89793 {
89794 struct stat stat;
89795- if (sys_newstat(name, &stat) != 0)
89796+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89797 return 0;
89798 if (!S_ISBLK(stat.st_mode))
89799 return 0;
89800diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89801index 614241b..4da046b 100644
89802--- a/init/do_mounts_initrd.c
89803+++ b/init/do_mounts_initrd.c
89804@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
89805 sys_close(old_fd);sys_close(root_fd);
89806 sys_close(0);sys_close(1);sys_close(2);
89807 sys_setsid();
89808- (void) sys_open("/dev/console",O_RDWR,0);
89809+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
89810 (void) sys_dup(0);
89811 (void) sys_dup(0);
89812 return kernel_execve(shell, argv, envp_init);
89813@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
89814 create_dev("/dev/root.old", Root_RAM0);
89815 /* mount initrd on rootfs' /root */
89816 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89817- sys_mkdir("/old", 0700);
89818- root_fd = sys_open("/", 0, 0);
89819- old_fd = sys_open("/old", 0, 0);
89820+ sys_mkdir((const char __force_user *)"/old", 0700);
89821+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
89822+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
89823 /* move initrd over / and chdir/chroot in initrd root */
89824- sys_chdir("/root");
89825- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89826- sys_chroot(".");
89827+ sys_chdir((const char __force_user *)"/root");
89828+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89829+ sys_chroot((const char __force_user *)".");
89830
89831 /*
89832 * In case that a resume from disk is carried out by linuxrc or one of
89833@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
89834
89835 /* move initrd to rootfs' /old */
89836 sys_fchdir(old_fd);
89837- sys_mount("/", ".", NULL, MS_MOVE, NULL);
89838+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
89839 /* switch root and cwd back to / of rootfs */
89840 sys_fchdir(root_fd);
89841- sys_chroot(".");
89842+ sys_chroot((const char __force_user *)".");
89843 sys_close(old_fd);
89844 sys_close(root_fd);
89845
89846 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89847- sys_chdir("/old");
89848+ sys_chdir((const char __force_user *)"/old");
89849 return;
89850 }
89851
89852@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
89853 mount_root();
89854
89855 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89856- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89857+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89858 if (!error)
89859 printk("okay\n");
89860 else {
89861- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89862+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89863 if (error == -ENOENT)
89864 printk("/initrd does not exist. Ignored.\n");
89865 else
89866 printk("failed\n");
89867 printk(KERN_NOTICE "Unmounting old root\n");
89868- sys_umount("/old", MNT_DETACH);
89869+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89870 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89871 if (fd < 0) {
89872 error = fd;
89873@@ -119,11 +119,11 @@ int __init initrd_load(void)
89874 * mounted in the normal path.
89875 */
89876 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89877- sys_unlink("/initrd.image");
89878+ sys_unlink((const char __force_user *)"/initrd.image");
89879 handle_initrd();
89880 return 1;
89881 }
89882 }
89883- sys_unlink("/initrd.image");
89884+ sys_unlink((const char __force_user *)"/initrd.image");
89885 return 0;
89886 }
89887diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89888index 69aebbf..c0bf6a7 100644
89889--- a/init/do_mounts_md.c
89890+++ b/init/do_mounts_md.c
89891@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
89892 partitioned ? "_d" : "", minor,
89893 md_setup_args[ent].device_names);
89894
89895- fd = sys_open(name, 0, 0);
89896+ fd = sys_open((char __force_user *)name, 0, 0);
89897 if (fd < 0) {
89898 printk(KERN_ERR "md: open failed - cannot start "
89899 "array %s\n", name);
89900@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
89901 * array without it
89902 */
89903 sys_close(fd);
89904- fd = sys_open(name, 0, 0);
89905+ fd = sys_open((char __force_user *)name, 0, 0);
89906 sys_ioctl(fd, BLKRRPART, 0);
89907 }
89908 sys_close(fd);
89909@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
89910
89911 wait_for_device_probe();
89912
89913- fd = sys_open("/dev/md0", 0, 0);
89914+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
89915 if (fd >= 0) {
89916 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89917 sys_close(fd);
89918diff --git a/init/initramfs.c b/init/initramfs.c
89919index 1fd59b8..a01b079 100644
89920--- a/init/initramfs.c
89921+++ b/init/initramfs.c
89922@@ -74,7 +74,7 @@ static void __init free_hash(void)
89923 }
89924 }
89925
89926-static long __init do_utime(char __user *filename, time_t mtime)
89927+static long __init do_utime(__force char __user *filename, time_t mtime)
89928 {
89929 struct timespec t[2];
89930
89931@@ -109,7 +109,7 @@ static void __init dir_utime(void)
89932 struct dir_entry *de, *tmp;
89933 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89934 list_del(&de->list);
89935- do_utime(de->name, de->mtime);
89936+ do_utime((char __force_user *)de->name, de->mtime);
89937 kfree(de->name);
89938 kfree(de);
89939 }
89940@@ -271,7 +271,7 @@ static int __init maybe_link(void)
89941 if (nlink >= 2) {
89942 char *old = find_link(major, minor, ino, mode, collected);
89943 if (old)
89944- return (sys_link(old, collected) < 0) ? -1 : 1;
89945+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89946 }
89947 return 0;
89948 }
89949@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
89950 {
89951 struct stat st;
89952
89953- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89954+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89955 if (S_ISDIR(st.st_mode))
89956- sys_rmdir(path);
89957+ sys_rmdir((char __force_user *)path);
89958 else
89959- sys_unlink(path);
89960+ sys_unlink((char __force_user *)path);
89961 }
89962 }
89963
89964@@ -305,7 +305,7 @@ static int __init do_name(void)
89965 int openflags = O_WRONLY|O_CREAT;
89966 if (ml != 1)
89967 openflags |= O_TRUNC;
89968- wfd = sys_open(collected, openflags, mode);
89969+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89970
89971 if (wfd >= 0) {
89972 sys_fchown(wfd, uid, gid);
89973@@ -317,17 +317,17 @@ static int __init do_name(void)
89974 }
89975 }
89976 } else if (S_ISDIR(mode)) {
89977- sys_mkdir(collected, mode);
89978- sys_chown(collected, uid, gid);
89979- sys_chmod(collected, mode);
89980+ sys_mkdir((char __force_user *)collected, mode);
89981+ sys_chown((char __force_user *)collected, uid, gid);
89982+ sys_chmod((char __force_user *)collected, mode);
89983 dir_add(collected, mtime);
89984 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89985 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89986 if (maybe_link() == 0) {
89987- sys_mknod(collected, mode, rdev);
89988- sys_chown(collected, uid, gid);
89989- sys_chmod(collected, mode);
89990- do_utime(collected, mtime);
89991+ sys_mknod((char __force_user *)collected, mode, rdev);
89992+ sys_chown((char __force_user *)collected, uid, gid);
89993+ sys_chmod((char __force_user *)collected, mode);
89994+ do_utime((char __force_user *)collected, mtime);
89995 }
89996 }
89997 return 0;
89998@@ -336,15 +336,15 @@ static int __init do_name(void)
89999 static int __init do_copy(void)
90000 {
90001 if (count >= body_len) {
90002- sys_write(wfd, victim, body_len);
90003+ sys_write(wfd, (char __force_user *)victim, body_len);
90004 sys_close(wfd);
90005- do_utime(vcollected, mtime);
90006+ do_utime((char __force_user *)vcollected, mtime);
90007 kfree(vcollected);
90008 eat(body_len);
90009 state = SkipIt;
90010 return 0;
90011 } else {
90012- sys_write(wfd, victim, count);
90013+ sys_write(wfd, (char __force_user *)victim, count);
90014 body_len -= count;
90015 eat(count);
90016 return 1;
90017@@ -355,9 +355,9 @@ static int __init do_symlink(void)
90018 {
90019 collected[N_ALIGN(name_len) + body_len] = '\0';
90020 clean_path(collected, 0);
90021- sys_symlink(collected + N_ALIGN(name_len), collected);
90022- sys_lchown(collected, uid, gid);
90023- do_utime(collected, mtime);
90024+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
90025+ sys_lchown((char __force_user *)collected, uid, gid);
90026+ do_utime((char __force_user *)collected, mtime);
90027 state = SkipIt;
90028 next_state = Reset;
90029 return 0;
90030diff --git a/init/main.c b/init/main.c
90031index 1eb4bd5..fea5bbe 100644
90032--- a/init/main.c
90033+++ b/init/main.c
90034@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
90035 #ifdef CONFIG_TC
90036 extern void tc_init(void);
90037 #endif
90038+extern void grsecurity_init(void);
90039
90040 enum system_states system_state __read_mostly;
90041 EXPORT_SYMBOL(system_state);
90042@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
90043
90044 __setup("reset_devices", set_reset_devices);
90045
90046+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
90047+extern char pax_enter_kernel_user[];
90048+extern char pax_exit_kernel_user[];
90049+extern pgdval_t clone_pgd_mask;
90050+#endif
90051+
90052+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
90053+static int __init setup_pax_nouderef(char *str)
90054+{
90055+#ifdef CONFIG_X86_32
90056+ unsigned int cpu;
90057+ struct desc_struct *gdt;
90058+
90059+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
90060+ gdt = get_cpu_gdt_table(cpu);
90061+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
90062+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
90063+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
90064+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
90065+ }
90066+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
90067+#else
90068+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
90069+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
90070+ clone_pgd_mask = ~(pgdval_t)0UL;
90071+#endif
90072+
90073+ return 0;
90074+}
90075+early_param("pax_nouderef", setup_pax_nouderef);
90076+#endif
90077+
90078+#ifdef CONFIG_PAX_SOFTMODE
90079+int pax_softmode;
90080+
90081+static int __init setup_pax_softmode(char *str)
90082+{
90083+ get_option(&str, &pax_softmode);
90084+ return 1;
90085+}
90086+__setup("pax_softmode=", setup_pax_softmode);
90087+#endif
90088+
90089 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
90090 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
90091 static const char *panic_later, *panic_param;
90092@@ -705,52 +749,53 @@ int initcall_debug;
90093 core_param(initcall_debug, initcall_debug, bool, 0644);
90094
90095 static char msgbuf[64];
90096-static struct boot_trace_call call;
90097-static struct boot_trace_ret ret;
90098+static struct boot_trace_call trace_call;
90099+static struct boot_trace_ret trace_ret;
90100
90101 int do_one_initcall(initcall_t fn)
90102 {
90103 int count = preempt_count();
90104 ktime_t calltime, delta, rettime;
90105+ const char *msg1 = "", *msg2 = "";
90106
90107 if (initcall_debug) {
90108- call.caller = task_pid_nr(current);
90109- printk("calling %pF @ %i\n", fn, call.caller);
90110+ trace_call.caller = task_pid_nr(current);
90111+ printk("calling %pF @ %i\n", fn, trace_call.caller);
90112 calltime = ktime_get();
90113- trace_boot_call(&call, fn);
90114+ trace_boot_call(&trace_call, fn);
90115 enable_boot_trace();
90116 }
90117
90118- ret.result = fn();
90119+ trace_ret.result = fn();
90120
90121 if (initcall_debug) {
90122 disable_boot_trace();
90123 rettime = ktime_get();
90124 delta = ktime_sub(rettime, calltime);
90125- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90126- trace_boot_ret(&ret, fn);
90127+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90128+ trace_boot_ret(&trace_ret, fn);
90129 printk("initcall %pF returned %d after %Ld usecs\n", fn,
90130- ret.result, ret.duration);
90131+ trace_ret.result, trace_ret.duration);
90132 }
90133
90134 msgbuf[0] = 0;
90135
90136- if (ret.result && ret.result != -ENODEV && initcall_debug)
90137- sprintf(msgbuf, "error code %d ", ret.result);
90138+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
90139+ sprintf(msgbuf, "error code %d ", trace_ret.result);
90140
90141 if (preempt_count() != count) {
90142- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
90143+ msg1 = " preemption imbalance";
90144 preempt_count() = count;
90145 }
90146 if (irqs_disabled()) {
90147- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
90148+ msg2 = " disabled interrupts";
90149 local_irq_enable();
90150 }
90151- if (msgbuf[0]) {
90152- printk("initcall %pF returned with %s\n", fn, msgbuf);
90153+ if (msgbuf[0] || *msg1 || *msg2) {
90154+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
90155 }
90156
90157- return ret.result;
90158+ return trace_ret.result;
90159 }
90160
90161
90162@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
90163 if (!ramdisk_execute_command)
90164 ramdisk_execute_command = "/init";
90165
90166- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
90167+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
90168 ramdisk_execute_command = NULL;
90169 prepare_namespace();
90170 }
90171
90172+ grsecurity_init();
90173+
90174 /*
90175 * Ok, we have completed the initial bootup, and
90176 * we're essentially up and running. Get rid of the
90177diff --git a/init/noinitramfs.c b/init/noinitramfs.c
90178index f4c1a3a..96c19bd 100644
90179--- a/init/noinitramfs.c
90180+++ b/init/noinitramfs.c
90181@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
90182 {
90183 int err;
90184
90185- err = sys_mkdir("/dev", 0755);
90186+ err = sys_mkdir((const char __user *)"/dev", 0755);
90187 if (err < 0)
90188 goto out;
90189
90190@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
90191 if (err < 0)
90192 goto out;
90193
90194- err = sys_mkdir("/root", 0700);
90195+ err = sys_mkdir((const char __user *)"/root", 0700);
90196 if (err < 0)
90197 goto out;
90198
90199diff --git a/ipc/mqueue.c b/ipc/mqueue.c
90200index d01bc14..8df81db 100644
90201--- a/ipc/mqueue.c
90202+++ b/ipc/mqueue.c
90203@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
90204 mq_bytes = (mq_msg_tblsz +
90205 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
90206
90207+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
90208 spin_lock(&mq_lock);
90209 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
90210 u->mq_bytes + mq_bytes >
90211diff --git a/ipc/msg.c b/ipc/msg.c
90212index 779f762..4af9e36 100644
90213--- a/ipc/msg.c
90214+++ b/ipc/msg.c
90215@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
90216 return security_msg_queue_associate(msq, msgflg);
90217 }
90218
90219+static struct ipc_ops msg_ops = {
90220+ .getnew = newque,
90221+ .associate = msg_security,
90222+ .more_checks = NULL
90223+};
90224+
90225 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
90226 {
90227 struct ipc_namespace *ns;
90228- struct ipc_ops msg_ops;
90229 struct ipc_params msg_params;
90230
90231 ns = current->nsproxy->ipc_ns;
90232
90233- msg_ops.getnew = newque;
90234- msg_ops.associate = msg_security;
90235- msg_ops.more_checks = NULL;
90236-
90237 msg_params.key = key;
90238 msg_params.flg = msgflg;
90239
90240diff --git a/ipc/sem.c b/ipc/sem.c
90241index b781007..f738b04 100644
90242--- a/ipc/sem.c
90243+++ b/ipc/sem.c
90244@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
90245 return 0;
90246 }
90247
90248+static struct ipc_ops sem_ops = {
90249+ .getnew = newary,
90250+ .associate = sem_security,
90251+ .more_checks = sem_more_checks
90252+};
90253+
90254 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90255 {
90256 struct ipc_namespace *ns;
90257- struct ipc_ops sem_ops;
90258 struct ipc_params sem_params;
90259
90260 ns = current->nsproxy->ipc_ns;
90261@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90262 if (nsems < 0 || nsems > ns->sc_semmsl)
90263 return -EINVAL;
90264
90265- sem_ops.getnew = newary;
90266- sem_ops.associate = sem_security;
90267- sem_ops.more_checks = sem_more_checks;
90268-
90269 sem_params.key = key;
90270 sem_params.flg = semflg;
90271 sem_params.u.nsems = nsems;
90272@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
90273 ushort* sem_io = fast_sem_io;
90274 int nsems;
90275
90276+ pax_track_stack();
90277+
90278 sma = sem_lock_check(ns, semid);
90279 if (IS_ERR(sma))
90280 return PTR_ERR(sma);
90281@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
90282 unsigned long jiffies_left = 0;
90283 struct ipc_namespace *ns;
90284
90285+ pax_track_stack();
90286+
90287 ns = current->nsproxy->ipc_ns;
90288
90289 if (nsops < 1 || semid < 0)
90290diff --git a/ipc/shm.c b/ipc/shm.c
90291index d30732c..e4992cd 100644
90292--- a/ipc/shm.c
90293+++ b/ipc/shm.c
90294@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
90295 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
90296 #endif
90297
90298+#ifdef CONFIG_GRKERNSEC
90299+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90300+ const time_t shm_createtime, const uid_t cuid,
90301+ const int shmid);
90302+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90303+ const time_t shm_createtime);
90304+#endif
90305+
90306 void shm_init_ns(struct ipc_namespace *ns)
90307 {
90308 ns->shm_ctlmax = SHMMAX;
90309@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
90310 shp->shm_lprid = 0;
90311 shp->shm_atim = shp->shm_dtim = 0;
90312 shp->shm_ctim = get_seconds();
90313+#ifdef CONFIG_GRKERNSEC
90314+ {
90315+ struct timespec timeval;
90316+ do_posix_clock_monotonic_gettime(&timeval);
90317+
90318+ shp->shm_createtime = timeval.tv_sec;
90319+ }
90320+#endif
90321 shp->shm_segsz = size;
90322 shp->shm_nattch = 0;
90323 shp->shm_file = file;
90324@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
90325 return 0;
90326 }
90327
90328+static struct ipc_ops shm_ops = {
90329+ .getnew = newseg,
90330+ .associate = shm_security,
90331+ .more_checks = shm_more_checks
90332+};
90333+
90334 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
90335 {
90336 struct ipc_namespace *ns;
90337- struct ipc_ops shm_ops;
90338 struct ipc_params shm_params;
90339
90340 ns = current->nsproxy->ipc_ns;
90341
90342- shm_ops.getnew = newseg;
90343- shm_ops.associate = shm_security;
90344- shm_ops.more_checks = shm_more_checks;
90345-
90346 shm_params.key = key;
90347 shm_params.flg = shmflg;
90348 shm_params.u.size = size;
90349@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90350 f_mode = FMODE_READ | FMODE_WRITE;
90351 }
90352 if (shmflg & SHM_EXEC) {
90353+
90354+#ifdef CONFIG_PAX_MPROTECT
90355+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
90356+ goto out;
90357+#endif
90358+
90359 prot |= PROT_EXEC;
90360 acc_mode |= S_IXUGO;
90361 }
90362@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90363 if (err)
90364 goto out_unlock;
90365
90366+#ifdef CONFIG_GRKERNSEC
90367+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
90368+ shp->shm_perm.cuid, shmid) ||
90369+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
90370+ err = -EACCES;
90371+ goto out_unlock;
90372+ }
90373+#endif
90374+
90375 path.dentry = dget(shp->shm_file->f_path.dentry);
90376 path.mnt = shp->shm_file->f_path.mnt;
90377 shp->shm_nattch++;
90378+#ifdef CONFIG_GRKERNSEC
90379+ shp->shm_lapid = current->pid;
90380+#endif
90381 size = i_size_read(path.dentry->d_inode);
90382 shm_unlock(shp);
90383
90384diff --git a/kernel/acct.c b/kernel/acct.c
90385index a6605ca..ca91111 100644
90386--- a/kernel/acct.c
90387+++ b/kernel/acct.c
90388@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
90389 */
90390 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
90391 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
90392- file->f_op->write(file, (char *)&ac,
90393+ file->f_op->write(file, (char __force_user *)&ac,
90394 sizeof(acct_t), &file->f_pos);
90395 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
90396 set_fs(fs);
90397diff --git a/kernel/audit.c b/kernel/audit.c
90398index 5feed23..48415fd 100644
90399--- a/kernel/audit.c
90400+++ b/kernel/audit.c
90401@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
90402 3) suppressed due to audit_rate_limit
90403 4) suppressed due to audit_backlog_limit
90404 */
90405-static atomic_t audit_lost = ATOMIC_INIT(0);
90406+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
90407
90408 /* The netlink socket. */
90409 static struct sock *audit_sock;
90410@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
90411 unsigned long now;
90412 int print;
90413
90414- atomic_inc(&audit_lost);
90415+ atomic_inc_unchecked(&audit_lost);
90416
90417 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
90418
90419@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
90420 printk(KERN_WARNING
90421 "audit: audit_lost=%d audit_rate_limit=%d "
90422 "audit_backlog_limit=%d\n",
90423- atomic_read(&audit_lost),
90424+ atomic_read_unchecked(&audit_lost),
90425 audit_rate_limit,
90426 audit_backlog_limit);
90427 audit_panic(message);
90428@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90429 status_set.pid = audit_pid;
90430 status_set.rate_limit = audit_rate_limit;
90431 status_set.backlog_limit = audit_backlog_limit;
90432- status_set.lost = atomic_read(&audit_lost);
90433+ status_set.lost = atomic_read_unchecked(&audit_lost);
90434 status_set.backlog = skb_queue_len(&audit_skb_queue);
90435 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
90436 &status_set, sizeof(status_set));
90437@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90438 spin_unlock_irq(&tsk->sighand->siglock);
90439 }
90440 read_unlock(&tasklist_lock);
90441- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
90442- &s, sizeof(s));
90443+
90444+ if (!err)
90445+ audit_send_reply(NETLINK_CB(skb).pid, seq,
90446+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
90447 break;
90448 }
90449 case AUDIT_TTY_SET: {
90450@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
90451 avail = audit_expand(ab,
90452 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
90453 if (!avail)
90454- goto out;
90455+ goto out_va_end;
90456 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
90457 }
90458- va_end(args2);
90459 if (len > 0)
90460 skb_put(skb, len);
90461+out_va_end:
90462+ va_end(args2);
90463 out:
90464 return;
90465 }
90466diff --git a/kernel/auditsc.c b/kernel/auditsc.c
90467index 267e484..ac41bc3 100644
90468--- a/kernel/auditsc.c
90469+++ b/kernel/auditsc.c
90470@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
90471 struct audit_buffer **ab,
90472 struct audit_aux_data_execve *axi)
90473 {
90474- int i;
90475- size_t len, len_sent = 0;
90476+ int i, len;
90477+ size_t len_sent = 0;
90478 const char __user *p;
90479 char *buf;
90480
90481@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
90482 }
90483
90484 /* global counter which is incremented every time something logs in */
90485-static atomic_t session_id = ATOMIC_INIT(0);
90486+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
90487
90488 /**
90489 * audit_set_loginuid - set a task's audit_context loginuid
90490@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
90491 */
90492 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
90493 {
90494- unsigned int sessionid = atomic_inc_return(&session_id);
90495+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
90496 struct audit_context *context = task->audit_context;
90497
90498 if (context && context->in_syscall) {
90499diff --git a/kernel/capability.c b/kernel/capability.c
90500index 8a944f5..db5001e 100644
90501--- a/kernel/capability.c
90502+++ b/kernel/capability.c
90503@@ -305,10 +305,26 @@ int capable(int cap)
90504 BUG();
90505 }
90506
90507- if (security_capable(cap) == 0) {
90508+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
90509 current->flags |= PF_SUPERPRIV;
90510 return 1;
90511 }
90512 return 0;
90513 }
90514+
90515+int capable_nolog(int cap)
90516+{
90517+ if (unlikely(!cap_valid(cap))) {
90518+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
90519+ BUG();
90520+ }
90521+
90522+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
90523+ current->flags |= PF_SUPERPRIV;
90524+ return 1;
90525+ }
90526+ return 0;
90527+}
90528+
90529 EXPORT_SYMBOL(capable);
90530+EXPORT_SYMBOL(capable_nolog);
90531diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90532index 1fbcc74..7000012 100644
90533--- a/kernel/cgroup.c
90534+++ b/kernel/cgroup.c
90535@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
90536 struct hlist_head *hhead;
90537 struct cg_cgroup_link *link;
90538
90539+ pax_track_stack();
90540+
90541 /* First see if we already have a cgroup group that matches
90542 * the desired set */
90543 read_lock(&css_set_lock);
90544diff --git a/kernel/compat.c b/kernel/compat.c
90545index 8bc5578..186e44a 100644
90546--- a/kernel/compat.c
90547+++ b/kernel/compat.c
90548@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90549 mm_segment_t oldfs;
90550 long ret;
90551
90552- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90553+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90554 oldfs = get_fs();
90555 set_fs(KERNEL_DS);
90556 ret = hrtimer_nanosleep_restart(restart);
90557@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
90558 oldfs = get_fs();
90559 set_fs(KERNEL_DS);
90560 ret = hrtimer_nanosleep(&tu,
90561- rmtp ? (struct timespec __user *)&rmt : NULL,
90562+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90563 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90564 set_fs(oldfs);
90565
90566@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
90567 mm_segment_t old_fs = get_fs();
90568
90569 set_fs(KERNEL_DS);
90570- ret = sys_sigpending((old_sigset_t __user *) &s);
90571+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90572 set_fs(old_fs);
90573 if (ret == 0)
90574 ret = put_user(s, set);
90575@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
90576 old_fs = get_fs();
90577 set_fs(KERNEL_DS);
90578 ret = sys_sigprocmask(how,
90579- set ? (old_sigset_t __user *) &s : NULL,
90580- oset ? (old_sigset_t __user *) &s : NULL);
90581+ set ? (old_sigset_t __force_user *) &s : NULL,
90582+ oset ? (old_sigset_t __force_user *) &s : NULL);
90583 set_fs(old_fs);
90584 if (ret == 0)
90585 if (oset)
90586@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
90587 mm_segment_t old_fs = get_fs();
90588
90589 set_fs(KERNEL_DS);
90590- ret = sys_old_getrlimit(resource, &r);
90591+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90592 set_fs(old_fs);
90593
90594 if (!ret) {
90595@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
90596 mm_segment_t old_fs = get_fs();
90597
90598 set_fs(KERNEL_DS);
90599- ret = sys_getrusage(who, (struct rusage __user *) &r);
90600+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
90601 set_fs(old_fs);
90602
90603 if (ret)
90604@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
90605 set_fs (KERNEL_DS);
90606 ret = sys_wait4(pid,
90607 (stat_addr ?
90608- (unsigned int __user *) &status : NULL),
90609- options, (struct rusage __user *) &r);
90610+ (unsigned int __force_user *) &status : NULL),
90611+ options, (struct rusage __force_user *) &r);
90612 set_fs (old_fs);
90613
90614 if (ret > 0) {
90615@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
90616 memset(&info, 0, sizeof(info));
90617
90618 set_fs(KERNEL_DS);
90619- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90620- uru ? (struct rusage __user *)&ru : NULL);
90621+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90622+ uru ? (struct rusage __force_user *)&ru : NULL);
90623 set_fs(old_fs);
90624
90625 if ((ret < 0) || (info.si_signo == 0))
90626@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
90627 oldfs = get_fs();
90628 set_fs(KERNEL_DS);
90629 err = sys_timer_settime(timer_id, flags,
90630- (struct itimerspec __user *) &newts,
90631- (struct itimerspec __user *) &oldts);
90632+ (struct itimerspec __force_user *) &newts,
90633+ (struct itimerspec __force_user *) &oldts);
90634 set_fs(oldfs);
90635 if (!err && old && put_compat_itimerspec(old, &oldts))
90636 return -EFAULT;
90637@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
90638 oldfs = get_fs();
90639 set_fs(KERNEL_DS);
90640 err = sys_timer_gettime(timer_id,
90641- (struct itimerspec __user *) &ts);
90642+ (struct itimerspec __force_user *) &ts);
90643 set_fs(oldfs);
90644 if (!err && put_compat_itimerspec(setting, &ts))
90645 return -EFAULT;
90646@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
90647 oldfs = get_fs();
90648 set_fs(KERNEL_DS);
90649 err = sys_clock_settime(which_clock,
90650- (struct timespec __user *) &ts);
90651+ (struct timespec __force_user *) &ts);
90652 set_fs(oldfs);
90653 return err;
90654 }
90655@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
90656 oldfs = get_fs();
90657 set_fs(KERNEL_DS);
90658 err = sys_clock_gettime(which_clock,
90659- (struct timespec __user *) &ts);
90660+ (struct timespec __force_user *) &ts);
90661 set_fs(oldfs);
90662 if (!err && put_compat_timespec(&ts, tp))
90663 return -EFAULT;
90664@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
90665 oldfs = get_fs();
90666 set_fs(KERNEL_DS);
90667 err = sys_clock_getres(which_clock,
90668- (struct timespec __user *) &ts);
90669+ (struct timespec __force_user *) &ts);
90670 set_fs(oldfs);
90671 if (!err && tp && put_compat_timespec(&ts, tp))
90672 return -EFAULT;
90673@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90674 long err;
90675 mm_segment_t oldfs;
90676 struct timespec tu;
90677- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
90678+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90679
90680- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90681+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90682 oldfs = get_fs();
90683 set_fs(KERNEL_DS);
90684 err = clock_nanosleep_restart(restart);
90685@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
90686 oldfs = get_fs();
90687 set_fs(KERNEL_DS);
90688 err = sys_clock_nanosleep(which_clock, flags,
90689- (struct timespec __user *) &in,
90690- (struct timespec __user *) &out);
90691+ (struct timespec __force_user *) &in,
90692+ (struct timespec __force_user *) &out);
90693 set_fs(oldfs);
90694
90695 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90696diff --git a/kernel/configs.c b/kernel/configs.c
90697index abaee68..047facd 100644
90698--- a/kernel/configs.c
90699+++ b/kernel/configs.c
90700@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
90701 struct proc_dir_entry *entry;
90702
90703 /* create the current config file */
90704+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90705+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90706+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90707+ &ikconfig_file_ops);
90708+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90709+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90710+ &ikconfig_file_ops);
90711+#endif
90712+#else
90713 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90714 &ikconfig_file_ops);
90715+#endif
90716+
90717 if (!entry)
90718 return -ENOMEM;
90719
90720diff --git a/kernel/cpu.c b/kernel/cpu.c
90721index 3f2f04f..4e53ded 100644
90722--- a/kernel/cpu.c
90723+++ b/kernel/cpu.c
90724@@ -20,7 +20,7 @@
90725 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
90726 static DEFINE_MUTEX(cpu_add_remove_lock);
90727
90728-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
90729+static RAW_NOTIFIER_HEAD(cpu_chain);
90730
90731 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
90732 * Should always be manipulated under cpu_add_remove_lock
90733diff --git a/kernel/cred.c b/kernel/cred.c
90734index 0b5b5fc..f7fe51a 100644
90735--- a/kernel/cred.c
90736+++ b/kernel/cred.c
90737@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
90738 */
90739 void __put_cred(struct cred *cred)
90740 {
90741+ pax_track_stack();
90742+
90743 kdebug("__put_cred(%p{%d,%d})", cred,
90744 atomic_read(&cred->usage),
90745 read_cred_subscribers(cred));
90746@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
90747 {
90748 struct cred *cred;
90749
90750+ pax_track_stack();
90751+
90752 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
90753 atomic_read(&tsk->cred->usage),
90754 read_cred_subscribers(tsk->cred));
90755@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
90756 validate_creds(cred);
90757 put_cred(cred);
90758 }
90759+
90760+#ifdef CONFIG_GRKERNSEC_SETXID
90761+ cred = (struct cred *) tsk->delayed_cred;
90762+ if (cred) {
90763+ tsk->delayed_cred = NULL;
90764+ validate_creds(cred);
90765+ put_cred(cred);
90766+ }
90767+#endif
90768 }
90769
90770 /**
90771@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
90772 {
90773 const struct cred *cred;
90774
90775+ pax_track_stack();
90776+
90777 rcu_read_lock();
90778
90779 do {
90780@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
90781 {
90782 struct cred *new;
90783
90784+ pax_track_stack();
90785+
90786 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
90787 if (!new)
90788 return NULL;
90789@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
90790 const struct cred *old;
90791 struct cred *new;
90792
90793+ pax_track_stack();
90794+
90795 validate_process_creds();
90796
90797 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90798@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
90799 struct thread_group_cred *tgcred = NULL;
90800 struct cred *new;
90801
90802+ pax_track_stack();
90803+
90804 #ifdef CONFIG_KEYS
90805 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
90806 if (!tgcred)
90807@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
90808 struct cred *new;
90809 int ret;
90810
90811+ pax_track_stack();
90812+
90813 mutex_init(&p->cred_guard_mutex);
90814
90815 if (
90816@@ -523,11 +546,13 @@ error_put:
90817 * Always returns 0 thus allowing this function to be tail-called at the end
90818 * of, say, sys_setgid().
90819 */
90820-int commit_creds(struct cred *new)
90821+static int __commit_creds(struct cred *new)
90822 {
90823 struct task_struct *task = current;
90824 const struct cred *old = task->real_cred;
90825
90826+ pax_track_stack();
90827+
90828 kdebug("commit_creds(%p{%d,%d})", new,
90829 atomic_read(&new->usage),
90830 read_cred_subscribers(new));
90831@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
90832
90833 get_cred(new); /* we will require a ref for the subj creds too */
90834
90835+ gr_set_role_label(task, new->uid, new->gid);
90836+
90837 /* dumpability changes */
90838 if (old->euid != new->euid ||
90839 old->egid != new->egid ||
90840@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
90841 key_fsgid_changed(task);
90842
90843 /* do it
90844- * - What if a process setreuid()'s and this brings the
90845- * new uid over his NPROC rlimit? We can check this now
90846- * cheaply with the new uid cache, so if it matters
90847- * we should be checking for it. -DaveM
90848+ * RLIMIT_NPROC limits on user->processes have already been checked
90849+ * in set_user().
90850 */
90851 alter_cred_subscribers(new, 2);
90852 if (new->user != old->user)
90853@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
90854 put_cred(old);
90855 return 0;
90856 }
90857+
90858+#ifdef CONFIG_GRKERNSEC_SETXID
90859+extern int set_user(struct cred *new);
90860+
90861+void gr_delayed_cred_worker(void)
90862+{
90863+ const struct cred *new = current->delayed_cred;
90864+ struct cred *ncred;
90865+
90866+ current->delayed_cred = NULL;
90867+
90868+ if (current_uid() && new != NULL) {
90869+ // from doing get_cred on it when queueing this
90870+ put_cred(new);
90871+ return;
90872+ } else if (new == NULL)
90873+ return;
90874+
90875+ ncred = prepare_creds();
90876+ if (!ncred)
90877+ goto die;
90878+ // uids
90879+ ncred->uid = new->uid;
90880+ ncred->euid = new->euid;
90881+ ncred->suid = new->suid;
90882+ ncred->fsuid = new->fsuid;
90883+ // gids
90884+ ncred->gid = new->gid;
90885+ ncred->egid = new->egid;
90886+ ncred->sgid = new->sgid;
90887+ ncred->fsgid = new->fsgid;
90888+ // groups
90889+ if (set_groups(ncred, new->group_info) < 0) {
90890+ abort_creds(ncred);
90891+ goto die;
90892+ }
90893+ // caps
90894+ ncred->securebits = new->securebits;
90895+ ncred->cap_inheritable = new->cap_inheritable;
90896+ ncred->cap_permitted = new->cap_permitted;
90897+ ncred->cap_effective = new->cap_effective;
90898+ ncred->cap_bset = new->cap_bset;
90899+
90900+ if (set_user(ncred)) {
90901+ abort_creds(ncred);
90902+ goto die;
90903+ }
90904+
90905+ // from doing get_cred on it when queueing this
90906+ put_cred(new);
90907+
90908+ __commit_creds(ncred);
90909+ return;
90910+die:
90911+ // from doing get_cred on it when queueing this
90912+ put_cred(new);
90913+ do_group_exit(SIGKILL);
90914+}
90915+#endif
90916+
90917+int commit_creds(struct cred *new)
90918+{
90919+#ifdef CONFIG_GRKERNSEC_SETXID
90920+ struct task_struct *t;
90921+
90922+ /* we won't get called with tasklist_lock held for writing
90923+ and interrupts disabled as the cred struct in that case is
90924+ init_cred
90925+ */
90926+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90927+ !current_uid() && new->uid) {
90928+ rcu_read_lock();
90929+ read_lock(&tasklist_lock);
90930+ for (t = next_thread(current); t != current;
90931+ t = next_thread(t)) {
90932+ if (t->delayed_cred == NULL) {
90933+ t->delayed_cred = get_cred(new);
90934+ set_tsk_need_resched(t);
90935+ }
90936+ }
90937+ read_unlock(&tasklist_lock);
90938+ rcu_read_unlock();
90939+ }
90940+#endif
90941+ return __commit_creds(new);
90942+}
90943+
90944 EXPORT_SYMBOL(commit_creds);
90945
90946+
90947 /**
90948 * abort_creds - Discard a set of credentials and unlock the current task
90949 * @new: The credentials that were going to be applied
90950@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
90951 */
90952 void abort_creds(struct cred *new)
90953 {
90954+ pax_track_stack();
90955+
90956 kdebug("abort_creds(%p{%d,%d})", new,
90957 atomic_read(&new->usage),
90958 read_cred_subscribers(new));
90959@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
90960 {
90961 const struct cred *old = current->cred;
90962
90963+ pax_track_stack();
90964+
90965 kdebug("override_creds(%p{%d,%d})", new,
90966 atomic_read(&new->usage),
90967 read_cred_subscribers(new));
90968@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
90969 {
90970 const struct cred *override = current->cred;
90971
90972+ pax_track_stack();
90973+
90974 kdebug("revert_creds(%p{%d,%d})", old,
90975 atomic_read(&old->usage),
90976 read_cred_subscribers(old));
90977@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
90978 const struct cred *old;
90979 struct cred *new;
90980
90981+ pax_track_stack();
90982+
90983 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90984 if (!new)
90985 return NULL;
90986@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
90987 */
90988 int set_security_override(struct cred *new, u32 secid)
90989 {
90990+ pax_track_stack();
90991+
90992 return security_kernel_act_as(new, secid);
90993 }
90994 EXPORT_SYMBOL(set_security_override);
90995@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
90996 u32 secid;
90997 int ret;
90998
90999+ pax_track_stack();
91000+
91001 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
91002 if (ret < 0)
91003 return ret;
91004diff --git a/kernel/exit.c b/kernel/exit.c
91005index 0f8fae3..66af9b1 100644
91006--- a/kernel/exit.c
91007+++ b/kernel/exit.c
91008@@ -55,6 +55,10 @@
91009 #include <asm/pgtable.h>
91010 #include <asm/mmu_context.h>
91011
91012+#ifdef CONFIG_GRKERNSEC
91013+extern rwlock_t grsec_exec_file_lock;
91014+#endif
91015+
91016 static void exit_mm(struct task_struct * tsk);
91017
91018 static void __unhash_process(struct task_struct *p)
91019@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
91020 struct task_struct *leader;
91021 int zap_leader;
91022 repeat:
91023+#ifdef CONFIG_NET
91024+ gr_del_task_from_ip_table(p);
91025+#endif
91026+
91027 tracehook_prepare_release_task(p);
91028 /* don't need to get the RCU readlock here - the process is dead and
91029 * can't be modifying its own credentials */
91030@@ -397,7 +405,7 @@ int allow_signal(int sig)
91031 * know it'll be handled, so that they don't get converted to
91032 * SIGKILL or just silently dropped.
91033 */
91034- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
91035+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
91036 recalc_sigpending();
91037 spin_unlock_irq(&current->sighand->siglock);
91038 return 0;
91039@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
91040 vsnprintf(current->comm, sizeof(current->comm), name, args);
91041 va_end(args);
91042
91043+#ifdef CONFIG_GRKERNSEC
91044+ write_lock(&grsec_exec_file_lock);
91045+ if (current->exec_file) {
91046+ fput(current->exec_file);
91047+ current->exec_file = NULL;
91048+ }
91049+ write_unlock(&grsec_exec_file_lock);
91050+#endif
91051+
91052+ gr_set_kernel_label(current);
91053+
91054 /*
91055 * If we were started as result of loading a module, close all of the
91056 * user space pages. We don't need them, and if we didn't close them
91057@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
91058 struct task_struct *tsk = current;
91059 int group_dead;
91060
91061- profile_task_exit(tsk);
91062-
91063- WARN_ON(atomic_read(&tsk->fs_excl));
91064-
91065+ /*
91066+ * Check this first since set_fs() below depends on
91067+ * current_thread_info(), which we better not access when we're in
91068+ * interrupt context. Other than that, we want to do the set_fs()
91069+ * as early as possible.
91070+ */
91071 if (unlikely(in_interrupt()))
91072 panic("Aiee, killing interrupt handler!");
91073- if (unlikely(!tsk->pid))
91074- panic("Attempted to kill the idle task!");
91075
91076 /*
91077- * If do_exit is called because this processes oopsed, it's possible
91078+ * If do_exit is called because this processes Oops'ed, it's possible
91079 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
91080 * continuing. Amongst other possible reasons, this is to prevent
91081 * mm_release()->clear_child_tid() from writing to a user-controlled
91082@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
91083 */
91084 set_fs(USER_DS);
91085
91086+ profile_task_exit(tsk);
91087+
91088+ WARN_ON(atomic_read(&tsk->fs_excl));
91089+
91090+ if (unlikely(!tsk->pid))
91091+ panic("Attempted to kill the idle task!");
91092+
91093 tracehook_report_exit(&code);
91094
91095 validate_creds_for_do_exit(tsk);
91096@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
91097 tsk->exit_code = code;
91098 taskstats_exit(tsk, group_dead);
91099
91100+ gr_acl_handle_psacct(tsk, code);
91101+ gr_acl_handle_exit();
91102+
91103 exit_mm(tsk);
91104
91105 if (group_dead)
91106@@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
91107 tsk->flags |= PF_EXITPIDONE;
91108
91109 if (tsk->io_context)
91110- exit_io_context();
91111+ exit_io_context(tsk);
91112
91113 if (tsk->splice_pipe)
91114 __free_pipe_info(tsk->splice_pipe);
91115@@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
91116 * Take down every thread in the group. This is called by fatal signals
91117 * as well as by sys_exit_group (below).
91118 */
91119-NORET_TYPE void
91120+__noreturn void
91121 do_group_exit(int exit_code)
91122 {
91123 struct signal_struct *sig = current->signal;
91124@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
91125
91126 if (unlikely(wo->wo_flags & WNOWAIT)) {
91127 int exit_code = p->exit_code;
91128- int why, status;
91129+ int why;
91130
91131 get_task_struct(p);
91132 read_unlock(&tasklist_lock);
91133diff --git a/kernel/fork.c b/kernel/fork.c
91134index 4bde56f..8976a8f 100644
91135--- a/kernel/fork.c
91136+++ b/kernel/fork.c
91137@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91138 *stackend = STACK_END_MAGIC; /* for overflow detection */
91139
91140 #ifdef CONFIG_CC_STACKPROTECTOR
91141- tsk->stack_canary = get_random_int();
91142+ tsk->stack_canary = pax_get_random_long();
91143 #endif
91144
91145 /* One for us, one for whoever does the "release_task()" (usually parent) */
91146@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91147 mm->locked_vm = 0;
91148 mm->mmap = NULL;
91149 mm->mmap_cache = NULL;
91150- mm->free_area_cache = oldmm->mmap_base;
91151- mm->cached_hole_size = ~0UL;
91152+ mm->free_area_cache = oldmm->free_area_cache;
91153+ mm->cached_hole_size = oldmm->cached_hole_size;
91154 mm->map_count = 0;
91155 cpumask_clear(mm_cpumask(mm));
91156 mm->mm_rb = RB_ROOT;
91157@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91158 tmp->vm_flags &= ~VM_LOCKED;
91159 tmp->vm_mm = mm;
91160 tmp->vm_next = tmp->vm_prev = NULL;
91161+ tmp->vm_mirror = NULL;
91162 anon_vma_link(tmp);
91163 file = tmp->vm_file;
91164 if (file) {
91165@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91166 if (retval)
91167 goto out;
91168 }
91169+
91170+#ifdef CONFIG_PAX_SEGMEXEC
91171+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
91172+ struct vm_area_struct *mpnt_m;
91173+
91174+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
91175+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
91176+
91177+ if (!mpnt->vm_mirror)
91178+ continue;
91179+
91180+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
91181+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
91182+ mpnt->vm_mirror = mpnt_m;
91183+ } else {
91184+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
91185+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
91186+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
91187+ mpnt->vm_mirror->vm_mirror = mpnt;
91188+ }
91189+ }
91190+ BUG_ON(mpnt_m);
91191+ }
91192+#endif
91193+
91194 /* a new mm has just been created */
91195 arch_dup_mmap(oldmm, mm);
91196 retval = 0;
91197@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91198 write_unlock(&fs->lock);
91199 return -EAGAIN;
91200 }
91201- fs->users++;
91202+ atomic_inc(&fs->users);
91203 write_unlock(&fs->lock);
91204 return 0;
91205 }
91206 tsk->fs = copy_fs_struct(fs);
91207 if (!tsk->fs)
91208 return -ENOMEM;
91209+ gr_set_chroot_entries(tsk, &tsk->fs->root);
91210 return 0;
91211 }
91212
91213@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91214 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91215 #endif
91216 retval = -EAGAIN;
91217+
91218+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91219+
91220 if (atomic_read(&p->real_cred->user->processes) >=
91221 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
91222- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
91223- p->real_cred->user != INIT_USER)
91224+ if (p->real_cred->user != INIT_USER &&
91225+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
91226 goto bad_fork_free;
91227 }
91228+ current->flags &= ~PF_NPROC_EXCEEDED;
91229
91230 retval = copy_creds(p, clone_flags);
91231 if (retval < 0)
91232@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91233 goto bad_fork_free_pid;
91234 }
91235
91236+ gr_copy_label(p);
91237+
91238 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
91239 /*
91240 * Clear TID on mm_release()?
91241@@ -1299,7 +1332,8 @@ bad_fork_free_pid:
91242 if (pid != &init_struct_pid)
91243 free_pid(pid);
91244 bad_fork_cleanup_io:
91245- put_io_context(p->io_context);
91246+ if (p->io_context)
91247+ exit_io_context(p);
91248 bad_fork_cleanup_namespaces:
91249 exit_task_namespaces(p);
91250 bad_fork_cleanup_mm:
91251@@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
91252 bad_fork_free:
91253 free_task(p);
91254 fork_out:
91255+ gr_log_forkfail(retval);
91256+
91257 return ERR_PTR(retval);
91258 }
91259
91260@@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
91261 if (clone_flags & CLONE_PARENT_SETTID)
91262 put_user(nr, parent_tidptr);
91263
91264+ gr_handle_brute_check();
91265+
91266 if (clone_flags & CLONE_VFORK) {
91267 p->vfork_done = &vfork;
91268 init_completion(&vfork);
91269@@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91270 return 0;
91271
91272 /* don't need lock here; in the worst case we'll do useless copy */
91273- if (fs->users == 1)
91274+ if (atomic_read(&fs->users) == 1)
91275 return 0;
91276
91277 *new_fsp = copy_fs_struct(fs);
91278@@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91279 fs = current->fs;
91280 write_lock(&fs->lock);
91281 current->fs = new_fs;
91282- if (--fs->users)
91283+ gr_set_chroot_entries(current, &current->fs->root);
91284+ if (atomic_dec_return(&fs->users))
91285 new_fs = NULL;
91286 else
91287 new_fs = fs;
91288diff --git a/kernel/futex.c b/kernel/futex.c
91289index fb98c9f..333faec 100644
91290--- a/kernel/futex.c
91291+++ b/kernel/futex.c
91292@@ -54,6 +54,7 @@
91293 #include <linux/mount.h>
91294 #include <linux/pagemap.h>
91295 #include <linux/syscalls.h>
91296+#include <linux/ptrace.h>
91297 #include <linux/signal.h>
91298 #include <linux/module.h>
91299 #include <linux/magic.h>
91300@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91301 struct page *page;
91302 int err, ro = 0;
91303
91304+#ifdef CONFIG_PAX_SEGMEXEC
91305+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91306+ return -EFAULT;
91307+#endif
91308+
91309 /*
91310 * The futex address must be "naturally" aligned.
91311 */
91312@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
91313 struct futex_q q;
91314 int ret;
91315
91316+ pax_track_stack();
91317+
91318 if (!bitset)
91319 return -EINVAL;
91320
91321@@ -1871,7 +1879,7 @@ retry:
91322
91323 restart = &current_thread_info()->restart_block;
91324 restart->fn = futex_wait_restart;
91325- restart->futex.uaddr = (u32 *)uaddr;
91326+ restart->futex.uaddr = uaddr;
91327 restart->futex.val = val;
91328 restart->futex.time = abs_time->tv64;
91329 restart->futex.bitset = bitset;
91330@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
91331 struct futex_q q;
91332 int res, ret;
91333
91334+ pax_track_stack();
91335+
91336 if (!bitset)
91337 return -EINVAL;
91338
91339@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
91340 if (!p)
91341 goto err_unlock;
91342 ret = -EPERM;
91343+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91344+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
91345+ goto err_unlock;
91346+#endif
91347 pcred = __task_cred(p);
91348 if (cred->euid != pcred->euid &&
91349 cred->euid != pcred->uid &&
91350@@ -2489,7 +2503,7 @@ retry:
91351 */
91352 static inline int fetch_robust_entry(struct robust_list __user **entry,
91353 struct robust_list __user * __user *head,
91354- int *pi)
91355+ unsigned int *pi)
91356 {
91357 unsigned long uentry;
91358
91359@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
91360 {
91361 u32 curval;
91362 int i;
91363+ mm_segment_t oldfs;
91364
91365 /*
91366 * This will fail and we want it. Some arch implementations do
91367@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
91368 * implementation, the non functional ones will return
91369 * -ENOSYS.
91370 */
91371+ oldfs = get_fs();
91372+ set_fs(USER_DS);
91373 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
91374+ set_fs(oldfs);
91375 if (curval == -EFAULT)
91376 futex_cmpxchg_enabled = 1;
91377
91378diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91379index 2357165..eb25501 100644
91380--- a/kernel/futex_compat.c
91381+++ b/kernel/futex_compat.c
91382@@ -10,6 +10,7 @@
91383 #include <linux/compat.h>
91384 #include <linux/nsproxy.h>
91385 #include <linux/futex.h>
91386+#include <linux/ptrace.h>
91387
91388 #include <asm/uaccess.h>
91389
91390@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91391 {
91392 struct compat_robust_list_head __user *head;
91393 unsigned long ret;
91394- const struct cred *cred = current_cred(), *pcred;
91395+ const struct cred *cred = current_cred();
91396+ const struct cred *pcred;
91397
91398 if (!futex_cmpxchg_enabled)
91399 return -ENOSYS;
91400@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91401 if (!p)
91402 goto err_unlock;
91403 ret = -EPERM;
91404+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91405+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
91406+ goto err_unlock;
91407+#endif
91408 pcred = __task_cred(p);
91409 if (cred->euid != pcred->euid &&
91410 cred->euid != pcred->uid &&
91411diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91412index 9b22d03..6295b62 100644
91413--- a/kernel/gcov/base.c
91414+++ b/kernel/gcov/base.c
91415@@ -102,11 +102,6 @@ void gcov_enable_events(void)
91416 }
91417
91418 #ifdef CONFIG_MODULES
91419-static inline int within(void *addr, void *start, unsigned long size)
91420-{
91421- return ((addr >= start) && (addr < start + size));
91422-}
91423-
91424 /* Update list and generate events when modules are unloaded. */
91425 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91426 void *data)
91427@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91428 prev = NULL;
91429 /* Remove entries located in module from linked list. */
91430 for (info = gcov_info_head; info; info = info->next) {
91431- if (within(info, mod->module_core, mod->core_size)) {
91432+ if (within_module_core_rw((unsigned long)info, mod)) {
91433 if (prev)
91434 prev->next = info->next;
91435 else
91436diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91437index a6e9d00..a0da4f9 100644
91438--- a/kernel/hrtimer.c
91439+++ b/kernel/hrtimer.c
91440@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
91441 local_irq_restore(flags);
91442 }
91443
91444-static void run_hrtimer_softirq(struct softirq_action *h)
91445+static void run_hrtimer_softirq(void)
91446 {
91447 hrtimer_peek_ahead_timers();
91448 }
91449diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91450index 8b6b8b6..6bc87df 100644
91451--- a/kernel/kallsyms.c
91452+++ b/kernel/kallsyms.c
91453@@ -11,6 +11,9 @@
91454 * Changed the compression method from stem compression to "table lookup"
91455 * compression (see scripts/kallsyms.c for a more complete description)
91456 */
91457+#ifdef CONFIG_GRKERNSEC_HIDESYM
91458+#define __INCLUDED_BY_HIDESYM 1
91459+#endif
91460 #include <linux/kallsyms.h>
91461 #include <linux/module.h>
91462 #include <linux/init.h>
91463@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
91464
91465 static inline int is_kernel_inittext(unsigned long addr)
91466 {
91467+ if (system_state != SYSTEM_BOOTING)
91468+ return 0;
91469+
91470 if (addr >= (unsigned long)_sinittext
91471 && addr <= (unsigned long)_einittext)
91472 return 1;
91473 return 0;
91474 }
91475
91476+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91477+#ifdef CONFIG_MODULES
91478+static inline int is_module_text(unsigned long addr)
91479+{
91480+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91481+ return 1;
91482+
91483+ addr = ktla_ktva(addr);
91484+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91485+}
91486+#else
91487+static inline int is_module_text(unsigned long addr)
91488+{
91489+ return 0;
91490+}
91491+#endif
91492+#endif
91493+
91494 static inline int is_kernel_text(unsigned long addr)
91495 {
91496 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91497@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
91498
91499 static inline int is_kernel(unsigned long addr)
91500 {
91501+
91502+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91503+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91504+ return 1;
91505+
91506+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91507+#else
91508 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91509+#endif
91510+
91511 return 1;
91512 return in_gate_area_no_task(addr);
91513 }
91514
91515 static int is_ksym_addr(unsigned long addr)
91516 {
91517+
91518+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91519+ if (is_module_text(addr))
91520+ return 0;
91521+#endif
91522+
91523 if (all_var)
91524 return is_kernel(addr);
91525
91526@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91527
91528 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91529 {
91530- iter->name[0] = '\0';
91531 iter->nameoff = get_symbol_offset(new_pos);
91532 iter->pos = new_pos;
91533 }
91534@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
91535 {
91536 struct kallsym_iter *iter = m->private;
91537
91538+#ifdef CONFIG_GRKERNSEC_HIDESYM
91539+ if (current_uid())
91540+ return 0;
91541+#endif
91542+
91543 /* Some debugging symbols have no name. Ignore them. */
91544 if (!iter->name[0])
91545 return 0;
91546@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91547 struct kallsym_iter *iter;
91548 int ret;
91549
91550- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91551+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91552 if (!iter)
91553 return -ENOMEM;
91554 reset_iter(iter, 0);
91555diff --git a/kernel/kexec.c b/kernel/kexec.c
91556index f336e21..9c1c20b 100644
91557--- a/kernel/kexec.c
91558+++ b/kernel/kexec.c
91559@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
91560 unsigned long flags)
91561 {
91562 struct compat_kexec_segment in;
91563- struct kexec_segment out, __user *ksegments;
91564+ struct kexec_segment out;
91565+ struct kexec_segment __user *ksegments;
91566 unsigned long i, result;
91567
91568 /* Don't allow clients that don't understand the native
91569diff --git a/kernel/kgdb.c b/kernel/kgdb.c
91570index 53dae4b..9ba3743 100644
91571--- a/kernel/kgdb.c
91572+++ b/kernel/kgdb.c
91573@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
91574 /* Guard for recursive entry */
91575 static int exception_level;
91576
91577-static struct kgdb_io *kgdb_io_ops;
91578+static const struct kgdb_io *kgdb_io_ops;
91579 static DEFINE_SPINLOCK(kgdb_registration_lock);
91580
91581 /* kgdb console driver is loaded */
91582@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
91583 */
91584 static atomic_t passive_cpu_wait[NR_CPUS];
91585 static atomic_t cpu_in_kgdb[NR_CPUS];
91586-atomic_t kgdb_setting_breakpoint;
91587+atomic_unchecked_t kgdb_setting_breakpoint;
91588
91589 struct task_struct *kgdb_usethread;
91590 struct task_struct *kgdb_contthread;
91591@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
91592 sizeof(unsigned long)];
91593
91594 /* to keep track of the CPU which is doing the single stepping*/
91595-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91596+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91597
91598 /*
91599 * If you are debugging a problem where roundup (the collection of
91600@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
91601 return 0;
91602 if (kgdb_connected)
91603 return 1;
91604- if (atomic_read(&kgdb_setting_breakpoint))
91605+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
91606 return 1;
91607 if (print_wait)
91608 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
91609@@ -1426,8 +1426,8 @@ acquirelock:
91610 * instance of the exception handler wanted to come into the
91611 * debugger on a different CPU via a single step
91612 */
91613- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
91614- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
91615+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
91616+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
91617
91618 atomic_set(&kgdb_active, -1);
91619 touch_softlockup_watchdog();
91620@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
91621 *
91622 * Register it with the KGDB core.
91623 */
91624-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
91625+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
91626 {
91627 int err;
91628
91629@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
91630 *
91631 * Unregister it with the KGDB core.
91632 */
91633-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
91634+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
91635 {
91636 BUG_ON(kgdb_connected);
91637
91638@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
91639 */
91640 void kgdb_breakpoint(void)
91641 {
91642- atomic_set(&kgdb_setting_breakpoint, 1);
91643+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
91644 wmb(); /* Sync point before breakpoint */
91645 arch_kgdb_breakpoint();
91646 wmb(); /* Sync point after breakpoint */
91647- atomic_set(&kgdb_setting_breakpoint, 0);
91648+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
91649 }
91650 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
91651
91652diff --git a/kernel/kmod.c b/kernel/kmod.c
91653index a061472..40884b6 100644
91654--- a/kernel/kmod.c
91655+++ b/kernel/kmod.c
91656@@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
91657 * If module auto-loading support is disabled then this function
91658 * becomes a no-operation.
91659 */
91660-int __request_module(bool wait, const char *fmt, ...)
91661+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91662 {
91663- va_list args;
91664 char module_name[MODULE_NAME_LEN];
91665 unsigned int max_modprobes;
91666 int ret;
91667- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
91668+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
91669 static char *envp[] = { "HOME=/",
91670 "TERM=linux",
91671 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
91672@@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
91673 if (ret)
91674 return ret;
91675
91676- va_start(args, fmt);
91677- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91678- va_end(args);
91679+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91680 if (ret >= MODULE_NAME_LEN)
91681 return -ENAMETOOLONG;
91682
91683+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91684+ if (!current_uid()) {
91685+ /* hack to workaround consolekit/udisks stupidity */
91686+ read_lock(&tasklist_lock);
91687+ if (!strcmp(current->comm, "mount") &&
91688+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91689+ read_unlock(&tasklist_lock);
91690+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91691+ return -EPERM;
91692+ }
91693+ read_unlock(&tasklist_lock);
91694+ }
91695+#endif
91696+
91697 /* If modprobe needs a service that is in a module, we get a recursive
91698 * loop. Limit the number of running kmod threads to max_threads/2 or
91699 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91700@@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
91701 atomic_dec(&kmod_concurrent);
91702 return ret;
91703 }
91704+
91705+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91706+{
91707+ va_list args;
91708+ int ret;
91709+
91710+ va_start(args, fmt);
91711+ ret = ____request_module(wait, module_param, fmt, args);
91712+ va_end(args);
91713+
91714+ return ret;
91715+}
91716+
91717+int __request_module(bool wait, const char *fmt, ...)
91718+{
91719+ va_list args;
91720+ int ret;
91721+
91722+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91723+ if (current_uid()) {
91724+ char module_param[MODULE_NAME_LEN];
91725+
91726+ memset(module_param, 0, sizeof(module_param));
91727+
91728+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
91729+
91730+ va_start(args, fmt);
91731+ ret = ____request_module(wait, module_param, fmt, args);
91732+ va_end(args);
91733+
91734+ return ret;
91735+ }
91736+#endif
91737+
91738+ va_start(args, fmt);
91739+ ret = ____request_module(wait, NULL, fmt, args);
91740+ va_end(args);
91741+
91742+ return ret;
91743+}
91744+
91745+
91746 EXPORT_SYMBOL(__request_module);
91747 #endif /* CONFIG_MODULES */
91748
91749@@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
91750 *
91751 * Thus the __user pointer cast is valid here.
91752 */
91753- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91754+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91755
91756 /*
91757 * If ret is 0, either ____call_usermodehelper failed and the
91758diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91759index 176d825..77fa8ea 100644
91760--- a/kernel/kprobes.c
91761+++ b/kernel/kprobes.c
91762@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
91763 * kernel image and loaded module images reside. This is required
91764 * so x86_64 can correctly handle the %rip-relative fixups.
91765 */
91766- kip->insns = module_alloc(PAGE_SIZE);
91767+ kip->insns = module_alloc_exec(PAGE_SIZE);
91768 if (!kip->insns) {
91769 kfree(kip);
91770 return NULL;
91771@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
91772 */
91773 if (!list_is_singular(&kprobe_insn_pages)) {
91774 list_del(&kip->list);
91775- module_free(NULL, kip->insns);
91776+ module_free_exec(NULL, kip->insns);
91777 kfree(kip);
91778 }
91779 return 1;
91780@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
91781 {
91782 int i, err = 0;
91783 unsigned long offset = 0, size = 0;
91784- char *modname, namebuf[128];
91785+ char *modname, namebuf[KSYM_NAME_LEN];
91786 const char *symbol_name;
91787 void *addr;
91788 struct kprobe_blackpoint *kb;
91789@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
91790 const char *sym = NULL;
91791 unsigned int i = *(loff_t *) v;
91792 unsigned long offset = 0;
91793- char *modname, namebuf[128];
91794+ char *modname, namebuf[KSYM_NAME_LEN];
91795
91796 head = &kprobe_table[i];
91797 preempt_disable();
91798diff --git a/kernel/lockdep.c b/kernel/lockdep.c
91799index d86fe89..d12fc66 100644
91800--- a/kernel/lockdep.c
91801+++ b/kernel/lockdep.c
91802@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
91803 /*
91804 * Various lockdep statistics:
91805 */
91806-atomic_t chain_lookup_hits;
91807-atomic_t chain_lookup_misses;
91808-atomic_t hardirqs_on_events;
91809-atomic_t hardirqs_off_events;
91810-atomic_t redundant_hardirqs_on;
91811-atomic_t redundant_hardirqs_off;
91812-atomic_t softirqs_on_events;
91813-atomic_t softirqs_off_events;
91814-atomic_t redundant_softirqs_on;
91815-atomic_t redundant_softirqs_off;
91816-atomic_t nr_unused_locks;
91817-atomic_t nr_cyclic_checks;
91818-atomic_t nr_find_usage_forwards_checks;
91819-atomic_t nr_find_usage_backwards_checks;
91820+atomic_unchecked_t chain_lookup_hits;
91821+atomic_unchecked_t chain_lookup_misses;
91822+atomic_unchecked_t hardirqs_on_events;
91823+atomic_unchecked_t hardirqs_off_events;
91824+atomic_unchecked_t redundant_hardirqs_on;
91825+atomic_unchecked_t redundant_hardirqs_off;
91826+atomic_unchecked_t softirqs_on_events;
91827+atomic_unchecked_t softirqs_off_events;
91828+atomic_unchecked_t redundant_softirqs_on;
91829+atomic_unchecked_t redundant_softirqs_off;
91830+atomic_unchecked_t nr_unused_locks;
91831+atomic_unchecked_t nr_cyclic_checks;
91832+atomic_unchecked_t nr_find_usage_forwards_checks;
91833+atomic_unchecked_t nr_find_usage_backwards_checks;
91834 #endif
91835
91836 /*
91837@@ -577,6 +577,10 @@ static int static_obj(void *obj)
91838 int i;
91839 #endif
91840
91841+#ifdef CONFIG_PAX_KERNEXEC
91842+ start = ktla_ktva(start);
91843+#endif
91844+
91845 /*
91846 * static variable?
91847 */
91848@@ -592,8 +596,7 @@ static int static_obj(void *obj)
91849 */
91850 for_each_possible_cpu(i) {
91851 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
91852- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
91853- + per_cpu_offset(i);
91854+ end = start + PERCPU_ENOUGH_ROOM;
91855
91856 if ((addr >= start) && (addr < end))
91857 return 1;
91858@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91859 if (!static_obj(lock->key)) {
91860 debug_locks_off();
91861 printk("INFO: trying to register non-static key.\n");
91862+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91863 printk("the code is fine but needs lockdep annotation.\n");
91864 printk("turning off the locking correctness validator.\n");
91865 dump_stack();
91866@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91867 if (!class)
91868 return 0;
91869 }
91870- debug_atomic_inc((atomic_t *)&class->ops);
91871+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
91872 if (very_verbose(class)) {
91873 printk("\nacquire class [%p] %s", class->key, class->name);
91874 if (class->name_version > 1)
91875diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
91876index a2ee95a..092f0f2 100644
91877--- a/kernel/lockdep_internals.h
91878+++ b/kernel/lockdep_internals.h
91879@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
91880 /*
91881 * Various lockdep statistics:
91882 */
91883-extern atomic_t chain_lookup_hits;
91884-extern atomic_t chain_lookup_misses;
91885-extern atomic_t hardirqs_on_events;
91886-extern atomic_t hardirqs_off_events;
91887-extern atomic_t redundant_hardirqs_on;
91888-extern atomic_t redundant_hardirqs_off;
91889-extern atomic_t softirqs_on_events;
91890-extern atomic_t softirqs_off_events;
91891-extern atomic_t redundant_softirqs_on;
91892-extern atomic_t redundant_softirqs_off;
91893-extern atomic_t nr_unused_locks;
91894-extern atomic_t nr_cyclic_checks;
91895-extern atomic_t nr_cyclic_check_recursions;
91896-extern atomic_t nr_find_usage_forwards_checks;
91897-extern atomic_t nr_find_usage_forwards_recursions;
91898-extern atomic_t nr_find_usage_backwards_checks;
91899-extern atomic_t nr_find_usage_backwards_recursions;
91900-# define debug_atomic_inc(ptr) atomic_inc(ptr)
91901-# define debug_atomic_dec(ptr) atomic_dec(ptr)
91902-# define debug_atomic_read(ptr) atomic_read(ptr)
91903+extern atomic_unchecked_t chain_lookup_hits;
91904+extern atomic_unchecked_t chain_lookup_misses;
91905+extern atomic_unchecked_t hardirqs_on_events;
91906+extern atomic_unchecked_t hardirqs_off_events;
91907+extern atomic_unchecked_t redundant_hardirqs_on;
91908+extern atomic_unchecked_t redundant_hardirqs_off;
91909+extern atomic_unchecked_t softirqs_on_events;
91910+extern atomic_unchecked_t softirqs_off_events;
91911+extern atomic_unchecked_t redundant_softirqs_on;
91912+extern atomic_unchecked_t redundant_softirqs_off;
91913+extern atomic_unchecked_t nr_unused_locks;
91914+extern atomic_unchecked_t nr_cyclic_checks;
91915+extern atomic_unchecked_t nr_cyclic_check_recursions;
91916+extern atomic_unchecked_t nr_find_usage_forwards_checks;
91917+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
91918+extern atomic_unchecked_t nr_find_usage_backwards_checks;
91919+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
91920+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
91921+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
91922+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
91923 #else
91924 # define debug_atomic_inc(ptr) do { } while (0)
91925 # define debug_atomic_dec(ptr) do { } while (0)
91926diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
91927index d4aba4f..02a353f 100644
91928--- a/kernel/lockdep_proc.c
91929+++ b/kernel/lockdep_proc.c
91930@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
91931
91932 static void print_name(struct seq_file *m, struct lock_class *class)
91933 {
91934- char str[128];
91935+ char str[KSYM_NAME_LEN];
91936 const char *name = class->name;
91937
91938 if (!name) {
91939diff --git a/kernel/module.c b/kernel/module.c
91940index 4b270e6..2efdb65 100644
91941--- a/kernel/module.c
91942+++ b/kernel/module.c
91943@@ -55,6 +55,7 @@
91944 #include <linux/async.h>
91945 #include <linux/percpu.h>
91946 #include <linux/kmemleak.h>
91947+#include <linux/grsecurity.h>
91948
91949 #define CREATE_TRACE_POINTS
91950 #include <trace/events/module.h>
91951@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
91952 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91953
91954 /* Bounds of module allocation, for speeding __module_address */
91955-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91956+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91957+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91958
91959 int register_module_notifier(struct notifier_block * nb)
91960 {
91961@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91962 return true;
91963
91964 list_for_each_entry_rcu(mod, &modules, list) {
91965- struct symsearch arr[] = {
91966+ struct symsearch modarr[] = {
91967 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91968 NOT_GPL_ONLY, false },
91969 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91970@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91971 #endif
91972 };
91973
91974- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91975+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91976 return true;
91977 }
91978 return false;
91979@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
91980 void *ptr;
91981 int cpu;
91982
91983- if (align > PAGE_SIZE) {
91984+ if (align-1 >= PAGE_SIZE) {
91985 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
91986 name, align, PAGE_SIZE);
91987 align = PAGE_SIZE;
91988@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
91989 * /sys/module/foo/sections stuff
91990 * J. Corbet <corbet@lwn.net>
91991 */
91992-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
91993+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91994
91995 static inline bool sect_empty(const Elf_Shdr *sect)
91996 {
91997@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
91998 destroy_params(mod->kp, mod->num_kp);
91999
92000 /* This may be NULL, but that's OK */
92001- module_free(mod, mod->module_init);
92002+ module_free(mod, mod->module_init_rw);
92003+ module_free_exec(mod, mod->module_init_rx);
92004 kfree(mod->args);
92005 if (mod->percpu)
92006 percpu_modfree(mod->percpu);
92007@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
92008 percpu_modfree(mod->refptr);
92009 #endif
92010 /* Free lock-classes: */
92011- lockdep_free_key_range(mod->module_core, mod->core_size);
92012+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
92013+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
92014
92015 /* Finally, free the core (containing the module structure) */
92016- module_free(mod, mod->module_core);
92017+ module_free_exec(mod, mod->module_core_rx);
92018+ module_free(mod, mod->module_core_rw);
92019
92020 #ifdef CONFIG_MPU
92021 update_protections(current->mm);
92022@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92023 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92024 int ret = 0;
92025 const struct kernel_symbol *ksym;
92026+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92027+ int is_fs_load = 0;
92028+ int register_filesystem_found = 0;
92029+ char *p;
92030+
92031+ p = strstr(mod->args, "grsec_modharden_fs");
92032+
92033+ if (p) {
92034+ char *endptr = p + strlen("grsec_modharden_fs");
92035+ /* copy \0 as well */
92036+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
92037+ is_fs_load = 1;
92038+ }
92039+#endif
92040+
92041
92042 for (i = 1; i < n; i++) {
92043+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92044+ const char *name = strtab + sym[i].st_name;
92045+
92046+ /* it's a real shame this will never get ripped and copied
92047+ upstream! ;(
92048+ */
92049+ if (is_fs_load && !strcmp(name, "register_filesystem"))
92050+ register_filesystem_found = 1;
92051+#endif
92052 switch (sym[i].st_shndx) {
92053 case SHN_COMMON:
92054 /* We compiled with -fno-common. These are not
92055@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92056 strtab + sym[i].st_name, mod);
92057 /* Ok if resolved. */
92058 if (ksym) {
92059+ pax_open_kernel();
92060 sym[i].st_value = ksym->value;
92061+ pax_close_kernel();
92062 break;
92063 }
92064
92065@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
92066 secbase = (unsigned long)mod->percpu;
92067 else
92068 secbase = sechdrs[sym[i].st_shndx].sh_addr;
92069+ pax_open_kernel();
92070 sym[i].st_value += secbase;
92071+ pax_close_kernel();
92072 break;
92073 }
92074 }
92075
92076+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92077+ if (is_fs_load && !register_filesystem_found) {
92078+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92079+ ret = -EPERM;
92080+ }
92081+#endif
92082+
92083 return ret;
92084 }
92085
92086@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
92087 || s->sh_entsize != ~0UL
92088 || strstarts(secstrings + s->sh_name, ".init"))
92089 continue;
92090- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92091+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92092+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92093+ else
92094+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92095 DEBUGP("\t%s\n", secstrings + s->sh_name);
92096 }
92097- if (m == 0)
92098- mod->core_text_size = mod->core_size;
92099 }
92100
92101 DEBUGP("Init section allocation order:\n");
92102@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
92103 || s->sh_entsize != ~0UL
92104 || !strstarts(secstrings + s->sh_name, ".init"))
92105 continue;
92106- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92107- | INIT_OFFSET_MASK);
92108+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92109+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92110+ else
92111+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92112+ s->sh_entsize |= INIT_OFFSET_MASK;
92113 DEBUGP("\t%s\n", secstrings + s->sh_name);
92114 }
92115- if (m == 0)
92116- mod->init_text_size = mod->init_size;
92117 }
92118 }
92119
92120@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
92121
92122 /* As per nm */
92123 static char elf_type(const Elf_Sym *sym,
92124- Elf_Shdr *sechdrs,
92125- const char *secstrings,
92126- struct module *mod)
92127+ const Elf_Shdr *sechdrs,
92128+ const char *secstrings)
92129 {
92130 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
92131 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
92132@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
92133
92134 /* Put symbol section at end of init part of module. */
92135 symsect->sh_flags |= SHF_ALLOC;
92136- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92137+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92138 symindex) | INIT_OFFSET_MASK;
92139 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
92140
92141@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
92142 }
92143
92144 /* Append room for core symbols at end of core part. */
92145- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92146- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
92147+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92148+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
92149
92150 /* Put string table section at end of init part of module. */
92151 strsect->sh_flags |= SHF_ALLOC;
92152- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92153+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92154 strindex) | INIT_OFFSET_MASK;
92155 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
92156
92157 /* Append room for core symbols' strings at end of core part. */
92158- *pstroffs = mod->core_size;
92159+ *pstroffs = mod->core_size_rx;
92160 __set_bit(0, strmap);
92161- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
92162+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
92163
92164 return symoffs;
92165 }
92166@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
92167 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92168 mod->strtab = (void *)sechdrs[strindex].sh_addr;
92169
92170+ pax_open_kernel();
92171+
92172 /* Set types up while we still have access to sections. */
92173 for (i = 0; i < mod->num_symtab; i++)
92174 mod->symtab[i].st_info
92175- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
92176+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
92177
92178- mod->core_symtab = dst = mod->module_core + symoffs;
92179+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
92180 src = mod->symtab;
92181 *dst = *src;
92182 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
92183@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
92184 }
92185 mod->core_num_syms = ndst;
92186
92187- mod->core_strtab = s = mod->module_core + stroffs;
92188+ mod->core_strtab = s = mod->module_core_rx + stroffs;
92189 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
92190 if (test_bit(i, strmap))
92191 *++s = mod->strtab[i];
92192+
92193+ pax_close_kernel();
92194 }
92195 #else
92196 static inline unsigned long layout_symtab(struct module *mod,
92197@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
92198 #endif
92199 }
92200
92201-static void *module_alloc_update_bounds(unsigned long size)
92202+static void *module_alloc_update_bounds_rw(unsigned long size)
92203 {
92204 void *ret = module_alloc(size);
92205
92206 if (ret) {
92207 /* Update module bounds. */
92208- if ((unsigned long)ret < module_addr_min)
92209- module_addr_min = (unsigned long)ret;
92210- if ((unsigned long)ret + size > module_addr_max)
92211- module_addr_max = (unsigned long)ret + size;
92212+ if ((unsigned long)ret < module_addr_min_rw)
92213+ module_addr_min_rw = (unsigned long)ret;
92214+ if ((unsigned long)ret + size > module_addr_max_rw)
92215+ module_addr_max_rw = (unsigned long)ret + size;
92216+ }
92217+ return ret;
92218+}
92219+
92220+static void *module_alloc_update_bounds_rx(unsigned long size)
92221+{
92222+ void *ret = module_alloc_exec(size);
92223+
92224+ if (ret) {
92225+ /* Update module bounds. */
92226+ if ((unsigned long)ret < module_addr_min_rx)
92227+ module_addr_min_rx = (unsigned long)ret;
92228+ if ((unsigned long)ret + size > module_addr_max_rx)
92229+ module_addr_max_rx = (unsigned long)ret + size;
92230 }
92231 return ret;
92232 }
92233@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92234 unsigned int i;
92235
92236 /* only scan the sections containing data */
92237- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
92238- (unsigned long)mod->module_core,
92239+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
92240+ (unsigned long)mod->module_core_rw,
92241 sizeof(struct module), GFP_KERNEL);
92242
92243 for (i = 1; i < hdr->e_shnum; i++) {
92244@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92245 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
92246 continue;
92247
92248- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
92249- (unsigned long)mod->module_core,
92250+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
92251+ (unsigned long)mod->module_core_rw,
92252 sechdrs[i].sh_size, GFP_KERNEL);
92253 }
92254 }
92255@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
92256 Elf_Ehdr *hdr;
92257 Elf_Shdr *sechdrs;
92258 char *secstrings, *args, *modmagic, *strtab = NULL;
92259- char *staging;
92260+ char *staging, *license;
92261 unsigned int i;
92262 unsigned int symindex = 0;
92263 unsigned int strindex = 0;
92264@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
92265 goto free_hdr;
92266 }
92267
92268+ license = get_modinfo(sechdrs, infoindex, "license");
92269+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92270+ if (!license || !license_is_gpl_compatible(license)) {
92271+ err = -ENOEXEC;
92272+ goto free_hdr;
92273+ }
92274+#endif
92275+
92276 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
92277 /* This is allowed: modprobe --force will invalidate it. */
92278 if (!modmagic) {
92279@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
92280 secstrings, &stroffs, strmap);
92281
92282 /* Do the allocs. */
92283- ptr = module_alloc_update_bounds(mod->core_size);
92284+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92285 /*
92286 * The pointer to this block is stored in the module structure
92287 * which is inside the block. Just mark it as not being a
92288@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
92289 err = -ENOMEM;
92290 goto free_percpu;
92291 }
92292- memset(ptr, 0, mod->core_size);
92293- mod->module_core = ptr;
92294+ memset(ptr, 0, mod->core_size_rw);
92295+ mod->module_core_rw = ptr;
92296
92297- ptr = module_alloc_update_bounds(mod->init_size);
92298+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92299 /*
92300 * The pointer to this block is stored in the module structure
92301 * which is inside the block. This block doesn't need to be
92302 * scanned as it contains data and code that will be freed
92303 * after the module is initialized.
92304 */
92305- kmemleak_ignore(ptr);
92306- if (!ptr && mod->init_size) {
92307+ kmemleak_not_leak(ptr);
92308+ if (!ptr && mod->init_size_rw) {
92309 err = -ENOMEM;
92310- goto free_core;
92311+ goto free_core_rw;
92312 }
92313- memset(ptr, 0, mod->init_size);
92314- mod->module_init = ptr;
92315+ memset(ptr, 0, mod->init_size_rw);
92316+ mod->module_init_rw = ptr;
92317+
92318+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92319+ kmemleak_not_leak(ptr);
92320+ if (!ptr) {
92321+ err = -ENOMEM;
92322+ goto free_init_rw;
92323+ }
92324+
92325+ pax_open_kernel();
92326+ memset(ptr, 0, mod->core_size_rx);
92327+ pax_close_kernel();
92328+ mod->module_core_rx = ptr;
92329+
92330+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92331+ kmemleak_not_leak(ptr);
92332+ if (!ptr && mod->init_size_rx) {
92333+ err = -ENOMEM;
92334+ goto free_core_rx;
92335+ }
92336+
92337+ pax_open_kernel();
92338+ memset(ptr, 0, mod->init_size_rx);
92339+ pax_close_kernel();
92340+ mod->module_init_rx = ptr;
92341
92342 /* Transfer each section which specifies SHF_ALLOC */
92343 DEBUGP("final section addresses:\n");
92344@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
92345 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
92346 continue;
92347
92348- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
92349- dest = mod->module_init
92350- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92351- else
92352- dest = mod->module_core + sechdrs[i].sh_entsize;
92353+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
92354+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92355+ dest = mod->module_init_rw
92356+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92357+ else
92358+ dest = mod->module_init_rx
92359+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92360+ } else {
92361+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92362+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
92363+ else
92364+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
92365+ }
92366
92367- if (sechdrs[i].sh_type != SHT_NOBITS)
92368- memcpy(dest, (void *)sechdrs[i].sh_addr,
92369- sechdrs[i].sh_size);
92370+ if (sechdrs[i].sh_type != SHT_NOBITS) {
92371+
92372+#ifdef CONFIG_PAX_KERNEXEC
92373+#ifdef CONFIG_X86_64
92374+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
92375+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92376+#endif
92377+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
92378+ pax_open_kernel();
92379+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92380+ pax_close_kernel();
92381+ } else
92382+#endif
92383+
92384+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92385+ }
92386 /* Update sh_addr to point to copy in image. */
92387- sechdrs[i].sh_addr = (unsigned long)dest;
92388+
92389+#ifdef CONFIG_PAX_KERNEXEC
92390+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
92391+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
92392+ else
92393+#endif
92394+
92395+ sechdrs[i].sh_addr = (unsigned long)dest;
92396 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
92397 }
92398 /* Module has been moved. */
92399@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
92400 mod->name);
92401 if (!mod->refptr) {
92402 err = -ENOMEM;
92403- goto free_init;
92404+ goto free_init_rx;
92405 }
92406 #endif
92407 /* Now we've moved module, initialize linked lists, etc. */
92408@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
92409 goto free_unload;
92410
92411 /* Set up license info based on the info section */
92412- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
92413+ set_license(mod, license);
92414
92415 /*
92416 * ndiswrapper is under GPL by itself, but loads proprietary modules.
92417@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
92418 /* Set up MODINFO_ATTR fields */
92419 setup_modinfo(mod, sechdrs, infoindex);
92420
92421+ mod->args = args;
92422+
92423+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92424+ {
92425+ char *p, *p2;
92426+
92427+ if (strstr(mod->args, "grsec_modharden_netdev")) {
92428+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
92429+ err = -EPERM;
92430+ goto cleanup;
92431+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92432+ p += strlen("grsec_modharden_normal");
92433+ p2 = strstr(p, "_");
92434+ if (p2) {
92435+ *p2 = '\0';
92436+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92437+ *p2 = '_';
92438+ }
92439+ err = -EPERM;
92440+ goto cleanup;
92441+ }
92442+ }
92443+#endif
92444+
92445+
92446 /* Fix up syms, so that st_value is a pointer to location. */
92447 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
92448 mod);
92449@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
92450
92451 /* Now do relocations. */
92452 for (i = 1; i < hdr->e_shnum; i++) {
92453- const char *strtab = (char *)sechdrs[strindex].sh_addr;
92454 unsigned int info = sechdrs[i].sh_info;
92455+ strtab = (char *)sechdrs[strindex].sh_addr;
92456
92457 /* Not a valid relocation section? */
92458 if (info >= hdr->e_shnum)
92459@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
92460 * Do it before processing of module parameters, so the module
92461 * can provide parameter accessor functions of its own.
92462 */
92463- if (mod->module_init)
92464- flush_icache_range((unsigned long)mod->module_init,
92465- (unsigned long)mod->module_init
92466- + mod->init_size);
92467- flush_icache_range((unsigned long)mod->module_core,
92468- (unsigned long)mod->module_core + mod->core_size);
92469+ if (mod->module_init_rx)
92470+ flush_icache_range((unsigned long)mod->module_init_rx,
92471+ (unsigned long)mod->module_init_rx
92472+ + mod->init_size_rx);
92473+ flush_icache_range((unsigned long)mod->module_core_rx,
92474+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92475
92476 set_fs(old_fs);
92477
92478- mod->args = args;
92479 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
92480 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
92481 mod->name);
92482@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
92483 free_unload:
92484 module_unload_free(mod);
92485 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
92486+ free_init_rx:
92487 percpu_modfree(mod->refptr);
92488- free_init:
92489 #endif
92490- module_free(mod, mod->module_init);
92491- free_core:
92492- module_free(mod, mod->module_core);
92493+ module_free_exec(mod, mod->module_init_rx);
92494+ free_core_rx:
92495+ module_free_exec(mod, mod->module_core_rx);
92496+ free_init_rw:
92497+ module_free(mod, mod->module_init_rw);
92498+ free_core_rw:
92499+ module_free(mod, mod->module_core_rw);
92500 /* mod will be freed with core. Don't access it beyond this line! */
92501 free_percpu:
92502 if (percpu)
92503@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
92504 mod->symtab = mod->core_symtab;
92505 mod->strtab = mod->core_strtab;
92506 #endif
92507- module_free(mod, mod->module_init);
92508- mod->module_init = NULL;
92509- mod->init_size = 0;
92510- mod->init_text_size = 0;
92511+ module_free(mod, mod->module_init_rw);
92512+ module_free_exec(mod, mod->module_init_rx);
92513+ mod->module_init_rw = NULL;
92514+ mod->module_init_rx = NULL;
92515+ mod->init_size_rw = 0;
92516+ mod->init_size_rx = 0;
92517 mutex_unlock(&module_mutex);
92518
92519 return 0;
92520@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
92521 unsigned long nextval;
92522
92523 /* At worse, next value is at end of module */
92524- if (within_module_init(addr, mod))
92525- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92526+ if (within_module_init_rx(addr, mod))
92527+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92528+ else if (within_module_init_rw(addr, mod))
92529+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92530+ else if (within_module_core_rx(addr, mod))
92531+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92532+ else if (within_module_core_rw(addr, mod))
92533+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92534 else
92535- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92536+ return NULL;
92537
92538 /* Scan for closest preceeding symbol, and next symbol. (ELF
92539 starts real symbols at 1). */
92540@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
92541 char buf[8];
92542
92543 seq_printf(m, "%s %u",
92544- mod->name, mod->init_size + mod->core_size);
92545+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92546 print_unload_info(m, mod);
92547
92548 /* Informative for users. */
92549@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
92550 mod->state == MODULE_STATE_COMING ? "Loading":
92551 "Live");
92552 /* Used by oprofile and other similar tools. */
92553- seq_printf(m, " 0x%p", mod->module_core);
92554+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
92555
92556 /* Taints info */
92557 if (mod->taints)
92558@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
92559
92560 static int __init proc_modules_init(void)
92561 {
92562+#ifndef CONFIG_GRKERNSEC_HIDESYM
92563+#ifdef CONFIG_GRKERNSEC_PROC_USER
92564+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92565+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92566+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92567+#else
92568 proc_create("modules", 0, NULL, &proc_modules_operations);
92569+#endif
92570+#else
92571+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92572+#endif
92573 return 0;
92574 }
92575 module_init(proc_modules_init);
92576@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
92577 {
92578 struct module *mod;
92579
92580- if (addr < module_addr_min || addr > module_addr_max)
92581+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92582+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92583 return NULL;
92584
92585 list_for_each_entry_rcu(mod, &modules, list)
92586- if (within_module_core(addr, mod)
92587- || within_module_init(addr, mod))
92588+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
92589 return mod;
92590 return NULL;
92591 }
92592@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
92593 */
92594 struct module *__module_text_address(unsigned long addr)
92595 {
92596- struct module *mod = __module_address(addr);
92597+ struct module *mod;
92598+
92599+#ifdef CONFIG_X86_32
92600+ addr = ktla_ktva(addr);
92601+#endif
92602+
92603+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92604+ return NULL;
92605+
92606+ mod = __module_address(addr);
92607+
92608 if (mod) {
92609 /* Make sure it's within the text section. */
92610- if (!within(addr, mod->module_init, mod->init_text_size)
92611- && !within(addr, mod->module_core, mod->core_text_size))
92612+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92613 mod = NULL;
92614 }
92615 return mod;
92616diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
92617index ec815a9..fe46e99 100644
92618--- a/kernel/mutex-debug.c
92619+++ b/kernel/mutex-debug.c
92620@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
92621 }
92622
92623 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92624- struct thread_info *ti)
92625+ struct task_struct *task)
92626 {
92627 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
92628
92629 /* Mark the current thread as blocked on the lock: */
92630- ti->task->blocked_on = waiter;
92631+ task->blocked_on = waiter;
92632 }
92633
92634 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92635- struct thread_info *ti)
92636+ struct task_struct *task)
92637 {
92638 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
92639- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
92640- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
92641- ti->task->blocked_on = NULL;
92642+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
92643+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
92644+ task->blocked_on = NULL;
92645
92646 list_del_init(&waiter->list);
92647 waiter->task = NULL;
92648@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
92649 return;
92650
92651 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
92652- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
92653+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
92654 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
92655 mutex_clear_owner(lock);
92656 }
92657diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
92658index 6b2d735..372d3c4 100644
92659--- a/kernel/mutex-debug.h
92660+++ b/kernel/mutex-debug.h
92661@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92662 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92663 extern void debug_mutex_add_waiter(struct mutex *lock,
92664 struct mutex_waiter *waiter,
92665- struct thread_info *ti);
92666+ struct task_struct *task);
92667 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92668- struct thread_info *ti);
92669+ struct task_struct *task);
92670 extern void debug_mutex_unlock(struct mutex *lock);
92671 extern void debug_mutex_init(struct mutex *lock, const char *name,
92672 struct lock_class_key *key);
92673
92674 static inline void mutex_set_owner(struct mutex *lock)
92675 {
92676- lock->owner = current_thread_info();
92677+ lock->owner = current;
92678 }
92679
92680 static inline void mutex_clear_owner(struct mutex *lock)
92681diff --git a/kernel/mutex.c b/kernel/mutex.c
92682index f85644c..5ee9f77 100644
92683--- a/kernel/mutex.c
92684+++ b/kernel/mutex.c
92685@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92686 */
92687
92688 for (;;) {
92689- struct thread_info *owner;
92690+ struct task_struct *owner;
92691
92692 /*
92693 * If we own the BKL, then don't spin. The owner of
92694@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92695 spin_lock_mutex(&lock->wait_lock, flags);
92696
92697 debug_mutex_lock_common(lock, &waiter);
92698- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92699+ debug_mutex_add_waiter(lock, &waiter, task);
92700
92701 /* add waiting tasks to the end of the waitqueue (FIFO): */
92702 list_add_tail(&waiter.list, &lock->wait_list);
92703@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92704 * TASK_UNINTERRUPTIBLE case.)
92705 */
92706 if (unlikely(signal_pending_state(state, task))) {
92707- mutex_remove_waiter(lock, &waiter,
92708- task_thread_info(task));
92709+ mutex_remove_waiter(lock, &waiter, task);
92710 mutex_release(&lock->dep_map, 1, ip);
92711 spin_unlock_mutex(&lock->wait_lock, flags);
92712
92713@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92714 done:
92715 lock_acquired(&lock->dep_map, ip);
92716 /* got the lock - rejoice! */
92717- mutex_remove_waiter(lock, &waiter, current_thread_info());
92718+ mutex_remove_waiter(lock, &waiter, task);
92719 mutex_set_owner(lock);
92720
92721 /* set it to 0 if there are no waiters left: */
92722diff --git a/kernel/mutex.h b/kernel/mutex.h
92723index 67578ca..4115fbf 100644
92724--- a/kernel/mutex.h
92725+++ b/kernel/mutex.h
92726@@ -19,7 +19,7 @@
92727 #ifdef CONFIG_SMP
92728 static inline void mutex_set_owner(struct mutex *lock)
92729 {
92730- lock->owner = current_thread_info();
92731+ lock->owner = current;
92732 }
92733
92734 static inline void mutex_clear_owner(struct mutex *lock)
92735diff --git a/kernel/panic.c b/kernel/panic.c
92736index 96b45d0..ff70a46 100644
92737--- a/kernel/panic.c
92738+++ b/kernel/panic.c
92739@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
92740 va_end(args);
92741 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
92742 #ifdef CONFIG_DEBUG_BUGVERBOSE
92743- dump_stack();
92744+ /*
92745+ * Avoid nested stack-dumping if a panic occurs during oops processing
92746+ */
92747+ if (!oops_in_progress)
92748+ dump_stack();
92749 #endif
92750
92751 /*
92752@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
92753 const char *board;
92754
92755 printk(KERN_WARNING "------------[ cut here ]------------\n");
92756- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
92757+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
92758 board = dmi_get_system_info(DMI_PRODUCT_NAME);
92759 if (board)
92760 printk(KERN_WARNING "Hardware name: %s\n", board);
92761@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92762 */
92763 void __stack_chk_fail(void)
92764 {
92765- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92766+ dump_stack();
92767+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92768 __builtin_return_address(0));
92769 }
92770 EXPORT_SYMBOL(__stack_chk_fail);
92771diff --git a/kernel/params.c b/kernel/params.c
92772index d656c27..21e452c 100644
92773--- a/kernel/params.c
92774+++ b/kernel/params.c
92775@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
92776 return ret;
92777 }
92778
92779-static struct sysfs_ops module_sysfs_ops = {
92780+static const struct sysfs_ops module_sysfs_ops = {
92781 .show = module_attr_show,
92782 .store = module_attr_store,
92783 };
92784@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
92785 return 0;
92786 }
92787
92788-static struct kset_uevent_ops module_uevent_ops = {
92789+static const struct kset_uevent_ops module_uevent_ops = {
92790 .filter = uevent_filter,
92791 };
92792
92793diff --git a/kernel/perf_event.c b/kernel/perf_event.c
92794index 37ebc14..9c121d9 100644
92795--- a/kernel/perf_event.c
92796+++ b/kernel/perf_event.c
92797@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
92798 */
92799 int sysctl_perf_event_sample_rate __read_mostly = 100000;
92800
92801-static atomic64_t perf_event_id;
92802+static atomic64_unchecked_t perf_event_id;
92803
92804 /*
92805 * Lock for (sysadmin-configurable) event reservations:
92806@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
92807 * In order to keep per-task stats reliable we need to flip the event
92808 * values when we flip the contexts.
92809 */
92810- value = atomic64_read(&next_event->count);
92811- value = atomic64_xchg(&event->count, value);
92812- atomic64_set(&next_event->count, value);
92813+ value = atomic64_read_unchecked(&next_event->count);
92814+ value = atomic64_xchg_unchecked(&event->count, value);
92815+ atomic64_set_unchecked(&next_event->count, value);
92816
92817 swap(event->total_time_enabled, next_event->total_time_enabled);
92818 swap(event->total_time_running, next_event->total_time_running);
92819@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
92820 update_event_times(event);
92821 }
92822
92823- return atomic64_read(&event->count);
92824+ return atomic64_read_unchecked(&event->count);
92825 }
92826
92827 /*
92828@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
92829 values[n++] = 1 + leader->nr_siblings;
92830 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92831 values[n++] = leader->total_time_enabled +
92832- atomic64_read(&leader->child_total_time_enabled);
92833+ atomic64_read_unchecked(&leader->child_total_time_enabled);
92834 }
92835 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92836 values[n++] = leader->total_time_running +
92837- atomic64_read(&leader->child_total_time_running);
92838+ atomic64_read_unchecked(&leader->child_total_time_running);
92839 }
92840
92841 size = n * sizeof(u64);
92842@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
92843 values[n++] = perf_event_read_value(event);
92844 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92845 values[n++] = event->total_time_enabled +
92846- atomic64_read(&event->child_total_time_enabled);
92847+ atomic64_read_unchecked(&event->child_total_time_enabled);
92848 }
92849 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92850 values[n++] = event->total_time_running +
92851- atomic64_read(&event->child_total_time_running);
92852+ atomic64_read_unchecked(&event->child_total_time_running);
92853 }
92854 if (read_format & PERF_FORMAT_ID)
92855 values[n++] = primary_event_id(event);
92856@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
92857 static void perf_event_reset(struct perf_event *event)
92858 {
92859 (void)perf_event_read(event);
92860- atomic64_set(&event->count, 0);
92861+ atomic64_set_unchecked(&event->count, 0);
92862 perf_event_update_userpage(event);
92863 }
92864
92865@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
92866 ++userpg->lock;
92867 barrier();
92868 userpg->index = perf_event_index(event);
92869- userpg->offset = atomic64_read(&event->count);
92870+ userpg->offset = atomic64_read_unchecked(&event->count);
92871 if (event->state == PERF_EVENT_STATE_ACTIVE)
92872- userpg->offset -= atomic64_read(&event->hw.prev_count);
92873+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
92874
92875 userpg->time_enabled = event->total_time_enabled +
92876- atomic64_read(&event->child_total_time_enabled);
92877+ atomic64_read_unchecked(&event->child_total_time_enabled);
92878
92879 userpg->time_running = event->total_time_running +
92880- atomic64_read(&event->child_total_time_running);
92881+ atomic64_read_unchecked(&event->child_total_time_running);
92882
92883 barrier();
92884 ++userpg->lock;
92885@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
92886 u64 values[4];
92887 int n = 0;
92888
92889- values[n++] = atomic64_read(&event->count);
92890+ values[n++] = atomic64_read_unchecked(&event->count);
92891 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92892 values[n++] = event->total_time_enabled +
92893- atomic64_read(&event->child_total_time_enabled);
92894+ atomic64_read_unchecked(&event->child_total_time_enabled);
92895 }
92896 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92897 values[n++] = event->total_time_running +
92898- atomic64_read(&event->child_total_time_running);
92899+ atomic64_read_unchecked(&event->child_total_time_running);
92900 }
92901 if (read_format & PERF_FORMAT_ID)
92902 values[n++] = primary_event_id(event);
92903@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92904 if (leader != event)
92905 leader->pmu->read(leader);
92906
92907- values[n++] = atomic64_read(&leader->count);
92908+ values[n++] = atomic64_read_unchecked(&leader->count);
92909 if (read_format & PERF_FORMAT_ID)
92910 values[n++] = primary_event_id(leader);
92911
92912@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92913 if (sub != event)
92914 sub->pmu->read(sub);
92915
92916- values[n++] = atomic64_read(&sub->count);
92917+ values[n++] = atomic64_read_unchecked(&sub->count);
92918 if (read_format & PERF_FORMAT_ID)
92919 values[n++] = primary_event_id(sub);
92920
92921@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
92922 * need to add enough zero bytes after the string to handle
92923 * the 64bit alignment we do later.
92924 */
92925- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
92926+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
92927 if (!buf) {
92928 name = strncpy(tmp, "//enomem", sizeof(tmp));
92929 goto got_name;
92930 }
92931- name = d_path(&file->f_path, buf, PATH_MAX);
92932+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
92933 if (IS_ERR(name)) {
92934 name = strncpy(tmp, "//toolong", sizeof(tmp));
92935 goto got_name;
92936@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
92937 {
92938 struct hw_perf_event *hwc = &event->hw;
92939
92940- atomic64_add(nr, &event->count);
92941+ atomic64_add_unchecked(nr, &event->count);
92942
92943 if (!hwc->sample_period)
92944 return;
92945@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
92946 u64 now;
92947
92948 now = cpu_clock(cpu);
92949- prev = atomic64_read(&event->hw.prev_count);
92950- atomic64_set(&event->hw.prev_count, now);
92951- atomic64_add(now - prev, &event->count);
92952+ prev = atomic64_read_unchecked(&event->hw.prev_count);
92953+ atomic64_set_unchecked(&event->hw.prev_count, now);
92954+ atomic64_add_unchecked(now - prev, &event->count);
92955 }
92956
92957 static int cpu_clock_perf_event_enable(struct perf_event *event)
92958@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
92959 struct hw_perf_event *hwc = &event->hw;
92960 int cpu = raw_smp_processor_id();
92961
92962- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
92963+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
92964 perf_swevent_start_hrtimer(event);
92965
92966 return 0;
92967@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
92968 u64 prev;
92969 s64 delta;
92970
92971- prev = atomic64_xchg(&event->hw.prev_count, now);
92972+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
92973 delta = now - prev;
92974- atomic64_add(delta, &event->count);
92975+ atomic64_add_unchecked(delta, &event->count);
92976 }
92977
92978 static int task_clock_perf_event_enable(struct perf_event *event)
92979@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
92980
92981 now = event->ctx->time;
92982
92983- atomic64_set(&hwc->prev_count, now);
92984+ atomic64_set_unchecked(&hwc->prev_count, now);
92985
92986 perf_swevent_start_hrtimer(event);
92987
92988@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
92989 event->parent = parent_event;
92990
92991 event->ns = get_pid_ns(current->nsproxy->pid_ns);
92992- event->id = atomic64_inc_return(&perf_event_id);
92993+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
92994
92995 event->state = PERF_EVENT_STATE_INACTIVE;
92996
92997@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
92998 if (child_event->attr.inherit_stat)
92999 perf_event_read_event(child_event, child);
93000
93001- child_val = atomic64_read(&child_event->count);
93002+ child_val = atomic64_read_unchecked(&child_event->count);
93003
93004 /*
93005 * Add back the child's count to the parent's count:
93006 */
93007- atomic64_add(child_val, &parent_event->count);
93008- atomic64_add(child_event->total_time_enabled,
93009+ atomic64_add_unchecked(child_val, &parent_event->count);
93010+ atomic64_add_unchecked(child_event->total_time_enabled,
93011 &parent_event->child_total_time_enabled);
93012- atomic64_add(child_event->total_time_running,
93013+ atomic64_add_unchecked(child_event->total_time_running,
93014 &parent_event->child_total_time_running);
93015
93016 /*
93017diff --git a/kernel/pid.c b/kernel/pid.c
93018index fce7198..4f23a7e 100644
93019--- a/kernel/pid.c
93020+++ b/kernel/pid.c
93021@@ -33,6 +33,7 @@
93022 #include <linux/rculist.h>
93023 #include <linux/bootmem.h>
93024 #include <linux/hash.h>
93025+#include <linux/security.h>
93026 #include <linux/pid_namespace.h>
93027 #include <linux/init_task.h>
93028 #include <linux/syscalls.h>
93029@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
93030
93031 int pid_max = PID_MAX_DEFAULT;
93032
93033-#define RESERVED_PIDS 300
93034+#define RESERVED_PIDS 500
93035
93036 int pid_max_min = RESERVED_PIDS + 1;
93037 int pid_max_max = PID_MAX_LIMIT;
93038@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
93039 */
93040 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
93041 {
93042- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93043+ struct task_struct *task;
93044+
93045+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
93046+
93047+ if (gr_pid_is_chrooted(task))
93048+ return NULL;
93049+
93050+ return task;
93051 }
93052
93053 struct task_struct *find_task_by_vpid(pid_t vnr)
93054@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
93055 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
93056 }
93057
93058+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
93059+{
93060+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
93061+}
93062+
93063 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
93064 {
93065 struct pid *pid;
93066diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
93067index 5c9dc22..d271117 100644
93068--- a/kernel/posix-cpu-timers.c
93069+++ b/kernel/posix-cpu-timers.c
93070@@ -6,6 +6,7 @@
93071 #include <linux/posix-timers.h>
93072 #include <linux/errno.h>
93073 #include <linux/math64.h>
93074+#include <linux/security.h>
93075 #include <asm/uaccess.h>
93076 #include <linux/kernel_stat.h>
93077 #include <trace/events/timer.h>
93078@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
93079
93080 static __init int init_posix_cpu_timers(void)
93081 {
93082- struct k_clock process = {
93083+ static struct k_clock process = {
93084 .clock_getres = process_cpu_clock_getres,
93085 .clock_get = process_cpu_clock_get,
93086 .clock_set = do_posix_clock_nosettime,
93087@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
93088 .nsleep = process_cpu_nsleep,
93089 .nsleep_restart = process_cpu_nsleep_restart,
93090 };
93091- struct k_clock thread = {
93092+ static struct k_clock thread = {
93093 .clock_getres = thread_cpu_clock_getres,
93094 .clock_get = thread_cpu_clock_get,
93095 .clock_set = do_posix_clock_nosettime,
93096diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93097index 5e76d22..cf1baeb 100644
93098--- a/kernel/posix-timers.c
93099+++ b/kernel/posix-timers.c
93100@@ -42,6 +42,7 @@
93101 #include <linux/compiler.h>
93102 #include <linux/idr.h>
93103 #include <linux/posix-timers.h>
93104+#include <linux/grsecurity.h>
93105 #include <linux/syscalls.h>
93106 #include <linux/wait.h>
93107 #include <linux/workqueue.h>
93108@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
93109 * which we beg off on and pass to do_sys_settimeofday().
93110 */
93111
93112-static struct k_clock posix_clocks[MAX_CLOCKS];
93113+static struct k_clock *posix_clocks[MAX_CLOCKS];
93114
93115 /*
93116 * These ones are defined below.
93117@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
93118 */
93119 #define CLOCK_DISPATCH(clock, call, arglist) \
93120 ((clock) < 0 ? posix_cpu_##call arglist : \
93121- (posix_clocks[clock].call != NULL \
93122- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
93123+ (posix_clocks[clock]->call != NULL \
93124+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
93125
93126 /*
93127 * Default clock hook functions when the struct k_clock passed
93128@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
93129 struct timespec *tp)
93130 {
93131 tp->tv_sec = 0;
93132- tp->tv_nsec = posix_clocks[which_clock].res;
93133+ tp->tv_nsec = posix_clocks[which_clock]->res;
93134 return 0;
93135 }
93136
93137@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
93138 return 0;
93139 if ((unsigned) which_clock >= MAX_CLOCKS)
93140 return 1;
93141- if (posix_clocks[which_clock].clock_getres != NULL)
93142+ if (posix_clocks[which_clock] == NULL)
93143 return 0;
93144- if (posix_clocks[which_clock].res != 0)
93145+ if (posix_clocks[which_clock]->clock_getres != NULL)
93146+ return 0;
93147+ if (posix_clocks[which_clock]->res != 0)
93148 return 0;
93149 return 1;
93150 }
93151@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
93152 */
93153 static __init int init_posix_timers(void)
93154 {
93155- struct k_clock clock_realtime = {
93156+ static struct k_clock clock_realtime = {
93157 .clock_getres = hrtimer_get_res,
93158 };
93159- struct k_clock clock_monotonic = {
93160+ static struct k_clock clock_monotonic = {
93161 .clock_getres = hrtimer_get_res,
93162 .clock_get = posix_ktime_get_ts,
93163 .clock_set = do_posix_clock_nosettime,
93164 };
93165- struct k_clock clock_monotonic_raw = {
93166+ static struct k_clock clock_monotonic_raw = {
93167 .clock_getres = hrtimer_get_res,
93168 .clock_get = posix_get_monotonic_raw,
93169 .clock_set = do_posix_clock_nosettime,
93170 .timer_create = no_timer_create,
93171 .nsleep = no_nsleep,
93172 };
93173- struct k_clock clock_realtime_coarse = {
93174+ static struct k_clock clock_realtime_coarse = {
93175 .clock_getres = posix_get_coarse_res,
93176 .clock_get = posix_get_realtime_coarse,
93177 .clock_set = do_posix_clock_nosettime,
93178 .timer_create = no_timer_create,
93179 .nsleep = no_nsleep,
93180 };
93181- struct k_clock clock_monotonic_coarse = {
93182+ static struct k_clock clock_monotonic_coarse = {
93183 .clock_getres = posix_get_coarse_res,
93184 .clock_get = posix_get_monotonic_coarse,
93185 .clock_set = do_posix_clock_nosettime,
93186@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
93187 .nsleep = no_nsleep,
93188 };
93189
93190+ pax_track_stack();
93191+
93192 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
93193 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
93194 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
93195@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
93196 return;
93197 }
93198
93199- posix_clocks[clock_id] = *new_clock;
93200+ posix_clocks[clock_id] = new_clock;
93201 }
93202 EXPORT_SYMBOL_GPL(register_posix_clock);
93203
93204@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93205 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93206 return -EFAULT;
93207
93208+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93209+ have their clock_set fptr set to a nosettime dummy function
93210+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93211+ call common_clock_set, which calls do_sys_settimeofday, which
93212+ we hook
93213+ */
93214+
93215 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
93216 }
93217
93218diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
93219index 04a9e90..bc355aa 100644
93220--- a/kernel/power/hibernate.c
93221+++ b/kernel/power/hibernate.c
93222@@ -48,14 +48,14 @@ enum {
93223
93224 static int hibernation_mode = HIBERNATION_SHUTDOWN;
93225
93226-static struct platform_hibernation_ops *hibernation_ops;
93227+static const struct platform_hibernation_ops *hibernation_ops;
93228
93229 /**
93230 * hibernation_set_ops - set the global hibernate operations
93231 * @ops: the hibernation operations to use in subsequent hibernation transitions
93232 */
93233
93234-void hibernation_set_ops(struct platform_hibernation_ops *ops)
93235+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
93236 {
93237 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
93238 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
93239diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
93240index e8b3370..484c2e4 100644
93241--- a/kernel/power/poweroff.c
93242+++ b/kernel/power/poweroff.c
93243@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
93244 .enable_mask = SYSRQ_ENABLE_BOOT,
93245 };
93246
93247-static int pm_sysrq_init(void)
93248+static int __init pm_sysrq_init(void)
93249 {
93250 register_sysrq_key('o', &sysrq_poweroff_op);
93251 return 0;
93252diff --git a/kernel/power/process.c b/kernel/power/process.c
93253index e7cd671..56d5f459 100644
93254--- a/kernel/power/process.c
93255+++ b/kernel/power/process.c
93256@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
93257 struct timeval start, end;
93258 u64 elapsed_csecs64;
93259 unsigned int elapsed_csecs;
93260+ bool timedout = false;
93261
93262 do_gettimeofday(&start);
93263
93264 end_time = jiffies + TIMEOUT;
93265 do {
93266 todo = 0;
93267+ if (time_after(jiffies, end_time))
93268+ timedout = true;
93269 read_lock(&tasklist_lock);
93270 do_each_thread(g, p) {
93271 if (frozen(p) || !freezeable(p))
93272@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
93273 * It is "frozen enough". If the task does wake
93274 * up, it will immediately call try_to_freeze.
93275 */
93276- if (!task_is_stopped_or_traced(p) &&
93277- !freezer_should_skip(p))
93278+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
93279 todo++;
93280+ if (timedout) {
93281+ printk(KERN_ERR "Task refusing to freeze:\n");
93282+ sched_show_task(p);
93283+ }
93284+ }
93285 } while_each_thread(g, p);
93286 read_unlock(&tasklist_lock);
93287 yield(); /* Yield is okay here */
93288- if (time_after(jiffies, end_time))
93289- break;
93290- } while (todo);
93291+ } while (todo && !timedout);
93292
93293 do_gettimeofday(&end);
93294 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
93295diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
93296index 40dd021..fb30ceb 100644
93297--- a/kernel/power/suspend.c
93298+++ b/kernel/power/suspend.c
93299@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
93300 [PM_SUSPEND_MEM] = "mem",
93301 };
93302
93303-static struct platform_suspend_ops *suspend_ops;
93304+static const struct platform_suspend_ops *suspend_ops;
93305
93306 /**
93307 * suspend_set_ops - Set the global suspend method table.
93308 * @ops: Pointer to ops structure.
93309 */
93310-void suspend_set_ops(struct platform_suspend_ops *ops)
93311+void suspend_set_ops(const struct platform_suspend_ops *ops)
93312 {
93313 mutex_lock(&pm_mutex);
93314 suspend_ops = ops;
93315diff --git a/kernel/printk.c b/kernel/printk.c
93316index 4cade47..4d17900 100644
93317--- a/kernel/printk.c
93318+++ b/kernel/printk.c
93319@@ -33,6 +33,7 @@
93320 #include <linux/bootmem.h>
93321 #include <linux/syscalls.h>
93322 #include <linux/kexec.h>
93323+#include <linux/syslog.h>
93324
93325 #include <asm/uaccess.h>
93326
93327@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
93328 }
93329 #endif
93330
93331-/*
93332- * Commands to do_syslog:
93333- *
93334- * 0 -- Close the log. Currently a NOP.
93335- * 1 -- Open the log. Currently a NOP.
93336- * 2 -- Read from the log.
93337- * 3 -- Read all messages remaining in the ring buffer.
93338- * 4 -- Read and clear all messages remaining in the ring buffer
93339- * 5 -- Clear ring buffer.
93340- * 6 -- Disable printk's to console
93341- * 7 -- Enable printk's to console
93342- * 8 -- Set level of messages printed to console
93343- * 9 -- Return number of unread characters in the log buffer
93344- * 10 -- Return size of the log buffer
93345- */
93346-int do_syslog(int type, char __user *buf, int len)
93347+int do_syslog(int type, char __user *buf, int len, bool from_file)
93348 {
93349 unsigned i, j, limit, count;
93350 int do_clear = 0;
93351 char c;
93352 int error = 0;
93353
93354- error = security_syslog(type);
93355+#ifdef CONFIG_GRKERNSEC_DMESG
93356+ if (grsec_enable_dmesg &&
93357+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
93358+ !capable(CAP_SYS_ADMIN))
93359+ return -EPERM;
93360+#endif
93361+
93362+ error = security_syslog(type, from_file);
93363 if (error)
93364 return error;
93365
93366 switch (type) {
93367- case 0: /* Close log */
93368+ case SYSLOG_ACTION_CLOSE: /* Close log */
93369 break;
93370- case 1: /* Open log */
93371+ case SYSLOG_ACTION_OPEN: /* Open log */
93372 break;
93373- case 2: /* Read from log */
93374+ case SYSLOG_ACTION_READ: /* Read from log */
93375 error = -EINVAL;
93376 if (!buf || len < 0)
93377 goto out;
93378@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
93379 if (!error)
93380 error = i;
93381 break;
93382- case 4: /* Read/clear last kernel messages */
93383+ /* Read/clear last kernel messages */
93384+ case SYSLOG_ACTION_READ_CLEAR:
93385 do_clear = 1;
93386 /* FALL THRU */
93387- case 3: /* Read last kernel messages */
93388+ /* Read last kernel messages */
93389+ case SYSLOG_ACTION_READ_ALL:
93390 error = -EINVAL;
93391 if (!buf || len < 0)
93392 goto out;
93393@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
93394 }
93395 }
93396 break;
93397- case 5: /* Clear ring buffer */
93398+ /* Clear ring buffer */
93399+ case SYSLOG_ACTION_CLEAR:
93400 logged_chars = 0;
93401 break;
93402- case 6: /* Disable logging to console */
93403+ /* Disable logging to console */
93404+ case SYSLOG_ACTION_CONSOLE_OFF:
93405 if (saved_console_loglevel == -1)
93406 saved_console_loglevel = console_loglevel;
93407 console_loglevel = minimum_console_loglevel;
93408 break;
93409- case 7: /* Enable logging to console */
93410+ /* Enable logging to console */
93411+ case SYSLOG_ACTION_CONSOLE_ON:
93412 if (saved_console_loglevel != -1) {
93413 console_loglevel = saved_console_loglevel;
93414 saved_console_loglevel = -1;
93415 }
93416 break;
93417- case 8: /* Set level of messages printed to console */
93418+ /* Set level of messages printed to console */
93419+ case SYSLOG_ACTION_CONSOLE_LEVEL:
93420 error = -EINVAL;
93421 if (len < 1 || len > 8)
93422 goto out;
93423@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
93424 saved_console_loglevel = -1;
93425 error = 0;
93426 break;
93427- case 9: /* Number of chars in the log buffer */
93428+ /* Number of chars in the log buffer */
93429+ case SYSLOG_ACTION_SIZE_UNREAD:
93430 error = log_end - log_start;
93431 break;
93432- case 10: /* Size of the log buffer */
93433+ /* Size of the log buffer */
93434+ case SYSLOG_ACTION_SIZE_BUFFER:
93435 error = log_buf_len;
93436 break;
93437 default:
93438@@ -415,7 +416,7 @@ out:
93439
93440 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
93441 {
93442- return do_syslog(type, buf, len);
93443+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
93444 }
93445
93446 /*
93447diff --git a/kernel/profile.c b/kernel/profile.c
93448index dfadc5b..7f59404 100644
93449--- a/kernel/profile.c
93450+++ b/kernel/profile.c
93451@@ -39,7 +39,7 @@ struct profile_hit {
93452 /* Oprofile timer tick hook */
93453 static int (*timer_hook)(struct pt_regs *) __read_mostly;
93454
93455-static atomic_t *prof_buffer;
93456+static atomic_unchecked_t *prof_buffer;
93457 static unsigned long prof_len, prof_shift;
93458
93459 int prof_on __read_mostly;
93460@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
93461 hits[i].pc = 0;
93462 continue;
93463 }
93464- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93465+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93466 hits[i].hits = hits[i].pc = 0;
93467 }
93468 }
93469@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93470 * Add the current hit(s) and flush the write-queue out
93471 * to the global buffer:
93472 */
93473- atomic_add(nr_hits, &prof_buffer[pc]);
93474+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93475 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93476- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93477+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93478 hits[i].pc = hits[i].hits = 0;
93479 }
93480 out:
93481@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93482 if (prof_on != type || !prof_buffer)
93483 return;
93484 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93485- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93486+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93487 }
93488 #endif /* !CONFIG_SMP */
93489 EXPORT_SYMBOL_GPL(profile_hits);
93490@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93491 return -EFAULT;
93492 buf++; p++; count--; read++;
93493 }
93494- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93495+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93496 if (copy_to_user(buf, (void *)pnt, count))
93497 return -EFAULT;
93498 read += count;
93499@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93500 }
93501 #endif
93502 profile_discard_flip_buffers();
93503- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93504+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93505 return count;
93506 }
93507
93508diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93509index 05625f6..733bf70 100644
93510--- a/kernel/ptrace.c
93511+++ b/kernel/ptrace.c
93512@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
93513 return ret;
93514 }
93515
93516-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93517+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
93518+ unsigned int log)
93519 {
93520 const struct cred *cred = current_cred(), *tcred;
93521
93522@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93523 cred->gid != tcred->egid ||
93524 cred->gid != tcred->sgid ||
93525 cred->gid != tcred->gid) &&
93526- !capable(CAP_SYS_PTRACE)) {
93527+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93528+ (log && !capable(CAP_SYS_PTRACE)))
93529+ ) {
93530 rcu_read_unlock();
93531 return -EPERM;
93532 }
93533@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93534 smp_rmb();
93535 if (task->mm)
93536 dumpable = get_dumpable(task->mm);
93537- if (!dumpable && !capable(CAP_SYS_PTRACE))
93538+ if (!dumpable &&
93539+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93540+ (log && !capable(CAP_SYS_PTRACE))))
93541 return -EPERM;
93542
93543 return security_ptrace_access_check(task, mode);
93544@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
93545 {
93546 int err;
93547 task_lock(task);
93548- err = __ptrace_may_access(task, mode);
93549+ err = __ptrace_may_access(task, mode, 0);
93550+ task_unlock(task);
93551+ return !err;
93552+}
93553+
93554+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
93555+{
93556+ int err;
93557+ task_lock(task);
93558+ err = __ptrace_may_access(task, mode, 1);
93559 task_unlock(task);
93560 return !err;
93561 }
93562@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
93563 goto out;
93564
93565 task_lock(task);
93566- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
93567+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
93568 task_unlock(task);
93569 if (retval)
93570 goto unlock_creds;
93571@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
93572 goto unlock_tasklist;
93573
93574 task->ptrace = PT_PTRACED;
93575- if (capable(CAP_SYS_PTRACE))
93576+ if (capable_nolog(CAP_SYS_PTRACE))
93577 task->ptrace |= PT_PTRACE_CAP;
93578
93579 __ptrace_link(task, current);
93580@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93581 {
93582 int copied = 0;
93583
93584+ pax_track_stack();
93585+
93586 while (len > 0) {
93587 char buf[128];
93588 int this_len, retval;
93589@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
93590 {
93591 int copied = 0;
93592
93593+ pax_track_stack();
93594+
93595 while (len > 0) {
93596 char buf[128];
93597 int this_len, retval;
93598@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
93599 int ret = -EIO;
93600 siginfo_t siginfo;
93601
93602+ pax_track_stack();
93603+
93604 switch (request) {
93605 case PTRACE_PEEKTEXT:
93606 case PTRACE_PEEKDATA:
93607@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
93608 ret = ptrace_setoptions(child, data);
93609 break;
93610 case PTRACE_GETEVENTMSG:
93611- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
93612+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
93613 break;
93614
93615 case PTRACE_GETSIGINFO:
93616 ret = ptrace_getsiginfo(child, &siginfo);
93617 if (!ret)
93618- ret = copy_siginfo_to_user((siginfo_t __user *) data,
93619+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
93620 &siginfo);
93621 break;
93622
93623 case PTRACE_SETSIGINFO:
93624- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
93625+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
93626 sizeof siginfo))
93627 ret = -EFAULT;
93628 else
93629@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
93630 goto out;
93631 }
93632
93633+ if (gr_handle_ptrace(child, request)) {
93634+ ret = -EPERM;
93635+ goto out_put_task_struct;
93636+ }
93637+
93638 if (request == PTRACE_ATTACH) {
93639 ret = ptrace_attach(child);
93640 /*
93641 * Some architectures need to do book-keeping after
93642 * a ptrace attach.
93643 */
93644- if (!ret)
93645+ if (!ret) {
93646 arch_ptrace_attach(child);
93647+ gr_audit_ptrace(child);
93648+ }
93649 goto out_put_task_struct;
93650 }
93651
93652@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
93653 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93654 if (copied != sizeof(tmp))
93655 return -EIO;
93656- return put_user(tmp, (unsigned long __user *)data);
93657+ return put_user(tmp, (__force unsigned long __user *)data);
93658 }
93659
93660 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
93661@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93662 siginfo_t siginfo;
93663 int ret;
93664
93665+ pax_track_stack();
93666+
93667 switch (request) {
93668 case PTRACE_PEEKTEXT:
93669 case PTRACE_PEEKDATA:
93670@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
93671 goto out;
93672 }
93673
93674+ if (gr_handle_ptrace(child, request)) {
93675+ ret = -EPERM;
93676+ goto out_put_task_struct;
93677+ }
93678+
93679 if (request == PTRACE_ATTACH) {
93680 ret = ptrace_attach(child);
93681 /*
93682 * Some architectures need to do book-keeping after
93683 * a ptrace attach.
93684 */
93685- if (!ret)
93686+ if (!ret) {
93687 arch_ptrace_attach(child);
93688+ gr_audit_ptrace(child);
93689+ }
93690 goto out_put_task_struct;
93691 }
93692
93693diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
93694index 697c0a0..2402696 100644
93695--- a/kernel/rcutorture.c
93696+++ b/kernel/rcutorture.c
93697@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
93698 { 0 };
93699 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
93700 { 0 };
93701-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93702-static atomic_t n_rcu_torture_alloc;
93703-static atomic_t n_rcu_torture_alloc_fail;
93704-static atomic_t n_rcu_torture_free;
93705-static atomic_t n_rcu_torture_mberror;
93706-static atomic_t n_rcu_torture_error;
93707+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93708+static atomic_unchecked_t n_rcu_torture_alloc;
93709+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93710+static atomic_unchecked_t n_rcu_torture_free;
93711+static atomic_unchecked_t n_rcu_torture_mberror;
93712+static atomic_unchecked_t n_rcu_torture_error;
93713 static long n_rcu_torture_timers;
93714 static struct list_head rcu_torture_removed;
93715 static cpumask_var_t shuffle_tmp_mask;
93716@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
93717
93718 spin_lock_bh(&rcu_torture_lock);
93719 if (list_empty(&rcu_torture_freelist)) {
93720- atomic_inc(&n_rcu_torture_alloc_fail);
93721+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93722 spin_unlock_bh(&rcu_torture_lock);
93723 return NULL;
93724 }
93725- atomic_inc(&n_rcu_torture_alloc);
93726+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93727 p = rcu_torture_freelist.next;
93728 list_del_init(p);
93729 spin_unlock_bh(&rcu_torture_lock);
93730@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
93731 static void
93732 rcu_torture_free(struct rcu_torture *p)
93733 {
93734- atomic_inc(&n_rcu_torture_free);
93735+ atomic_inc_unchecked(&n_rcu_torture_free);
93736 spin_lock_bh(&rcu_torture_lock);
93737 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93738 spin_unlock_bh(&rcu_torture_lock);
93739@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
93740 i = rp->rtort_pipe_count;
93741 if (i > RCU_TORTURE_PIPE_LEN)
93742 i = RCU_TORTURE_PIPE_LEN;
93743- atomic_inc(&rcu_torture_wcount[i]);
93744+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93745 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93746 rp->rtort_mbtest = 0;
93747 rcu_torture_free(rp);
93748@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
93749 i = rp->rtort_pipe_count;
93750 if (i > RCU_TORTURE_PIPE_LEN)
93751 i = RCU_TORTURE_PIPE_LEN;
93752- atomic_inc(&rcu_torture_wcount[i]);
93753+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93754 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93755 rp->rtort_mbtest = 0;
93756 list_del(&rp->rtort_free);
93757@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
93758 i = old_rp->rtort_pipe_count;
93759 if (i > RCU_TORTURE_PIPE_LEN)
93760 i = RCU_TORTURE_PIPE_LEN;
93761- atomic_inc(&rcu_torture_wcount[i]);
93762+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93763 old_rp->rtort_pipe_count++;
93764 cur_ops->deferred_free(old_rp);
93765 }
93766@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
93767 return;
93768 }
93769 if (p->rtort_mbtest == 0)
93770- atomic_inc(&n_rcu_torture_mberror);
93771+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93772 spin_lock(&rand_lock);
93773 cur_ops->read_delay(&rand);
93774 n_rcu_torture_timers++;
93775@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
93776 continue;
93777 }
93778 if (p->rtort_mbtest == 0)
93779- atomic_inc(&n_rcu_torture_mberror);
93780+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93781 cur_ops->read_delay(&rand);
93782 preempt_disable();
93783 pipe_count = p->rtort_pipe_count;
93784@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
93785 rcu_torture_current,
93786 rcu_torture_current_version,
93787 list_empty(&rcu_torture_freelist),
93788- atomic_read(&n_rcu_torture_alloc),
93789- atomic_read(&n_rcu_torture_alloc_fail),
93790- atomic_read(&n_rcu_torture_free),
93791- atomic_read(&n_rcu_torture_mberror),
93792+ atomic_read_unchecked(&n_rcu_torture_alloc),
93793+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93794+ atomic_read_unchecked(&n_rcu_torture_free),
93795+ atomic_read_unchecked(&n_rcu_torture_mberror),
93796 n_rcu_torture_timers);
93797- if (atomic_read(&n_rcu_torture_mberror) != 0)
93798+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
93799 cnt += sprintf(&page[cnt], " !!!");
93800 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
93801 if (i > 1) {
93802 cnt += sprintf(&page[cnt], "!!! ");
93803- atomic_inc(&n_rcu_torture_error);
93804+ atomic_inc_unchecked(&n_rcu_torture_error);
93805 WARN_ON_ONCE(1);
93806 }
93807 cnt += sprintf(&page[cnt], "Reader Pipe: ");
93808@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
93809 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
93810 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93811 cnt += sprintf(&page[cnt], " %d",
93812- atomic_read(&rcu_torture_wcount[i]));
93813+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93814 }
93815 cnt += sprintf(&page[cnt], "\n");
93816 if (cur_ops->stats)
93817@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
93818
93819 if (cur_ops->cleanup)
93820 cur_ops->cleanup();
93821- if (atomic_read(&n_rcu_torture_error))
93822+ if (atomic_read_unchecked(&n_rcu_torture_error))
93823 rcu_torture_print_module_parms("End of test: FAILURE");
93824 else
93825 rcu_torture_print_module_parms("End of test: SUCCESS");
93826@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
93827
93828 rcu_torture_current = NULL;
93829 rcu_torture_current_version = 0;
93830- atomic_set(&n_rcu_torture_alloc, 0);
93831- atomic_set(&n_rcu_torture_alloc_fail, 0);
93832- atomic_set(&n_rcu_torture_free, 0);
93833- atomic_set(&n_rcu_torture_mberror, 0);
93834- atomic_set(&n_rcu_torture_error, 0);
93835+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93836+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93837+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93838+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93839+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93840 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93841- atomic_set(&rcu_torture_wcount[i], 0);
93842+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93843 for_each_possible_cpu(cpu) {
93844 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93845 per_cpu(rcu_torture_count, cpu)[i] = 0;
93846diff --git a/kernel/rcutree.c b/kernel/rcutree.c
93847index 683c4f3..97f54c6 100644
93848--- a/kernel/rcutree.c
93849+++ b/kernel/rcutree.c
93850@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
93851 /*
93852 * Do softirq processing for the current CPU.
93853 */
93854-static void rcu_process_callbacks(struct softirq_action *unused)
93855+static void rcu_process_callbacks(void)
93856 {
93857 /*
93858 * Memory references from any prior RCU read-side critical sections
93859diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
93860index c03edf7..ac1b341 100644
93861--- a/kernel/rcutree_plugin.h
93862+++ b/kernel/rcutree_plugin.h
93863@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
93864 */
93865 void __rcu_read_lock(void)
93866 {
93867- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
93868+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
93869 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
93870 }
93871 EXPORT_SYMBOL_GPL(__rcu_read_lock);
93872@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
93873 struct task_struct *t = current;
93874
93875 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
93876- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
93877+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
93878 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
93879 rcu_read_unlock_special(t);
93880 }
93881diff --git a/kernel/relay.c b/kernel/relay.c
93882index bf343f5..908e9ee 100644
93883--- a/kernel/relay.c
93884+++ b/kernel/relay.c
93885@@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
93886 unsigned int flags,
93887 int *nonpad_ret)
93888 {
93889- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
93890+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
93891 struct rchan_buf *rbuf = in->private_data;
93892 unsigned int subbuf_size = rbuf->chan->subbuf_size;
93893 uint64_t pos = (uint64_t) *ppos;
93894@@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
93895 .ops = &relay_pipe_buf_ops,
93896 .spd_release = relay_page_release,
93897 };
93898+ ssize_t ret;
93899+
93900+ pax_track_stack();
93901
93902 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
93903 return 0;
93904diff --git a/kernel/resource.c b/kernel/resource.c
93905index fb11a58..4e61ae1 100644
93906--- a/kernel/resource.c
93907+++ b/kernel/resource.c
93908@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
93909
93910 static int __init ioresources_init(void)
93911 {
93912+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93913+#ifdef CONFIG_GRKERNSEC_PROC_USER
93914+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93915+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93916+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93917+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93918+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93919+#endif
93920+#else
93921 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93922 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93923+#endif
93924 return 0;
93925 }
93926 __initcall(ioresources_init);
93927diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
93928index a56f629..1fc4989 100644
93929--- a/kernel/rtmutex-tester.c
93930+++ b/kernel/rtmutex-tester.c
93931@@ -21,7 +21,7 @@
93932 #define MAX_RT_TEST_MUTEXES 8
93933
93934 static spinlock_t rttest_lock;
93935-static atomic_t rttest_event;
93936+static atomic_unchecked_t rttest_event;
93937
93938 struct test_thread_data {
93939 int opcode;
93940@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93941
93942 case RTTEST_LOCKCONT:
93943 td->mutexes[td->opdata] = 1;
93944- td->event = atomic_add_return(1, &rttest_event);
93945+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93946 return 0;
93947
93948 case RTTEST_RESET:
93949@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93950 return 0;
93951
93952 case RTTEST_RESETEVENT:
93953- atomic_set(&rttest_event, 0);
93954+ atomic_set_unchecked(&rttest_event, 0);
93955 return 0;
93956
93957 default:
93958@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93959 return ret;
93960
93961 td->mutexes[id] = 1;
93962- td->event = atomic_add_return(1, &rttest_event);
93963+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93964 rt_mutex_lock(&mutexes[id]);
93965- td->event = atomic_add_return(1, &rttest_event);
93966+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93967 td->mutexes[id] = 4;
93968 return 0;
93969
93970@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93971 return ret;
93972
93973 td->mutexes[id] = 1;
93974- td->event = atomic_add_return(1, &rttest_event);
93975+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93976 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
93977- td->event = atomic_add_return(1, &rttest_event);
93978+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93979 td->mutexes[id] = ret ? 0 : 4;
93980 return ret ? -EINTR : 0;
93981
93982@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93983 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
93984 return ret;
93985
93986- td->event = atomic_add_return(1, &rttest_event);
93987+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93988 rt_mutex_unlock(&mutexes[id]);
93989- td->event = atomic_add_return(1, &rttest_event);
93990+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93991 td->mutexes[id] = 0;
93992 return 0;
93993
93994@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93995 break;
93996
93997 td->mutexes[dat] = 2;
93998- td->event = atomic_add_return(1, &rttest_event);
93999+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94000 break;
94001
94002 case RTTEST_LOCKBKL:
94003@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94004 return;
94005
94006 td->mutexes[dat] = 3;
94007- td->event = atomic_add_return(1, &rttest_event);
94008+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94009 break;
94010
94011 case RTTEST_LOCKNOWAIT:
94012@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
94013 return;
94014
94015 td->mutexes[dat] = 1;
94016- td->event = atomic_add_return(1, &rttest_event);
94017+ td->event = atomic_add_return_unchecked(1, &rttest_event);
94018 return;
94019
94020 case RTTEST_LOCKBKL:
94021diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
94022index 29bd4ba..8c5de90 100644
94023--- a/kernel/rtmutex.c
94024+++ b/kernel/rtmutex.c
94025@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
94026 */
94027 spin_lock_irqsave(&pendowner->pi_lock, flags);
94028
94029- WARN_ON(!pendowner->pi_blocked_on);
94030+ BUG_ON(!pendowner->pi_blocked_on);
94031 WARN_ON(pendowner->pi_blocked_on != waiter);
94032 WARN_ON(pendowner->pi_blocked_on->lock != lock);
94033
94034diff --git a/kernel/sched.c b/kernel/sched.c
94035index 0591df8..e3af3a4 100644
94036--- a/kernel/sched.c
94037+++ b/kernel/sched.c
94038@@ -5043,7 +5043,7 @@ out:
94039 * In CONFIG_NO_HZ case, the idle load balance owner will do the
94040 * rebalancing for all the cpus for whom scheduler ticks are stopped.
94041 */
94042-static void run_rebalance_domains(struct softirq_action *h)
94043+static void run_rebalance_domains(void)
94044 {
94045 int this_cpu = smp_processor_id();
94046 struct rq *this_rq = cpu_rq(this_cpu);
94047@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
94048 }
94049 }
94050
94051+#ifdef CONFIG_GRKERNSEC_SETXID
94052+extern void gr_delayed_cred_worker(void);
94053+static inline void gr_cred_schedule(void)
94054+{
94055+ if (unlikely(current->delayed_cred))
94056+ gr_delayed_cred_worker();
94057+}
94058+#else
94059+static inline void gr_cred_schedule(void)
94060+{
94061+}
94062+#endif
94063+
94064 /*
94065 * schedule() is the main scheduler function.
94066 */
94067@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
94068 struct rq *rq;
94069 int cpu;
94070
94071+ pax_track_stack();
94072+
94073 need_resched:
94074 preempt_disable();
94075 cpu = smp_processor_id();
94076@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
94077
94078 schedule_debug(prev);
94079
94080+ gr_cred_schedule();
94081+
94082 if (sched_feat(HRTICK))
94083 hrtick_clear(rq);
94084
94085@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
94086 * Look out! "owner" is an entirely speculative pointer
94087 * access and not reliable.
94088 */
94089-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94090+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
94091 {
94092 unsigned int cpu;
94093 struct rq *rq;
94094@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94095 * DEBUG_PAGEALLOC could have unmapped it if
94096 * the mutex owner just released it and exited.
94097 */
94098- if (probe_kernel_address(&owner->cpu, cpu))
94099+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
94100 return 0;
94101 #else
94102- cpu = owner->cpu;
94103+ cpu = task_thread_info(owner)->cpu;
94104 #endif
94105
94106 /*
94107@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94108 /*
94109 * Is that owner really running on that cpu?
94110 */
94111- if (task_thread_info(rq->curr) != owner || need_resched())
94112+ if (rq->curr != owner || need_resched())
94113 return 0;
94114
94115 cpu_relax();
94116@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
94117 /* convert nice value [19,-20] to rlimit style value [1,40] */
94118 int nice_rlim = 20 - nice;
94119
94120+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94121+
94122 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
94123 capable(CAP_SYS_NICE));
94124 }
94125@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94126 if (nice > 19)
94127 nice = 19;
94128
94129- if (increment < 0 && !can_nice(current, nice))
94130+ if (increment < 0 && (!can_nice(current, nice) ||
94131+ gr_handle_chroot_nice()))
94132 return -EPERM;
94133
94134 retval = security_task_setnice(current, nice);
94135@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
94136 long power;
94137 int weight;
94138
94139- WARN_ON(!sd || !sd->groups);
94140+ BUG_ON(!sd || !sd->groups);
94141
94142 if (cpu != group_first_cpu(sd->groups))
94143 return;
94144diff --git a/kernel/signal.c b/kernel/signal.c
94145index 2494827..cda80a0 100644
94146--- a/kernel/signal.c
94147+++ b/kernel/signal.c
94148@@ -41,12 +41,12 @@
94149
94150 static struct kmem_cache *sigqueue_cachep;
94151
94152-static void __user *sig_handler(struct task_struct *t, int sig)
94153+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94154 {
94155 return t->sighand->action[sig - 1].sa.sa_handler;
94156 }
94157
94158-static int sig_handler_ignored(void __user *handler, int sig)
94159+static int sig_handler_ignored(__sighandler_t handler, int sig)
94160 {
94161 /* Is it explicitly or implicitly ignored? */
94162 return handler == SIG_IGN ||
94163@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94164 static int sig_task_ignored(struct task_struct *t, int sig,
94165 int from_ancestor_ns)
94166 {
94167- void __user *handler;
94168+ __sighandler_t handler;
94169
94170 handler = sig_handler(t, sig);
94171
94172@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
94173 */
94174 user = get_uid(__task_cred(t)->user);
94175 atomic_inc(&user->sigpending);
94176+
94177+ if (!override_rlimit)
94178+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94179 if (override_rlimit ||
94180 atomic_read(&user->sigpending) <=
94181 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
94182@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94183
94184 int unhandled_signal(struct task_struct *tsk, int sig)
94185 {
94186- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94187+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94188 if (is_global_init(tsk))
94189 return 1;
94190 if (handler != SIG_IGN && handler != SIG_DFL)
94191@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94192 }
94193 }
94194
94195+ /* allow glibc communication via tgkill to other threads in our
94196+ thread group */
94197+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94198+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94199+ && gr_handle_signal(t, sig))
94200+ return -EPERM;
94201+
94202 return security_task_kill(t, info, sig, 0);
94203 }
94204
94205@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94206 return send_signal(sig, info, p, 1);
94207 }
94208
94209-static int
94210+int
94211 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94212 {
94213 return send_signal(sig, info, t, 0);
94214@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94215 unsigned long int flags;
94216 int ret, blocked, ignored;
94217 struct k_sigaction *action;
94218+ int is_unhandled = 0;
94219
94220 spin_lock_irqsave(&t->sighand->siglock, flags);
94221 action = &t->sighand->action[sig-1];
94222@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94223 }
94224 if (action->sa.sa_handler == SIG_DFL)
94225 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94226+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94227+ is_unhandled = 1;
94228 ret = specific_send_sig_info(sig, info, t);
94229 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94230
94231+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
94232+ normal operation */
94233+ if (is_unhandled) {
94234+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94235+ gr_handle_crash(t, sig);
94236+ }
94237+
94238 return ret;
94239 }
94240
94241@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94242 {
94243 int ret = check_kill_permission(sig, info, p);
94244
94245- if (!ret && sig)
94246+ if (!ret && sig) {
94247 ret = do_send_sig_info(sig, info, p, true);
94248+ if (!ret)
94249+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94250+ }
94251
94252 return ret;
94253 }
94254@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
94255 {
94256 siginfo_t info;
94257
94258+ pax_track_stack();
94259+
94260 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
94261
94262 memset(&info, 0, sizeof info);
94263@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94264 int error = -ESRCH;
94265
94266 rcu_read_lock();
94267- p = find_task_by_vpid(pid);
94268+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94269+ /* allow glibc communication via tgkill to other threads in our
94270+ thread group */
94271+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94272+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
94273+ p = find_task_by_vpid_unrestricted(pid);
94274+ else
94275+#endif
94276+ p = find_task_by_vpid(pid);
94277 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94278 error = check_kill_permission(sig, info, p);
94279 /*
94280diff --git a/kernel/smp.c b/kernel/smp.c
94281index aa9cff3..631a0de 100644
94282--- a/kernel/smp.c
94283+++ b/kernel/smp.c
94284@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
94285 }
94286 EXPORT_SYMBOL(smp_call_function);
94287
94288-void ipi_call_lock(void)
94289+void ipi_call_lock(void) __acquires(call_function.lock)
94290 {
94291 spin_lock(&call_function.lock);
94292 }
94293
94294-void ipi_call_unlock(void)
94295+void ipi_call_unlock(void) __releases(call_function.lock)
94296 {
94297 spin_unlock(&call_function.lock);
94298 }
94299
94300-void ipi_call_lock_irq(void)
94301+void ipi_call_lock_irq(void) __acquires(call_function.lock)
94302 {
94303 spin_lock_irq(&call_function.lock);
94304 }
94305
94306-void ipi_call_unlock_irq(void)
94307+void ipi_call_unlock_irq(void) __releases(call_function.lock)
94308 {
94309 spin_unlock_irq(&call_function.lock);
94310 }
94311diff --git a/kernel/softirq.c b/kernel/softirq.c
94312index 04a0252..580c512 100644
94313--- a/kernel/softirq.c
94314+++ b/kernel/softirq.c
94315@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
94316
94317 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94318
94319-char *softirq_to_name[NR_SOFTIRQS] = {
94320+const char * const softirq_to_name[NR_SOFTIRQS] = {
94321 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
94322 "TASKLET", "SCHED", "HRTIMER", "RCU"
94323 };
94324@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
94325
94326 asmlinkage void __do_softirq(void)
94327 {
94328- struct softirq_action *h;
94329+ const struct softirq_action *h;
94330 __u32 pending;
94331 int max_restart = MAX_SOFTIRQ_RESTART;
94332 int cpu;
94333@@ -233,7 +233,7 @@ restart:
94334 kstat_incr_softirqs_this_cpu(h - softirq_vec);
94335
94336 trace_softirq_entry(h, softirq_vec);
94337- h->action(h);
94338+ h->action();
94339 trace_softirq_exit(h, softirq_vec);
94340 if (unlikely(prev_count != preempt_count())) {
94341 printk(KERN_ERR "huh, entered softirq %td %s %p"
94342@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
94343 local_irq_restore(flags);
94344 }
94345
94346-void open_softirq(int nr, void (*action)(struct softirq_action *))
94347+void open_softirq(int nr, void (*action)(void))
94348 {
94349- softirq_vec[nr].action = action;
94350+ pax_open_kernel();
94351+ *(void **)&softirq_vec[nr].action = action;
94352+ pax_close_kernel();
94353 }
94354
94355 /*
94356@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94357
94358 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94359
94360-static void tasklet_action(struct softirq_action *a)
94361+static void tasklet_action(void)
94362 {
94363 struct tasklet_struct *list;
94364
94365@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
94366 }
94367 }
94368
94369-static void tasklet_hi_action(struct softirq_action *a)
94370+static void tasklet_hi_action(void)
94371 {
94372 struct tasklet_struct *list;
94373
94374diff --git a/kernel/sys.c b/kernel/sys.c
94375index e9512b1..f07185f 100644
94376--- a/kernel/sys.c
94377+++ b/kernel/sys.c
94378@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94379 error = -EACCES;
94380 goto out;
94381 }
94382+
94383+ if (gr_handle_chroot_setpriority(p, niceval)) {
94384+ error = -EACCES;
94385+ goto out;
94386+ }
94387+
94388 no_nice = security_task_setnice(p, niceval);
94389 if (no_nice) {
94390 error = no_nice;
94391@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
94392 !(user = find_user(who)))
94393 goto out_unlock; /* No processes for this user */
94394
94395- do_each_thread(g, p)
94396+ do_each_thread(g, p) {
94397 if (__task_cred(p)->uid == who)
94398 error = set_one_prio(p, niceval, error);
94399- while_each_thread(g, p);
94400+ } while_each_thread(g, p);
94401 if (who != cred->uid)
94402 free_uid(user); /* For find_user() */
94403 break;
94404@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
94405 !(user = find_user(who)))
94406 goto out_unlock; /* No processes for this user */
94407
94408- do_each_thread(g, p)
94409+ do_each_thread(g, p) {
94410 if (__task_cred(p)->uid == who) {
94411 niceval = 20 - task_nice(p);
94412 if (niceval > retval)
94413 retval = niceval;
94414 }
94415- while_each_thread(g, p);
94416+ } while_each_thread(g, p);
94417 if (who != cred->uid)
94418 free_uid(user); /* for find_user() */
94419 break;
94420@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94421 goto error;
94422 }
94423
94424+ if (gr_check_group_change(new->gid, new->egid, -1))
94425+ goto error;
94426+
94427 if (rgid != (gid_t) -1 ||
94428 (egid != (gid_t) -1 && egid != old->gid))
94429 new->sgid = new->egid;
94430@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94431 goto error;
94432
94433 retval = -EPERM;
94434+
94435+ if (gr_check_group_change(gid, gid, gid))
94436+ goto error;
94437+
94438 if (capable(CAP_SETGID))
94439 new->gid = new->egid = new->sgid = new->fsgid = gid;
94440 else if (gid == old->gid || gid == old->sgid)
94441@@ -559,7 +572,7 @@ error:
94442 /*
94443 * change the user struct in a credentials set to match the new UID
94444 */
94445-static int set_user(struct cred *new)
94446+int set_user(struct cred *new)
94447 {
94448 struct user_struct *new_user;
94449
94450@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
94451 if (!new_user)
94452 return -EAGAIN;
94453
94454+ /*
94455+ * We don't fail in case of NPROC limit excess here because too many
94456+ * poorly written programs don't check set*uid() return code, assuming
94457+ * it never fails if called by root. We may still enforce NPROC limit
94458+ * for programs doing set*uid()+execve() by harmlessly deferring the
94459+ * failure to the execve() stage.
94460+ */
94461 if (atomic_read(&new_user->processes) >=
94462 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
94463- new_user != INIT_USER) {
94464- free_uid(new_user);
94465- return -EAGAIN;
94466- }
94467+ new_user != INIT_USER)
94468+ current->flags |= PF_NPROC_EXCEEDED;
94469+ else
94470+ current->flags &= ~PF_NPROC_EXCEEDED;
94471
94472 free_uid(new->user);
94473 new->user = new_user;
94474@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94475 goto error;
94476 }
94477
94478+ if (gr_check_user_change(new->uid, new->euid, -1))
94479+ goto error;
94480+
94481 if (new->uid != old->uid) {
94482 retval = set_user(new);
94483 if (retval < 0)
94484@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94485 goto error;
94486
94487 retval = -EPERM;
94488+
94489+ if (gr_check_crash_uid(uid))
94490+ goto error;
94491+ if (gr_check_user_change(uid, uid, uid))
94492+ goto error;
94493+
94494 if (capable(CAP_SETUID)) {
94495 new->suid = new->uid = uid;
94496 if (uid != old->uid) {
94497@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94498 goto error;
94499 }
94500
94501+ if (gr_check_user_change(ruid, euid, -1))
94502+ goto error;
94503+
94504 if (ruid != (uid_t) -1) {
94505 new->uid = ruid;
94506 if (ruid != old->uid) {
94507@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94508 goto error;
94509 }
94510
94511+ if (gr_check_group_change(rgid, egid, -1))
94512+ goto error;
94513+
94514 if (rgid != (gid_t) -1)
94515 new->gid = rgid;
94516 if (egid != (gid_t) -1)
94517@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94518 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
94519 goto error;
94520
94521+ if (gr_check_user_change(-1, -1, uid))
94522+ goto error;
94523+
94524 if (uid == old->uid || uid == old->euid ||
94525 uid == old->suid || uid == old->fsuid ||
94526 capable(CAP_SETUID)) {
94527@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94528 if (gid == old->gid || gid == old->egid ||
94529 gid == old->sgid || gid == old->fsgid ||
94530 capable(CAP_SETGID)) {
94531+ if (gr_check_group_change(-1, -1, gid))
94532+ goto error;
94533+
94534 if (gid != old_fsgid) {
94535 new->fsgid = gid;
94536 goto change_okay;
94537@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
94538 error = get_dumpable(me->mm);
94539 break;
94540 case PR_SET_DUMPABLE:
94541- if (arg2 < 0 || arg2 > 1) {
94542+ if (arg2 > 1) {
94543 error = -EINVAL;
94544 break;
94545 }
94546diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94547index b8bd058..ab6a76be 100644
94548--- a/kernel/sysctl.c
94549+++ b/kernel/sysctl.c
94550@@ -63,6 +63,13 @@
94551 static int deprecated_sysctl_warning(struct __sysctl_args *args);
94552
94553 #if defined(CONFIG_SYSCTL)
94554+#include <linux/grsecurity.h>
94555+#include <linux/grinternal.h>
94556+
94557+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
94558+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
94559+ const int op);
94560+extern int gr_handle_chroot_sysctl(const int op);
94561
94562 /* External variables not in a header file. */
94563 extern int C_A_D;
94564@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
94565 static int proc_taint(struct ctl_table *table, int write,
94566 void __user *buffer, size_t *lenp, loff_t *ppos);
94567 #endif
94568+extern ctl_table grsecurity_table[];
94569
94570 static struct ctl_table root_table[];
94571 static struct ctl_table_root sysctl_table_root;
94572@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
94573 int sysctl_legacy_va_layout;
94574 #endif
94575
94576+#ifdef CONFIG_PAX_SOFTMODE
94577+static ctl_table pax_table[] = {
94578+ {
94579+ .ctl_name = CTL_UNNUMBERED,
94580+ .procname = "softmode",
94581+ .data = &pax_softmode,
94582+ .maxlen = sizeof(unsigned int),
94583+ .mode = 0600,
94584+ .proc_handler = &proc_dointvec,
94585+ },
94586+
94587+ { .ctl_name = 0 }
94588+};
94589+#endif
94590+
94591 extern int prove_locking;
94592 extern int lock_stat;
94593
94594@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
94595 #endif
94596
94597 static struct ctl_table kern_table[] = {
94598+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
94599+ {
94600+ .ctl_name = CTL_UNNUMBERED,
94601+ .procname = "grsecurity",
94602+ .mode = 0500,
94603+ .child = grsecurity_table,
94604+ },
94605+#endif
94606+
94607+#ifdef CONFIG_PAX_SOFTMODE
94608+ {
94609+ .ctl_name = CTL_UNNUMBERED,
94610+ .procname = "pax",
94611+ .mode = 0500,
94612+ .child = pax_table,
94613+ },
94614+#endif
94615+
94616 {
94617 .ctl_name = CTL_UNNUMBERED,
94618 .procname = "sched_child_runs_first",
94619@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
94620 .data = &modprobe_path,
94621 .maxlen = KMOD_PATH_LEN,
94622 .mode = 0644,
94623- .proc_handler = &proc_dostring,
94624- .strategy = &sysctl_string,
94625+ .proc_handler = &proc_dostring_modpriv,
94626+ .strategy = &sysctl_string_modpriv,
94627 },
94628 {
94629 .ctl_name = CTL_UNNUMBERED,
94630@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
94631 .mode = 0644,
94632 .proc_handler = &proc_dointvec
94633 },
94634+ {
94635+ .procname = "heap_stack_gap",
94636+ .data = &sysctl_heap_stack_gap,
94637+ .maxlen = sizeof(sysctl_heap_stack_gap),
94638+ .mode = 0644,
94639+ .proc_handler = proc_doulongvec_minmax,
94640+ },
94641 #else
94642 {
94643 .ctl_name = CTL_UNNUMBERED,
94644@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
94645 return 0;
94646 }
94647
94648+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
94649+
94650 static int parse_table(int __user *name, int nlen,
94651 void __user *oldval, size_t __user *oldlenp,
94652 void __user *newval, size_t newlen,
94653@@ -1821,7 +1871,7 @@ repeat:
94654 if (n == table->ctl_name) {
94655 int error;
94656 if (table->child) {
94657- if (sysctl_perm(root, table, MAY_EXEC))
94658+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
94659 return -EPERM;
94660 name++;
94661 nlen--;
94662@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
94663 int error;
94664 int mode;
94665
94666+ if (table->parent != NULL && table->parent->procname != NULL &&
94667+ table->procname != NULL &&
94668+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
94669+ return -EACCES;
94670+ if (gr_handle_chroot_sysctl(op))
94671+ return -EACCES;
94672+ error = gr_handle_sysctl(table, op);
94673+ if (error)
94674+ return error;
94675+
94676+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94677+ if (error)
94678+ return error;
94679+
94680+ if (root->permissions)
94681+ mode = root->permissions(root, current->nsproxy, table);
94682+ else
94683+ mode = table->mode;
94684+
94685+ return test_perm(mode, op);
94686+}
94687+
94688+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
94689+{
94690+ int error;
94691+ int mode;
94692+
94693 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94694 if (error)
94695 return error;
94696@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
94697 buffer, lenp, ppos);
94698 }
94699
94700+int proc_dostring_modpriv(struct ctl_table *table, int write,
94701+ void __user *buffer, size_t *lenp, loff_t *ppos)
94702+{
94703+ if (write && !capable(CAP_SYS_MODULE))
94704+ return -EPERM;
94705+
94706+ return _proc_do_string(table->data, table->maxlen, write,
94707+ buffer, lenp, ppos);
94708+}
94709+
94710
94711 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
94712 int *valp,
94713@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
94714 vleft = table->maxlen / sizeof(unsigned long);
94715 left = *lenp;
94716
94717- for (; left && vleft--; i++, min++, max++, first=0) {
94718+ for (; left && vleft--; i++, first=0) {
94719 if (write) {
94720 while (left) {
94721 char c;
94722@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
94723 return -ENOSYS;
94724 }
94725
94726+int proc_dostring_modpriv(struct ctl_table *table, int write,
94727+ void __user *buffer, size_t *lenp, loff_t *ppos)
94728+{
94729+ return -ENOSYS;
94730+}
94731+
94732 int proc_dointvec(struct ctl_table *table, int write,
94733 void __user *buffer, size_t *lenp, loff_t *ppos)
94734 {
94735@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
94736 return 1;
94737 }
94738
94739+int sysctl_string_modpriv(struct ctl_table *table,
94740+ void __user *oldval, size_t __user *oldlenp,
94741+ void __user *newval, size_t newlen)
94742+{
94743+ if (newval && newlen && !capable(CAP_SYS_MODULE))
94744+ return -EPERM;
94745+
94746+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
94747+}
94748+
94749 /*
94750 * This function makes sure that all of the integers in the vector
94751 * are between the minimum and maximum values given in the arrays
94752@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
94753 return -ENOSYS;
94754 }
94755
94756+int sysctl_string_modpriv(struct ctl_table *table,
94757+ void __user *oldval, size_t __user *oldlenp,
94758+ void __user *newval, size_t newlen)
94759+{
94760+ return -ENOSYS;
94761+}
94762+
94763 int sysctl_intvec(struct ctl_table *table,
94764 void __user *oldval, size_t __user *oldlenp,
94765 void __user *newval, size_t newlen)
94766@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94767 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94768 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94769 EXPORT_SYMBOL(proc_dostring);
94770+EXPORT_SYMBOL(proc_dostring_modpriv);
94771 EXPORT_SYMBOL(proc_doulongvec_minmax);
94772 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94773 EXPORT_SYMBOL(register_sysctl_table);
94774@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
94775 EXPORT_SYMBOL(sysctl_jiffies);
94776 EXPORT_SYMBOL(sysctl_ms_jiffies);
94777 EXPORT_SYMBOL(sysctl_string);
94778+EXPORT_SYMBOL(sysctl_string_modpriv);
94779 EXPORT_SYMBOL(sysctl_data);
94780 EXPORT_SYMBOL(unregister_sysctl_table);
94781diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
94782index 469193c..ea3ecb2 100644
94783--- a/kernel/sysctl_check.c
94784+++ b/kernel/sysctl_check.c
94785@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
94786 } else {
94787 if ((table->strategy == sysctl_data) ||
94788 (table->strategy == sysctl_string) ||
94789+ (table->strategy == sysctl_string_modpriv) ||
94790 (table->strategy == sysctl_intvec) ||
94791 (table->strategy == sysctl_jiffies) ||
94792 (table->strategy == sysctl_ms_jiffies) ||
94793 (table->proc_handler == proc_dostring) ||
94794+ (table->proc_handler == proc_dostring_modpriv) ||
94795 (table->proc_handler == proc_dointvec) ||
94796 (table->proc_handler == proc_dointvec_minmax) ||
94797 (table->proc_handler == proc_dointvec_jiffies) ||
94798diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94799index a4ef542..798bcd7 100644
94800--- a/kernel/taskstats.c
94801+++ b/kernel/taskstats.c
94802@@ -26,9 +26,12 @@
94803 #include <linux/cgroup.h>
94804 #include <linux/fs.h>
94805 #include <linux/file.h>
94806+#include <linux/grsecurity.h>
94807 #include <net/genetlink.h>
94808 #include <asm/atomic.h>
94809
94810+extern int gr_is_taskstats_denied(int pid);
94811+
94812 /*
94813 * Maximum length of a cpumask that can be specified in
94814 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94815@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94816 size_t size;
94817 cpumask_var_t mask;
94818
94819+ if (gr_is_taskstats_denied(current->pid))
94820+ return -EACCES;
94821+
94822 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
94823 return -ENOMEM;
94824
94825diff --git a/kernel/time.c b/kernel/time.c
94826index 33df60e..ca768bd 100644
94827--- a/kernel/time.c
94828+++ b/kernel/time.c
94829@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
94830 return error;
94831
94832 if (tz) {
94833+ /* we log in do_settimeofday called below, so don't log twice
94834+ */
94835+ if (!tv)
94836+ gr_log_timechange();
94837+
94838 /* SMP safe, global irq locking makes it work. */
94839 sys_tz = *tz;
94840 update_vsyscall_tz();
94841@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
94842 * Avoid unnecessary multiplications/divisions in the
94843 * two most common HZ cases:
94844 */
94845-unsigned int inline jiffies_to_msecs(const unsigned long j)
94846+inline unsigned int jiffies_to_msecs(const unsigned long j)
94847 {
94848 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
94849 return (MSEC_PER_SEC / HZ) * j;
94850@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
94851 }
94852 EXPORT_SYMBOL(jiffies_to_msecs);
94853
94854-unsigned int inline jiffies_to_usecs(const unsigned long j)
94855+inline unsigned int jiffies_to_usecs(const unsigned long j)
94856 {
94857 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
94858 return (USEC_PER_SEC / HZ) * j;
94859diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
94860index 57b953f..06f149f 100644
94861--- a/kernel/time/tick-broadcast.c
94862+++ b/kernel/time/tick-broadcast.c
94863@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
94864 * then clear the broadcast bit.
94865 */
94866 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
94867- int cpu = smp_processor_id();
94868+ cpu = smp_processor_id();
94869
94870 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
94871 tick_broadcast_clear_oneshot(cpu);
94872diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94873index 4a71cff..ffb5548 100644
94874--- a/kernel/time/timekeeping.c
94875+++ b/kernel/time/timekeeping.c
94876@@ -14,6 +14,7 @@
94877 #include <linux/init.h>
94878 #include <linux/mm.h>
94879 #include <linux/sched.h>
94880+#include <linux/grsecurity.h>
94881 #include <linux/sysdev.h>
94882 #include <linux/clocksource.h>
94883 #include <linux/jiffies.h>
94884@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
94885 */
94886 struct timespec ts = xtime;
94887 timespec_add_ns(&ts, nsec);
94888- ACCESS_ONCE(xtime_cache) = ts;
94889+ ACCESS_ONCE_RW(xtime_cache) = ts;
94890 }
94891
94892 /* must hold xtime_lock */
94893@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
94894 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
94895 return -EINVAL;
94896
94897+ gr_log_timechange();
94898+
94899 write_seqlock_irqsave(&xtime_lock, flags);
94900
94901 timekeeping_forward_now();
94902diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94903index 54c0dda..e9095d9 100644
94904--- a/kernel/time/timer_list.c
94905+++ b/kernel/time/timer_list.c
94906@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94907
94908 static void print_name_offset(struct seq_file *m, void *sym)
94909 {
94910+#ifdef CONFIG_GRKERNSEC_HIDESYM
94911+ SEQ_printf(m, "<%p>", NULL);
94912+#else
94913 char symname[KSYM_NAME_LEN];
94914
94915 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94916 SEQ_printf(m, "<%p>", sym);
94917 else
94918 SEQ_printf(m, "%s", symname);
94919+#endif
94920 }
94921
94922 static void
94923@@ -112,7 +116,11 @@ next_one:
94924 static void
94925 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94926 {
94927+#ifdef CONFIG_GRKERNSEC_HIDESYM
94928+ SEQ_printf(m, " .base: %p\n", NULL);
94929+#else
94930 SEQ_printf(m, " .base: %p\n", base);
94931+#endif
94932 SEQ_printf(m, " .index: %d\n",
94933 base->index);
94934 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94935@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
94936 {
94937 struct proc_dir_entry *pe;
94938
94939+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94940+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94941+#else
94942 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94943+#endif
94944 if (!pe)
94945 return -ENOMEM;
94946 return 0;
94947diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94948index ee5681f..634089b 100644
94949--- a/kernel/time/timer_stats.c
94950+++ b/kernel/time/timer_stats.c
94951@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94952 static unsigned long nr_entries;
94953 static struct entry entries[MAX_ENTRIES];
94954
94955-static atomic_t overflow_count;
94956+static atomic_unchecked_t overflow_count;
94957
94958 /*
94959 * The entries are in a hash-table, for fast lookup:
94960@@ -140,7 +140,7 @@ static void reset_entries(void)
94961 nr_entries = 0;
94962 memset(entries, 0, sizeof(entries));
94963 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94964- atomic_set(&overflow_count, 0);
94965+ atomic_set_unchecked(&overflow_count, 0);
94966 }
94967
94968 static struct entry *alloc_entry(void)
94969@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94970 if (likely(entry))
94971 entry->count++;
94972 else
94973- atomic_inc(&overflow_count);
94974+ atomic_inc_unchecked(&overflow_count);
94975
94976 out_unlock:
94977 spin_unlock_irqrestore(lock, flags);
94978@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94979
94980 static void print_name_offset(struct seq_file *m, unsigned long addr)
94981 {
94982+#ifdef CONFIG_GRKERNSEC_HIDESYM
94983+ seq_printf(m, "<%p>", NULL);
94984+#else
94985 char symname[KSYM_NAME_LEN];
94986
94987 if (lookup_symbol_name(addr, symname) < 0)
94988 seq_printf(m, "<%p>", (void *)addr);
94989 else
94990 seq_printf(m, "%s", symname);
94991+#endif
94992 }
94993
94994 static int tstats_show(struct seq_file *m, void *v)
94995@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
94996
94997 seq_puts(m, "Timer Stats Version: v0.2\n");
94998 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94999- if (atomic_read(&overflow_count))
95000+ if (atomic_read_unchecked(&overflow_count))
95001 seq_printf(m, "Overflow: %d entries\n",
95002- atomic_read(&overflow_count));
95003+ atomic_read_unchecked(&overflow_count));
95004
95005 for (i = 0; i < nr_entries; i++) {
95006 entry = entries + i;
95007@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
95008 {
95009 struct proc_dir_entry *pe;
95010
95011+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95012+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
95013+#else
95014 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
95015+#endif
95016 if (!pe)
95017 return -ENOMEM;
95018 return 0;
95019diff --git a/kernel/timer.c b/kernel/timer.c
95020index cb3c1f1..8bf5526 100644
95021--- a/kernel/timer.c
95022+++ b/kernel/timer.c
95023@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
95024 /*
95025 * This function runs timers and the timer-tq in bottom half context.
95026 */
95027-static void run_timer_softirq(struct softirq_action *h)
95028+static void run_timer_softirq(void)
95029 {
95030 struct tvec_base *base = __get_cpu_var(tvec_bases);
95031
95032diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
95033index d9d6206..f19467e 100644
95034--- a/kernel/trace/blktrace.c
95035+++ b/kernel/trace/blktrace.c
95036@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
95037 struct blk_trace *bt = filp->private_data;
95038 char buf[16];
95039
95040- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
95041+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
95042
95043 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
95044 }
95045@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
95046 return 1;
95047
95048 bt = buf->chan->private_data;
95049- atomic_inc(&bt->dropped);
95050+ atomic_inc_unchecked(&bt->dropped);
95051 return 0;
95052 }
95053
95054@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
95055
95056 bt->dir = dir;
95057 bt->dev = dev;
95058- atomic_set(&bt->dropped, 0);
95059+ atomic_set_unchecked(&bt->dropped, 0);
95060
95061 ret = -EIO;
95062 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
95063diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
95064index 4872937..c794d40 100644
95065--- a/kernel/trace/ftrace.c
95066+++ b/kernel/trace/ftrace.c
95067@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
95068
95069 ip = rec->ip;
95070
95071+ ret = ftrace_arch_code_modify_prepare();
95072+ FTRACE_WARN_ON(ret);
95073+ if (ret)
95074+ return 0;
95075+
95076 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95077+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95078 if (ret) {
95079 ftrace_bug(ret, ip);
95080 rec->flags |= FTRACE_FL_FAILED;
95081- return 0;
95082 }
95083- return 1;
95084+ return ret ? 0 : 1;
95085 }
95086
95087 /*
95088diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95089index e749a05..19c6e94 100644
95090--- a/kernel/trace/ring_buffer.c
95091+++ b/kernel/trace/ring_buffer.c
95092@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
95093 * the reader page). But if the next page is a header page,
95094 * its flags will be non zero.
95095 */
95096-static int inline
95097+static inline int
95098 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95099 struct buffer_page *page, struct list_head *list)
95100 {
95101diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95102index a2a2d1f..7f32b09 100644
95103--- a/kernel/trace/trace.c
95104+++ b/kernel/trace/trace.c
95105@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
95106 size_t rem;
95107 unsigned int i;
95108
95109+ pax_track_stack();
95110+
95111 /* copy the tracer to avoid using a global lock all around */
95112 mutex_lock(&trace_types_lock);
95113 if (unlikely(old_tracer != current_trace && current_trace)) {
95114@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
95115 int entries, size, i;
95116 size_t ret;
95117
95118+ pax_track_stack();
95119+
95120 if (*ppos & (PAGE_SIZE - 1)) {
95121 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
95122 return -EINVAL;
95123@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
95124 };
95125 #endif
95126
95127-static struct dentry *d_tracer;
95128-
95129 struct dentry *tracing_init_dentry(void)
95130 {
95131+ static struct dentry *d_tracer;
95132 static int once;
95133
95134 if (d_tracer)
95135@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
95136 return d_tracer;
95137 }
95138
95139-static struct dentry *d_percpu;
95140-
95141 struct dentry *tracing_dentry_percpu(void)
95142 {
95143+ static struct dentry *d_percpu;
95144 static int once;
95145 struct dentry *d_tracer;
95146
95147diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95148index d128f65..f37b4af 100644
95149--- a/kernel/trace/trace_events.c
95150+++ b/kernel/trace/trace_events.c
95151@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
95152 * Modules must own their file_operations to keep up with
95153 * reference counting.
95154 */
95155+
95156 struct ftrace_module_file_ops {
95157 struct list_head list;
95158 struct module *mod;
95159- struct file_operations id;
95160- struct file_operations enable;
95161- struct file_operations format;
95162- struct file_operations filter;
95163 };
95164
95165 static void remove_subsystem_dir(const char *name)
95166@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
95167
95168 file_ops->mod = mod;
95169
95170- file_ops->id = ftrace_event_id_fops;
95171- file_ops->id.owner = mod;
95172-
95173- file_ops->enable = ftrace_enable_fops;
95174- file_ops->enable.owner = mod;
95175-
95176- file_ops->filter = ftrace_event_filter_fops;
95177- file_ops->filter.owner = mod;
95178-
95179- file_ops->format = ftrace_event_format_fops;
95180- file_ops->format.owner = mod;
95181+ pax_open_kernel();
95182+ *(void **)&mod->trace_id.owner = mod;
95183+ *(void **)&mod->trace_enable.owner = mod;
95184+ *(void **)&mod->trace_filter.owner = mod;
95185+ *(void **)&mod->trace_format.owner = mod;
95186+ pax_close_kernel();
95187
95188 list_add(&file_ops->list, &ftrace_module_file_list);
95189
95190@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
95191 call->mod = mod;
95192 list_add(&call->list, &ftrace_events);
95193 event_create_dir(call, d_events,
95194- &file_ops->id, &file_ops->enable,
95195- &file_ops->filter, &file_ops->format);
95196+ &mod->trace_id, &mod->trace_enable,
95197+ &mod->trace_filter, &mod->trace_format);
95198 }
95199 }
95200
95201diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95202index 0acd834..b800b56 100644
95203--- a/kernel/trace/trace_mmiotrace.c
95204+++ b/kernel/trace/trace_mmiotrace.c
95205@@ -23,7 +23,7 @@ struct header_iter {
95206 static struct trace_array *mmio_trace_array;
95207 static bool overrun_detected;
95208 static unsigned long prev_overruns;
95209-static atomic_t dropped_count;
95210+static atomic_unchecked_t dropped_count;
95211
95212 static void mmio_reset_data(struct trace_array *tr)
95213 {
95214@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
95215
95216 static unsigned long count_overruns(struct trace_iterator *iter)
95217 {
95218- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95219+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95220 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
95221
95222 if (over > prev_overruns)
95223@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95224 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95225 sizeof(*entry), 0, pc);
95226 if (!event) {
95227- atomic_inc(&dropped_count);
95228+ atomic_inc_unchecked(&dropped_count);
95229 return;
95230 }
95231 entry = ring_buffer_event_data(event);
95232@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95233 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95234 sizeof(*entry), 0, pc);
95235 if (!event) {
95236- atomic_inc(&dropped_count);
95237+ atomic_inc_unchecked(&dropped_count);
95238 return;
95239 }
95240 entry = ring_buffer_event_data(event);
95241diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95242index b6c12c6..41fdc53 100644
95243--- a/kernel/trace/trace_output.c
95244+++ b/kernel/trace/trace_output.c
95245@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
95246 return 0;
95247 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
95248 if (!IS_ERR(p)) {
95249- p = mangle_path(s->buffer + s->len, p, "\n");
95250+ p = mangle_path(s->buffer + s->len, p, "\n\\");
95251 if (p) {
95252 s->len = p - s->buffer;
95253 return 1;
95254diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95255index 8504ac7..ecf0adb 100644
95256--- a/kernel/trace/trace_stack.c
95257+++ b/kernel/trace/trace_stack.c
95258@@ -50,7 +50,7 @@ static inline void check_stack(void)
95259 return;
95260
95261 /* we do not handle interrupt stacks yet */
95262- if (!object_is_on_stack(&this_size))
95263+ if (!object_starts_on_stack(&this_size))
95264 return;
95265
95266 local_irq_save(flags);
95267diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
95268index 40cafb0..d5ead43 100644
95269--- a/kernel/trace/trace_workqueue.c
95270+++ b/kernel/trace/trace_workqueue.c
95271@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
95272 int cpu;
95273 pid_t pid;
95274 /* Can be inserted from interrupt or user context, need to be atomic */
95275- atomic_t inserted;
95276+ atomic_unchecked_t inserted;
95277 /*
95278 * Don't need to be atomic, works are serialized in a single workqueue thread
95279 * on a single CPU.
95280@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
95281 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
95282 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
95283 if (node->pid == wq_thread->pid) {
95284- atomic_inc(&node->inserted);
95285+ atomic_inc_unchecked(&node->inserted);
95286 goto found;
95287 }
95288 }
95289@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
95290 tsk = get_pid_task(pid, PIDTYPE_PID);
95291 if (tsk) {
95292 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
95293- atomic_read(&cws->inserted), cws->executed,
95294+ atomic_read_unchecked(&cws->inserted), cws->executed,
95295 tsk->comm);
95296 put_task_struct(tsk);
95297 }
95298diff --git a/kernel/user.c b/kernel/user.c
95299index 1b91701..8795237 100644
95300--- a/kernel/user.c
95301+++ b/kernel/user.c
95302@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
95303 spin_lock_irq(&uidhash_lock);
95304 up = uid_hash_find(uid, hashent);
95305 if (up) {
95306+ put_user_ns(ns);
95307 key_put(new->uid_keyring);
95308 key_put(new->session_keyring);
95309 kmem_cache_free(uid_cachep, new);
95310diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95311index 234ceb1..ad74049 100644
95312--- a/lib/Kconfig.debug
95313+++ b/lib/Kconfig.debug
95314@@ -905,7 +905,7 @@ config LATENCYTOP
95315 select STACKTRACE
95316 select SCHEDSTATS
95317 select SCHED_DEBUG
95318- depends on HAVE_LATENCYTOP_SUPPORT
95319+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
95320 help
95321 Enable this option if you want to use the LatencyTOP tool
95322 to find out which userspace is blocking on what kernel operations.
95323diff --git a/lib/bitmap.c b/lib/bitmap.c
95324index 7025658..8d14cab 100644
95325--- a/lib/bitmap.c
95326+++ b/lib/bitmap.c
95327@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95328 {
95329 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95330 u32 chunk;
95331- const char __user *ubuf = buf;
95332+ const char __user *ubuf = (const char __force_user *)buf;
95333
95334 bitmap_zero(maskp, nmaskbits);
95335
95336@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
95337 {
95338 if (!access_ok(VERIFY_READ, ubuf, ulen))
95339 return -EFAULT;
95340- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
95341+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
95342 }
95343 EXPORT_SYMBOL(bitmap_parse_user);
95344
95345diff --git a/lib/bug.c b/lib/bug.c
95346index 300e41a..2779eb0 100644
95347--- a/lib/bug.c
95348+++ b/lib/bug.c
95349@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95350 return BUG_TRAP_TYPE_NONE;
95351
95352 bug = find_bug(bugaddr);
95353+ if (!bug)
95354+ return BUG_TRAP_TYPE_NONE;
95355
95356 printk(KERN_EMERG "------------[ cut here ]------------\n");
95357
95358diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95359index 2b413db..e21d207 100644
95360--- a/lib/debugobjects.c
95361+++ b/lib/debugobjects.c
95362@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95363 if (limit > 4)
95364 return;
95365
95366- is_on_stack = object_is_on_stack(addr);
95367+ is_on_stack = object_starts_on_stack(addr);
95368 if (is_on_stack == onstack)
95369 return;
95370
95371diff --git a/lib/devres.c b/lib/devres.c
95372index 72c8909..7543868 100644
95373--- a/lib/devres.c
95374+++ b/lib/devres.c
95375@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
95376 {
95377 iounmap(addr);
95378 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
95379- (void *)addr));
95380+ (void __force *)addr));
95381 }
95382 EXPORT_SYMBOL(devm_iounmap);
95383
95384@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
95385 {
95386 ioport_unmap(addr);
95387 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
95388- devm_ioport_map_match, (void *)addr));
95389+ devm_ioport_map_match, (void __force *)addr));
95390 }
95391 EXPORT_SYMBOL(devm_ioport_unmap);
95392
95393diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95394index 084e879..0674448 100644
95395--- a/lib/dma-debug.c
95396+++ b/lib/dma-debug.c
95397@@ -861,7 +861,7 @@ out:
95398
95399 static void check_for_stack(struct device *dev, void *addr)
95400 {
95401- if (object_is_on_stack(addr))
95402+ if (object_starts_on_stack(addr))
95403 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
95404 "stack [addr=%p]\n", addr);
95405 }
95406diff --git a/lib/idr.c b/lib/idr.c
95407index eda7ba3..915dfae 100644
95408--- a/lib/idr.c
95409+++ b/lib/idr.c
95410@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
95411 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
95412
95413 /* if already at the top layer, we need to grow */
95414- if (id >= 1 << (idp->layers * IDR_BITS)) {
95415+ if (id >= (1 << (idp->layers * IDR_BITS))) {
95416 *starting_id = id;
95417 return IDR_NEED_TO_GROW;
95418 }
95419diff --git a/lib/inflate.c b/lib/inflate.c
95420index d102559..4215f31 100644
95421--- a/lib/inflate.c
95422+++ b/lib/inflate.c
95423@@ -266,7 +266,7 @@ static void free(void *where)
95424 malloc_ptr = free_mem_ptr;
95425 }
95426 #else
95427-#define malloc(a) kmalloc(a, GFP_KERNEL)
95428+#define malloc(a) kmalloc((a), GFP_KERNEL)
95429 #define free(a) kfree(a)
95430 #endif
95431
95432diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95433index bd2bea9..6b3c95e 100644
95434--- a/lib/is_single_threaded.c
95435+++ b/lib/is_single_threaded.c
95436@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95437 struct task_struct *p, *t;
95438 bool ret;
95439
95440+ if (!mm)
95441+ return true;
95442+
95443 if (atomic_read(&task->signal->live) != 1)
95444 return false;
95445
95446diff --git a/lib/kobject.c b/lib/kobject.c
95447index b512b74..8115eb1 100644
95448--- a/lib/kobject.c
95449+++ b/lib/kobject.c
95450@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
95451 return ret;
95452 }
95453
95454-struct sysfs_ops kobj_sysfs_ops = {
95455+const struct sysfs_ops kobj_sysfs_ops = {
95456 .show = kobj_attr_show,
95457 .store = kobj_attr_store,
95458 };
95459@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
95460 * If the kset was not able to be created, NULL will be returned.
95461 */
95462 static struct kset *kset_create(const char *name,
95463- struct kset_uevent_ops *uevent_ops,
95464+ const struct kset_uevent_ops *uevent_ops,
95465 struct kobject *parent_kobj)
95466 {
95467 struct kset *kset;
95468@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
95469 * If the kset was not able to be created, NULL will be returned.
95470 */
95471 struct kset *kset_create_and_add(const char *name,
95472- struct kset_uevent_ops *uevent_ops,
95473+ const struct kset_uevent_ops *uevent_ops,
95474 struct kobject *parent_kobj)
95475 {
95476 struct kset *kset;
95477diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
95478index 507b821..0bf8ed0 100644
95479--- a/lib/kobject_uevent.c
95480+++ b/lib/kobject_uevent.c
95481@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
95482 const char *subsystem;
95483 struct kobject *top_kobj;
95484 struct kset *kset;
95485- struct kset_uevent_ops *uevent_ops;
95486+ const struct kset_uevent_ops *uevent_ops;
95487 u64 seq;
95488 int i = 0;
95489 int retval = 0;
95490diff --git a/lib/kref.c b/lib/kref.c
95491index 9ecd6e8..12c94c1 100644
95492--- a/lib/kref.c
95493+++ b/lib/kref.c
95494@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
95495 */
95496 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
95497 {
95498- WARN_ON(release == NULL);
95499+ BUG_ON(release == NULL);
95500 WARN_ON(release == (void (*)(struct kref *))kfree);
95501
95502 if (atomic_dec_and_test(&kref->refcount)) {
95503diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95504index 92cdd99..a8149d7 100644
95505--- a/lib/radix-tree.c
95506+++ b/lib/radix-tree.c
95507@@ -81,7 +81,7 @@ struct radix_tree_preload {
95508 int nr;
95509 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
95510 };
95511-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95512+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95513
95514 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
95515 {
95516diff --git a/lib/random32.c b/lib/random32.c
95517index 217d5c4..45aba8a 100644
95518--- a/lib/random32.c
95519+++ b/lib/random32.c
95520@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
95521 */
95522 static inline u32 __seed(u32 x, u32 m)
95523 {
95524- return (x < m) ? x + m : x;
95525+ return (x <= m) ? x + m + 1 : x;
95526 }
95527
95528 /**
95529diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95530index 33bed5e..1477e46 100644
95531--- a/lib/vsprintf.c
95532+++ b/lib/vsprintf.c
95533@@ -16,6 +16,9 @@
95534 * - scnprintf and vscnprintf
95535 */
95536
95537+#ifdef CONFIG_GRKERNSEC_HIDESYM
95538+#define __INCLUDED_BY_HIDESYM 1
95539+#endif
95540 #include <stdarg.h>
95541 #include <linux/module.h>
95542 #include <linux/types.h>
95543@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
95544 return buf;
95545 }
95546
95547-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
95548+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
95549 {
95550 int len, i;
95551
95552 if ((unsigned long)s < PAGE_SIZE)
95553- s = "<NULL>";
95554+ s = "(null)";
95555
95556 len = strnlen(s, spec.precision);
95557
95558@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
95559 unsigned long value = (unsigned long) ptr;
95560 #ifdef CONFIG_KALLSYMS
95561 char sym[KSYM_SYMBOL_LEN];
95562- if (ext != 'f' && ext != 's')
95563+ if (ext != 'f' && ext != 's' && ext != 'a')
95564 sprint_symbol(sym, value);
95565 else
95566 kallsyms_lookup(value, NULL, NULL, NULL, sym);
95567@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
95568 * - 'f' For simple symbolic function names without offset
95569 * - 'S' For symbolic direct pointers with offset
95570 * - 's' For symbolic direct pointers without offset
95571+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95572+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
95573 * - 'R' For a struct resource pointer, it prints the range of
95574 * addresses (not the name nor the flags)
95575 * - 'M' For a 6-byte MAC address, it prints the address in the
95576@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95577 struct printf_spec spec)
95578 {
95579 if (!ptr)
95580- return string(buf, end, "(null)", spec);
95581+ return string(buf, end, "(nil)", spec);
95582
95583 switch (*fmt) {
95584 case 'F':
95585@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95586 case 's':
95587 /* Fallthrough */
95588 case 'S':
95589+#ifdef CONFIG_GRKERNSEC_HIDESYM
95590+ break;
95591+#else
95592+ return symbol_string(buf, end, ptr, spec, *fmt);
95593+#endif
95594+ case 'a':
95595+ /* Fallthrough */
95596+ case 'A':
95597 return symbol_string(buf, end, ptr, spec, *fmt);
95598 case 'R':
95599 return resource_string(buf, end, ptr, spec);
95600@@ -1445,7 +1458,7 @@ do { \
95601 size_t len;
95602 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
95603 || (unsigned long)save_str < PAGE_SIZE)
95604- save_str = "<NULL>";
95605+ save_str = "(null)";
95606 len = strlen(save_str);
95607 if (str + len + 1 < end)
95608 memcpy(str, save_str, len + 1);
95609@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95610 typeof(type) value; \
95611 if (sizeof(type) == 8) { \
95612 args = PTR_ALIGN(args, sizeof(u32)); \
95613- *(u32 *)&value = *(u32 *)args; \
95614- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95615+ *(u32 *)&value = *(const u32 *)args; \
95616+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95617 } else { \
95618 args = PTR_ALIGN(args, sizeof(type)); \
95619- value = *(typeof(type) *)args; \
95620+ value = *(const typeof(type) *)args; \
95621 } \
95622 args += sizeof(type); \
95623 value; \
95624@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95625 const char *str_arg = args;
95626 size_t len = strlen(str_arg);
95627 args += len + 1;
95628- str = string(str, end, (char *)str_arg, spec);
95629+ str = string(str, end, str_arg, spec);
95630 break;
95631 }
95632
95633diff --git a/localversion-grsec b/localversion-grsec
95634new file mode 100644
95635index 0000000..7cd6065
95636--- /dev/null
95637+++ b/localversion-grsec
95638@@ -0,0 +1 @@
95639+-grsec
95640diff --git a/mm/Kconfig b/mm/Kconfig
95641index 2c19c0b..f3c3f83 100644
95642--- a/mm/Kconfig
95643+++ b/mm/Kconfig
95644@@ -228,7 +228,7 @@ config KSM
95645 config DEFAULT_MMAP_MIN_ADDR
95646 int "Low address space to protect from user allocation"
95647 depends on MMU
95648- default 4096
95649+ default 65536
95650 help
95651 This is the portion of low virtual memory which should be protected
95652 from userspace allocation. Keeping a user from writing to low pages
95653diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95654index d824401..9f5244a 100644
95655--- a/mm/backing-dev.c
95656+++ b/mm/backing-dev.c
95657@@ -271,7 +271,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
95658 list_add_tail_rcu(&wb->list, &bdi->wb_list);
95659 spin_unlock(&bdi->wb_lock);
95660
95661- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
95662+ tsk->flags |= PF_SWAPWRITE;
95663 set_freezable();
95664
95665 /*
95666@@ -489,7 +489,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
95667 * Add the default flusher task that gets created for any bdi
95668 * that has dirty data pending writeout
95669 */
95670-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95671+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95672 {
95673 if (!bdi_cap_writeback_dirty(bdi))
95674 return;
95675diff --git a/mm/filemap.c b/mm/filemap.c
95676index a1fe378..e26702f 100644
95677--- a/mm/filemap.c
95678+++ b/mm/filemap.c
95679@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95680 struct address_space *mapping = file->f_mapping;
95681
95682 if (!mapping->a_ops->readpage)
95683- return -ENOEXEC;
95684+ return -ENODEV;
95685 file_accessed(file);
95686 vma->vm_ops = &generic_file_vm_ops;
95687 vma->vm_flags |= VM_CAN_NONLINEAR;
95688@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95689 *pos = i_size_read(inode);
95690
95691 if (limit != RLIM_INFINITY) {
95692+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95693 if (*pos >= limit) {
95694 send_sig(SIGXFSZ, current, 0);
95695 return -EFBIG;
95696diff --git a/mm/fremap.c b/mm/fremap.c
95697index b6ec85a..a24ac22 100644
95698--- a/mm/fremap.c
95699+++ b/mm/fremap.c
95700@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95701 retry:
95702 vma = find_vma(mm, start);
95703
95704+#ifdef CONFIG_PAX_SEGMEXEC
95705+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95706+ goto out;
95707+#endif
95708+
95709 /*
95710 * Make sure the vma is shared, that it supports prefaulting,
95711 * and that the remapped range is valid and fully within
95712@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95713 /*
95714 * drop PG_Mlocked flag for over-mapped range
95715 */
95716- unsigned int saved_flags = vma->vm_flags;
95717+ unsigned long saved_flags = vma->vm_flags;
95718 munlock_vma_pages_range(vma, start, start + size);
95719 vma->vm_flags = saved_flags;
95720 }
95721diff --git a/mm/highmem.c b/mm/highmem.c
95722index 9c1e627..5ca9447 100644
95723--- a/mm/highmem.c
95724+++ b/mm/highmem.c
95725@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
95726 * So no dangers, even with speculative execution.
95727 */
95728 page = pte_page(pkmap_page_table[i]);
95729+ pax_open_kernel();
95730 pte_clear(&init_mm, (unsigned long)page_address(page),
95731 &pkmap_page_table[i]);
95732-
95733+ pax_close_kernel();
95734 set_page_address(page, NULL);
95735 need_flush = 1;
95736 }
95737@@ -177,9 +178,11 @@ start:
95738 }
95739 }
95740 vaddr = PKMAP_ADDR(last_pkmap_nr);
95741+
95742+ pax_open_kernel();
95743 set_pte_at(&init_mm, vaddr,
95744 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95745-
95746+ pax_close_kernel();
95747 pkmap_count[last_pkmap_nr] = 1;
95748 set_page_address(page, (void *)vaddr);
95749
95750diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95751index 5e1e508..ac70275 100644
95752--- a/mm/hugetlb.c
95753+++ b/mm/hugetlb.c
95754@@ -869,6 +869,7 @@ free:
95755 list_del(&page->lru);
95756 enqueue_huge_page(h, page);
95757 }
95758+ spin_unlock(&hugetlb_lock);
95759
95760 /* Free unnecessary surplus pages to the buddy allocator */
95761 if (!list_empty(&surplus_list)) {
95762@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95763 return 1;
95764 }
95765
95766+#ifdef CONFIG_PAX_SEGMEXEC
95767+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95768+{
95769+ struct mm_struct *mm = vma->vm_mm;
95770+ struct vm_area_struct *vma_m;
95771+ unsigned long address_m;
95772+ pte_t *ptep_m;
95773+
95774+ vma_m = pax_find_mirror_vma(vma);
95775+ if (!vma_m)
95776+ return;
95777+
95778+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95779+ address_m = address + SEGMEXEC_TASK_SIZE;
95780+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95781+ get_page(page_m);
95782+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95783+}
95784+#endif
95785+
95786 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
95787 unsigned long address, pte_t *ptep, pte_t pte,
95788 struct page *pagecache_page)
95789@@ -2004,6 +2025,11 @@ retry_avoidcopy:
95790 huge_ptep_clear_flush(vma, address, ptep);
95791 set_huge_pte_at(mm, address, ptep,
95792 make_huge_pte(vma, new_page, 1));
95793+
95794+#ifdef CONFIG_PAX_SEGMEXEC
95795+ pax_mirror_huge_pte(vma, address, new_page);
95796+#endif
95797+
95798 /* Make the old page be freed below */
95799 new_page = old_page;
95800 }
95801@@ -2135,6 +2161,10 @@ retry:
95802 && (vma->vm_flags & VM_SHARED)));
95803 set_huge_pte_at(mm, address, ptep, new_pte);
95804
95805+#ifdef CONFIG_PAX_SEGMEXEC
95806+ pax_mirror_huge_pte(vma, address, page);
95807+#endif
95808+
95809 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95810 /* Optimization, do the COW without a second fault */
95811 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
95812@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95813 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
95814 struct hstate *h = hstate_vma(vma);
95815
95816+#ifdef CONFIG_PAX_SEGMEXEC
95817+ struct vm_area_struct *vma_m;
95818+
95819+ vma_m = pax_find_mirror_vma(vma);
95820+ if (vma_m) {
95821+ unsigned long address_m;
95822+
95823+ if (vma->vm_start > vma_m->vm_start) {
95824+ address_m = address;
95825+ address -= SEGMEXEC_TASK_SIZE;
95826+ vma = vma_m;
95827+ h = hstate_vma(vma);
95828+ } else
95829+ address_m = address + SEGMEXEC_TASK_SIZE;
95830+
95831+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95832+ return VM_FAULT_OOM;
95833+ address_m &= HPAGE_MASK;
95834+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95835+ }
95836+#endif
95837+
95838 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95839 if (!ptep)
95840 return VM_FAULT_OOM;
95841diff --git a/mm/internal.h b/mm/internal.h
95842index f03e8e2..7354343 100644
95843--- a/mm/internal.h
95844+++ b/mm/internal.h
95845@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
95846 * in mm/page_alloc.c
95847 */
95848 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95849+extern void free_compound_page(struct page *page);
95850 extern void prep_compound_page(struct page *page, unsigned long order);
95851
95852
95853diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95854index c346660..b47382f 100644
95855--- a/mm/kmemleak.c
95856+++ b/mm/kmemleak.c
95857@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
95858
95859 for (i = 0; i < object->trace_len; i++) {
95860 void *ptr = (void *)object->trace[i];
95861- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95862+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
95863 }
95864 }
95865
95866diff --git a/mm/maccess.c b/mm/maccess.c
95867index 9073695..1127f348 100644
95868--- a/mm/maccess.c
95869+++ b/mm/maccess.c
95870@@ -14,7 +14,7 @@
95871 * Safely read from address @src to the buffer at @dst. If a kernel fault
95872 * happens, handle that and return -EFAULT.
95873 */
95874-long probe_kernel_read(void *dst, void *src, size_t size)
95875+long probe_kernel_read(void *dst, const void *src, size_t size)
95876 {
95877 long ret;
95878 mm_segment_t old_fs = get_fs();
95879@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
95880 set_fs(KERNEL_DS);
95881 pagefault_disable();
95882 ret = __copy_from_user_inatomic(dst,
95883- (__force const void __user *)src, size);
95884+ (const void __force_user *)src, size);
95885 pagefault_enable();
95886 set_fs(old_fs);
95887
95888@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
95889 * Safely write to address @dst from the buffer at @src. If a kernel fault
95890 * happens, handle that and return -EFAULT.
95891 */
95892-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
95893+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
95894 {
95895 long ret;
95896 mm_segment_t old_fs = get_fs();
95897
95898 set_fs(KERNEL_DS);
95899 pagefault_disable();
95900- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95901+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95902 pagefault_enable();
95903 set_fs(old_fs);
95904
95905diff --git a/mm/madvise.c b/mm/madvise.c
95906index 35b1479..499f7d4 100644
95907--- a/mm/madvise.c
95908+++ b/mm/madvise.c
95909@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
95910 pgoff_t pgoff;
95911 unsigned long new_flags = vma->vm_flags;
95912
95913+#ifdef CONFIG_PAX_SEGMEXEC
95914+ struct vm_area_struct *vma_m;
95915+#endif
95916+
95917 switch (behavior) {
95918 case MADV_NORMAL:
95919 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95920@@ -103,6 +107,13 @@ success:
95921 /*
95922 * vm_flags is protected by the mmap_sem held in write mode.
95923 */
95924+
95925+#ifdef CONFIG_PAX_SEGMEXEC
95926+ vma_m = pax_find_mirror_vma(vma);
95927+ if (vma_m)
95928+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95929+#endif
95930+
95931 vma->vm_flags = new_flags;
95932
95933 out:
95934@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95935 struct vm_area_struct ** prev,
95936 unsigned long start, unsigned long end)
95937 {
95938+
95939+#ifdef CONFIG_PAX_SEGMEXEC
95940+ struct vm_area_struct *vma_m;
95941+#endif
95942+
95943 *prev = vma;
95944 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95945 return -EINVAL;
95946@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95947 zap_page_range(vma, start, end - start, &details);
95948 } else
95949 zap_page_range(vma, start, end - start, NULL);
95950+
95951+#ifdef CONFIG_PAX_SEGMEXEC
95952+ vma_m = pax_find_mirror_vma(vma);
95953+ if (vma_m) {
95954+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95955+ struct zap_details details = {
95956+ .nonlinear_vma = vma_m,
95957+ .last_index = ULONG_MAX,
95958+ };
95959+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95960+ } else
95961+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95962+ }
95963+#endif
95964+
95965 return 0;
95966 }
95967
95968@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95969 if (end < start)
95970 goto out;
95971
95972+#ifdef CONFIG_PAX_SEGMEXEC
95973+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95974+ if (end > SEGMEXEC_TASK_SIZE)
95975+ goto out;
95976+ } else
95977+#endif
95978+
95979+ if (end > TASK_SIZE)
95980+ goto out;
95981+
95982 error = 0;
95983 if (end == start)
95984 goto out;
95985diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95986index 8aeba53..b4a4198 100644
95987--- a/mm/memory-failure.c
95988+++ b/mm/memory-failure.c
95989@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95990
95991 int sysctl_memory_failure_recovery __read_mostly = 1;
95992
95993-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
95994+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
95995
95996 /*
95997 * Send all the processes who have the page mapped an ``action optional''
95998@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
95999 si.si_signo = SIGBUS;
96000 si.si_errno = 0;
96001 si.si_code = BUS_MCEERR_AO;
96002- si.si_addr = (void *)addr;
96003+ si.si_addr = (void __user *)addr;
96004 #ifdef __ARCH_SI_TRAPNO
96005 si.si_trapno = trapno;
96006 #endif
96007@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
96008 return 0;
96009 }
96010
96011- atomic_long_add(1, &mce_bad_pages);
96012+ atomic_long_add_unchecked(1, &mce_bad_pages);
96013
96014 /*
96015 * We need/can do nothing about count=0 pages.
96016diff --git a/mm/memory.c b/mm/memory.c
96017index 6c836d3..48f3264 100644
96018--- a/mm/memory.c
96019+++ b/mm/memory.c
96020@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96021 return;
96022
96023 pmd = pmd_offset(pud, start);
96024+
96025+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96026 pud_clear(pud);
96027 pmd_free_tlb(tlb, pmd, start);
96028+#endif
96029+
96030 }
96031
96032 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96033@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96034 if (end - 1 > ceiling - 1)
96035 return;
96036
96037+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96038 pud = pud_offset(pgd, start);
96039 pgd_clear(pgd);
96040 pud_free_tlb(tlb, pud, start);
96041+#endif
96042+
96043 }
96044
96045 /*
96046@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96047 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
96048 i = 0;
96049
96050- do {
96051+ while (nr_pages) {
96052 struct vm_area_struct *vma;
96053
96054- vma = find_extend_vma(mm, start);
96055+ vma = find_vma(mm, start);
96056 if (!vma && in_gate_area(tsk, start)) {
96057 unsigned long pg = start & PAGE_MASK;
96058 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
96059@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96060 continue;
96061 }
96062
96063- if (!vma ||
96064+ if (!vma || start < vma->vm_start ||
96065 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
96066 !(vm_flags & vma->vm_flags))
96067 return i ? : -EFAULT;
96068@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96069 start += PAGE_SIZE;
96070 nr_pages--;
96071 } while (nr_pages && start < vma->vm_end);
96072- } while (nr_pages);
96073+ }
96074 return i;
96075 }
96076
96077@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96078 page_add_file_rmap(page);
96079 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96080
96081+#ifdef CONFIG_PAX_SEGMEXEC
96082+ pax_mirror_file_pte(vma, addr, page, ptl);
96083+#endif
96084+
96085 retval = 0;
96086 pte_unmap_unlock(pte, ptl);
96087 return retval;
96088@@ -1560,10 +1571,22 @@ out:
96089 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96090 struct page *page)
96091 {
96092+
96093+#ifdef CONFIG_PAX_SEGMEXEC
96094+ struct vm_area_struct *vma_m;
96095+#endif
96096+
96097 if (addr < vma->vm_start || addr >= vma->vm_end)
96098 return -EFAULT;
96099 if (!page_count(page))
96100 return -EINVAL;
96101+
96102+#ifdef CONFIG_PAX_SEGMEXEC
96103+ vma_m = pax_find_mirror_vma(vma);
96104+ if (vma_m)
96105+ vma_m->vm_flags |= VM_INSERTPAGE;
96106+#endif
96107+
96108 vma->vm_flags |= VM_INSERTPAGE;
96109 return insert_page(vma, addr, page, vma->vm_page_prot);
96110 }
96111@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96112 unsigned long pfn)
96113 {
96114 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96115+ BUG_ON(vma->vm_mirror);
96116
96117 if (addr < vma->vm_start || addr >= vma->vm_end)
96118 return -EFAULT;
96119@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
96120 copy_user_highpage(dst, src, va, vma);
96121 }
96122
96123+#ifdef CONFIG_PAX_SEGMEXEC
96124+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96125+{
96126+ struct mm_struct *mm = vma->vm_mm;
96127+ spinlock_t *ptl;
96128+ pte_t *pte, entry;
96129+
96130+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96131+ entry = *pte;
96132+ if (!pte_present(entry)) {
96133+ if (!pte_none(entry)) {
96134+ BUG_ON(pte_file(entry));
96135+ free_swap_and_cache(pte_to_swp_entry(entry));
96136+ pte_clear_not_present_full(mm, address, pte, 0);
96137+ }
96138+ } else {
96139+ struct page *page;
96140+
96141+ flush_cache_page(vma, address, pte_pfn(entry));
96142+ entry = ptep_clear_flush(vma, address, pte);
96143+ BUG_ON(pte_dirty(entry));
96144+ page = vm_normal_page(vma, address, entry);
96145+ if (page) {
96146+ update_hiwater_rss(mm);
96147+ if (PageAnon(page))
96148+ dec_mm_counter(mm, anon_rss);
96149+ else
96150+ dec_mm_counter(mm, file_rss);
96151+ page_remove_rmap(page);
96152+ page_cache_release(page);
96153+ }
96154+ }
96155+ pte_unmap_unlock(pte, ptl);
96156+}
96157+
96158+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96159+ *
96160+ * the ptl of the lower mapped page is held on entry and is not released on exit
96161+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96162+ */
96163+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96164+{
96165+ struct mm_struct *mm = vma->vm_mm;
96166+ unsigned long address_m;
96167+ spinlock_t *ptl_m;
96168+ struct vm_area_struct *vma_m;
96169+ pmd_t *pmd_m;
96170+ pte_t *pte_m, entry_m;
96171+
96172+ BUG_ON(!page_m || !PageAnon(page_m));
96173+
96174+ vma_m = pax_find_mirror_vma(vma);
96175+ if (!vma_m)
96176+ return;
96177+
96178+ BUG_ON(!PageLocked(page_m));
96179+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96180+ address_m = address + SEGMEXEC_TASK_SIZE;
96181+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96182+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96183+ ptl_m = pte_lockptr(mm, pmd_m);
96184+ if (ptl != ptl_m) {
96185+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96186+ if (!pte_none(*pte_m))
96187+ goto out;
96188+ }
96189+
96190+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96191+ page_cache_get(page_m);
96192+ page_add_anon_rmap(page_m, vma_m, address_m);
96193+ inc_mm_counter(mm, anon_rss);
96194+ set_pte_at(mm, address_m, pte_m, entry_m);
96195+ update_mmu_cache(vma_m, address_m, entry_m);
96196+out:
96197+ if (ptl != ptl_m)
96198+ spin_unlock(ptl_m);
96199+ pte_unmap_nested(pte_m);
96200+ unlock_page(page_m);
96201+}
96202+
96203+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96204+{
96205+ struct mm_struct *mm = vma->vm_mm;
96206+ unsigned long address_m;
96207+ spinlock_t *ptl_m;
96208+ struct vm_area_struct *vma_m;
96209+ pmd_t *pmd_m;
96210+ pte_t *pte_m, entry_m;
96211+
96212+ BUG_ON(!page_m || PageAnon(page_m));
96213+
96214+ vma_m = pax_find_mirror_vma(vma);
96215+ if (!vma_m)
96216+ return;
96217+
96218+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96219+ address_m = address + SEGMEXEC_TASK_SIZE;
96220+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96221+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96222+ ptl_m = pte_lockptr(mm, pmd_m);
96223+ if (ptl != ptl_m) {
96224+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96225+ if (!pte_none(*pte_m))
96226+ goto out;
96227+ }
96228+
96229+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96230+ page_cache_get(page_m);
96231+ page_add_file_rmap(page_m);
96232+ inc_mm_counter(mm, file_rss);
96233+ set_pte_at(mm, address_m, pte_m, entry_m);
96234+ update_mmu_cache(vma_m, address_m, entry_m);
96235+out:
96236+ if (ptl != ptl_m)
96237+ spin_unlock(ptl_m);
96238+ pte_unmap_nested(pte_m);
96239+}
96240+
96241+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96242+{
96243+ struct mm_struct *mm = vma->vm_mm;
96244+ unsigned long address_m;
96245+ spinlock_t *ptl_m;
96246+ struct vm_area_struct *vma_m;
96247+ pmd_t *pmd_m;
96248+ pte_t *pte_m, entry_m;
96249+
96250+ vma_m = pax_find_mirror_vma(vma);
96251+ if (!vma_m)
96252+ return;
96253+
96254+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96255+ address_m = address + SEGMEXEC_TASK_SIZE;
96256+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96257+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96258+ ptl_m = pte_lockptr(mm, pmd_m);
96259+ if (ptl != ptl_m) {
96260+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96261+ if (!pte_none(*pte_m))
96262+ goto out;
96263+ }
96264+
96265+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96266+ set_pte_at(mm, address_m, pte_m, entry_m);
96267+out:
96268+ if (ptl != ptl_m)
96269+ spin_unlock(ptl_m);
96270+ pte_unmap_nested(pte_m);
96271+}
96272+
96273+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96274+{
96275+ struct page *page_m;
96276+ pte_t entry;
96277+
96278+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96279+ goto out;
96280+
96281+ entry = *pte;
96282+ page_m = vm_normal_page(vma, address, entry);
96283+ if (!page_m)
96284+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96285+ else if (PageAnon(page_m)) {
96286+ if (pax_find_mirror_vma(vma)) {
96287+ pte_unmap_unlock(pte, ptl);
96288+ lock_page(page_m);
96289+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96290+ if (pte_same(entry, *pte))
96291+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96292+ else
96293+ unlock_page(page_m);
96294+ }
96295+ } else
96296+ pax_mirror_file_pte(vma, address, page_m, ptl);
96297+
96298+out:
96299+ pte_unmap_unlock(pte, ptl);
96300+}
96301+#endif
96302+
96303 /*
96304 * This routine handles present pages, when users try to write
96305 * to a shared page. It is done by copying the page to a new address
96306@@ -2156,6 +2360,12 @@ gotten:
96307 */
96308 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96309 if (likely(pte_same(*page_table, orig_pte))) {
96310+
96311+#ifdef CONFIG_PAX_SEGMEXEC
96312+ if (pax_find_mirror_vma(vma))
96313+ BUG_ON(!trylock_page(new_page));
96314+#endif
96315+
96316 if (old_page) {
96317 if (!PageAnon(old_page)) {
96318 dec_mm_counter(mm, file_rss);
96319@@ -2207,6 +2417,10 @@ gotten:
96320 page_remove_rmap(old_page);
96321 }
96322
96323+#ifdef CONFIG_PAX_SEGMEXEC
96324+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96325+#endif
96326+
96327 /* Free the old page.. */
96328 new_page = old_page;
96329 ret |= VM_FAULT_WRITE;
96330@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96331 swap_free(entry);
96332 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96333 try_to_free_swap(page);
96334+
96335+#ifdef CONFIG_PAX_SEGMEXEC
96336+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96337+#endif
96338+
96339 unlock_page(page);
96340
96341 if (flags & FAULT_FLAG_WRITE) {
96342@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96343
96344 /* No need to invalidate - it was non-present before */
96345 update_mmu_cache(vma, address, pte);
96346+
96347+#ifdef CONFIG_PAX_SEGMEXEC
96348+ pax_mirror_anon_pte(vma, address, page, ptl);
96349+#endif
96350+
96351 unlock:
96352 pte_unmap_unlock(page_table, ptl);
96353 out:
96354@@ -2632,40 +2856,6 @@ out_release:
96355 }
96356
96357 /*
96358- * This is like a special single-page "expand_{down|up}wards()",
96359- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96360- * doesn't hit another vma.
96361- */
96362-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96363-{
96364- address &= PAGE_MASK;
96365- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96366- struct vm_area_struct *prev = vma->vm_prev;
96367-
96368- /*
96369- * Is there a mapping abutting this one below?
96370- *
96371- * That's only ok if it's the same stack mapping
96372- * that has gotten split..
96373- */
96374- if (prev && prev->vm_end == address)
96375- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96376-
96377- expand_stack(vma, address - PAGE_SIZE);
96378- }
96379- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96380- struct vm_area_struct *next = vma->vm_next;
96381-
96382- /* As VM_GROWSDOWN but s/below/above/ */
96383- if (next && next->vm_start == address + PAGE_SIZE)
96384- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96385-
96386- expand_upwards(vma, address + PAGE_SIZE);
96387- }
96388- return 0;
96389-}
96390-
96391-/*
96392 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96393 * but allow concurrent faults), and pte mapped but not yet locked.
96394 * We return with mmap_sem still held, but pte unmapped and unlocked.
96395@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96396 unsigned long address, pte_t *page_table, pmd_t *pmd,
96397 unsigned int flags)
96398 {
96399- struct page *page;
96400+ struct page *page = NULL;
96401 spinlock_t *ptl;
96402 pte_t entry;
96403
96404- pte_unmap(page_table);
96405-
96406- /* Check if we need to add a guard page to the stack */
96407- if (check_stack_guard_page(vma, address) < 0)
96408- return VM_FAULT_SIGBUS;
96409-
96410- /* Use the zero-page for reads */
96411 if (!(flags & FAULT_FLAG_WRITE)) {
96412 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96413 vma->vm_page_prot));
96414- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96415+ ptl = pte_lockptr(mm, pmd);
96416+ spin_lock(ptl);
96417 if (!pte_none(*page_table))
96418 goto unlock;
96419 goto setpte;
96420 }
96421
96422 /* Allocate our own private page. */
96423+ pte_unmap(page_table);
96424+
96425 if (unlikely(anon_vma_prepare(vma)))
96426 goto oom;
96427 page = alloc_zeroed_user_highpage_movable(vma, address);
96428@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96429 if (!pte_none(*page_table))
96430 goto release;
96431
96432+#ifdef CONFIG_PAX_SEGMEXEC
96433+ if (pax_find_mirror_vma(vma))
96434+ BUG_ON(!trylock_page(page));
96435+#endif
96436+
96437 inc_mm_counter(mm, anon_rss);
96438 page_add_new_anon_rmap(page, vma, address);
96439 setpte:
96440@@ -2720,6 +2911,12 @@ setpte:
96441
96442 /* No need to invalidate - it was non-present before */
96443 update_mmu_cache(vma, address, entry);
96444+
96445+#ifdef CONFIG_PAX_SEGMEXEC
96446+ if (page)
96447+ pax_mirror_anon_pte(vma, address, page, ptl);
96448+#endif
96449+
96450 unlock:
96451 pte_unmap_unlock(page_table, ptl);
96452 return 0;
96453@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96454 */
96455 /* Only go through if we didn't race with anybody else... */
96456 if (likely(pte_same(*page_table, orig_pte))) {
96457+
96458+#ifdef CONFIG_PAX_SEGMEXEC
96459+ if (anon && pax_find_mirror_vma(vma))
96460+ BUG_ON(!trylock_page(page));
96461+#endif
96462+
96463 flush_icache_page(vma, page);
96464 entry = mk_pte(page, vma->vm_page_prot);
96465 if (flags & FAULT_FLAG_WRITE)
96466@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96467
96468 /* no need to invalidate: a not-present page won't be cached */
96469 update_mmu_cache(vma, address, entry);
96470+
96471+#ifdef CONFIG_PAX_SEGMEXEC
96472+ if (anon)
96473+ pax_mirror_anon_pte(vma, address, page, ptl);
96474+ else
96475+ pax_mirror_file_pte(vma, address, page, ptl);
96476+#endif
96477+
96478 } else {
96479 if (charged)
96480 mem_cgroup_uncharge_page(page);
96481@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
96482 if (flags & FAULT_FLAG_WRITE)
96483 flush_tlb_page(vma, address);
96484 }
96485+
96486+#ifdef CONFIG_PAX_SEGMEXEC
96487+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96488+ return 0;
96489+#endif
96490+
96491 unlock:
96492 pte_unmap_unlock(pte, ptl);
96493 return 0;
96494@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96495 pmd_t *pmd;
96496 pte_t *pte;
96497
96498+#ifdef CONFIG_PAX_SEGMEXEC
96499+ struct vm_area_struct *vma_m;
96500+#endif
96501+
96502 __set_current_state(TASK_RUNNING);
96503
96504 count_vm_event(PGFAULT);
96505@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96506 if (unlikely(is_vm_hugetlb_page(vma)))
96507 return hugetlb_fault(mm, vma, address, flags);
96508
96509+#ifdef CONFIG_PAX_SEGMEXEC
96510+ vma_m = pax_find_mirror_vma(vma);
96511+ if (vma_m) {
96512+ unsigned long address_m;
96513+ pgd_t *pgd_m;
96514+ pud_t *pud_m;
96515+ pmd_t *pmd_m;
96516+
96517+ if (vma->vm_start > vma_m->vm_start) {
96518+ address_m = address;
96519+ address -= SEGMEXEC_TASK_SIZE;
96520+ vma = vma_m;
96521+ } else
96522+ address_m = address + SEGMEXEC_TASK_SIZE;
96523+
96524+ pgd_m = pgd_offset(mm, address_m);
96525+ pud_m = pud_alloc(mm, pgd_m, address_m);
96526+ if (!pud_m)
96527+ return VM_FAULT_OOM;
96528+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96529+ if (!pmd_m)
96530+ return VM_FAULT_OOM;
96531+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
96532+ return VM_FAULT_OOM;
96533+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96534+ }
96535+#endif
96536+
96537 pgd = pgd_offset(mm, address);
96538 pud = pud_alloc(mm, pgd, address);
96539 if (!pud)
96540@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
96541 gate_vma.vm_start = FIXADDR_USER_START;
96542 gate_vma.vm_end = FIXADDR_USER_END;
96543 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
96544- gate_vma.vm_page_prot = __P101;
96545+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
96546 /*
96547 * Make sure the vDSO gets into every core dump.
96548 * Dumping its contents makes post-mortem fully interpretable later
96549diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96550index 3c6e3e2..b1ddbb8 100644
96551--- a/mm/mempolicy.c
96552+++ b/mm/mempolicy.c
96553@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96554 struct vm_area_struct *next;
96555 int err;
96556
96557+#ifdef CONFIG_PAX_SEGMEXEC
96558+ struct vm_area_struct *vma_m;
96559+#endif
96560+
96561 err = 0;
96562 for (; vma && vma->vm_start < end; vma = next) {
96563 next = vma->vm_next;
96564@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96565 err = policy_vma(vma, new);
96566 if (err)
96567 break;
96568+
96569+#ifdef CONFIG_PAX_SEGMEXEC
96570+ vma_m = pax_find_mirror_vma(vma);
96571+ if (vma_m) {
96572+ err = policy_vma(vma_m, new);
96573+ if (err)
96574+ break;
96575+ }
96576+#endif
96577+
96578 }
96579 return err;
96580 }
96581@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96582
96583 if (end < start)
96584 return -EINVAL;
96585+
96586+#ifdef CONFIG_PAX_SEGMEXEC
96587+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96588+ if (end > SEGMEXEC_TASK_SIZE)
96589+ return -EINVAL;
96590+ } else
96591+#endif
96592+
96593+ if (end > TASK_SIZE)
96594+ return -EINVAL;
96595+
96596 if (end == start)
96597 return 0;
96598
96599@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96600 if (!mm)
96601 return -EINVAL;
96602
96603+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96604+ if (mm != current->mm &&
96605+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96606+ err = -EPERM;
96607+ goto out;
96608+ }
96609+#endif
96610+
96611 /*
96612 * Check if this process has the right to modify the specified
96613 * process. The right exists if the process has administrative
96614@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96615 rcu_read_lock();
96616 tcred = __task_cred(task);
96617 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96618- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96619- !capable(CAP_SYS_NICE)) {
96620+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96621 rcu_read_unlock();
96622 err = -EPERM;
96623 goto out;
96624@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
96625 }
96626 #endif
96627
96628+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96629+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
96630+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
96631+ _mm->pax_flags & MF_PAX_SEGMEXEC))
96632+#endif
96633+
96634 /*
96635 * Display pages allocated per node and memory policy via /proc.
96636 */
96637@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
96638 int n;
96639 char buffer[50];
96640
96641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96642+ if (current->exec_id != m->exec_id) {
96643+ gr_log_badprocpid("numa_maps");
96644+ return 0;
96645+ }
96646+#endif
96647+
96648 if (!mm)
96649 return 0;
96650
96651@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
96652 mpol_to_str(buffer, sizeof(buffer), pol, 0);
96653 mpol_cond_put(pol);
96654
96655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96656+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
96657+#else
96658 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
96659+#endif
96660
96661 if (file) {
96662 seq_printf(m, " file=");
96663- seq_path(m, &file->f_path, "\n\t= ");
96664+ seq_path(m, &file->f_path, "\n\t\\= ");
96665 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
96666 seq_printf(m, " heap");
96667 } else if (vma->vm_start <= mm->start_stack &&
96668diff --git a/mm/migrate.c b/mm/migrate.c
96669index aaca868..2ebecdc 100644
96670--- a/mm/migrate.c
96671+++ b/mm/migrate.c
96672@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
96673 unsigned long chunk_start;
96674 int err;
96675
96676+ pax_track_stack();
96677+
96678 task_nodes = cpuset_mems_allowed(task);
96679
96680 err = -ENOMEM;
96681@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96682 if (!mm)
96683 return -EINVAL;
96684
96685+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96686+ if (mm != current->mm &&
96687+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96688+ err = -EPERM;
96689+ goto out;
96690+ }
96691+#endif
96692+
96693 /*
96694 * Check if this process has the right to modify the specified
96695 * process. The right exists if the process has administrative
96696@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96697 rcu_read_lock();
96698 tcred = __task_cred(task);
96699 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96700- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96701- !capable(CAP_SYS_NICE)) {
96702+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96703 rcu_read_unlock();
96704 err = -EPERM;
96705 goto out;
96706diff --git a/mm/mlock.c b/mm/mlock.c
96707index 2d846cf..98134d2 100644
96708--- a/mm/mlock.c
96709+++ b/mm/mlock.c
96710@@ -13,6 +13,7 @@
96711 #include <linux/pagemap.h>
96712 #include <linux/mempolicy.h>
96713 #include <linux/syscalls.h>
96714+#include <linux/security.h>
96715 #include <linux/sched.h>
96716 #include <linux/module.h>
96717 #include <linux/rmap.h>
96718@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
96719 }
96720 }
96721
96722-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
96723-{
96724- return (vma->vm_flags & VM_GROWSDOWN) &&
96725- (vma->vm_start == addr) &&
96726- !vma_stack_continue(vma->vm_prev, addr);
96727-}
96728-
96729 /**
96730 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
96731 * @vma: target vma
96732@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
96733 if (vma->vm_flags & VM_WRITE)
96734 gup_flags |= FOLL_WRITE;
96735
96736- /* We don't try to access the guard page of a stack vma */
96737- if (stack_guard_page(vma, start)) {
96738- addr += PAGE_SIZE;
96739- nr_pages--;
96740- }
96741-
96742 while (nr_pages > 0) {
96743 int i;
96744
96745@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96746 {
96747 unsigned long nstart, end, tmp;
96748 struct vm_area_struct * vma, * prev;
96749- int error;
96750+ int error = -EINVAL;
96751
96752 len = PAGE_ALIGN(len);
96753 end = start + len;
96754@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96755 return -EINVAL;
96756 if (end == start)
96757 return 0;
96758+ if (end > TASK_SIZE)
96759+ return -EINVAL;
96760+
96761 vma = find_vma_prev(current->mm, start, &prev);
96762 if (!vma || vma->vm_start > start)
96763 return -ENOMEM;
96764@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96765 for (nstart = start ; ; ) {
96766 unsigned int newflags;
96767
96768+#ifdef CONFIG_PAX_SEGMEXEC
96769+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96770+ break;
96771+#endif
96772+
96773 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96774
96775 newflags = vma->vm_flags | VM_LOCKED;
96776@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96777 lock_limit >>= PAGE_SHIFT;
96778
96779 /* check against resource limits */
96780+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96781 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96782 error = do_mlock(start, len, 1);
96783 up_write(&current->mm->mmap_sem);
96784@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
96785 static int do_mlockall(int flags)
96786 {
96787 struct vm_area_struct * vma, * prev = NULL;
96788- unsigned int def_flags = 0;
96789
96790 if (flags & MCL_FUTURE)
96791- def_flags = VM_LOCKED;
96792- current->mm->def_flags = def_flags;
96793+ current->mm->def_flags |= VM_LOCKED;
96794+ else
96795+ current->mm->def_flags &= ~VM_LOCKED;
96796 if (flags == MCL_FUTURE)
96797 goto out;
96798
96799 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96800- unsigned int newflags;
96801+ unsigned long newflags;
96802
96803+#ifdef CONFIG_PAX_SEGMEXEC
96804+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96805+ break;
96806+#endif
96807+
96808+ BUG_ON(vma->vm_end > TASK_SIZE);
96809 newflags = vma->vm_flags | VM_LOCKED;
96810 if (!(flags & MCL_CURRENT))
96811 newflags &= ~VM_LOCKED;
96812@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96813 lock_limit >>= PAGE_SHIFT;
96814
96815 ret = -ENOMEM;
96816+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96817 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96818 capable(CAP_IPC_LOCK))
96819 ret = do_mlockall(flags);
96820diff --git a/mm/mmap.c b/mm/mmap.c
96821index 4b80cbf..12a7861 100644
96822--- a/mm/mmap.c
96823+++ b/mm/mmap.c
96824@@ -45,6 +45,16 @@
96825 #define arch_rebalance_pgtables(addr, len) (addr)
96826 #endif
96827
96828+static inline void verify_mm_writelocked(struct mm_struct *mm)
96829+{
96830+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96831+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96832+ up_read(&mm->mmap_sem);
96833+ BUG();
96834+ }
96835+#endif
96836+}
96837+
96838 static void unmap_region(struct mm_struct *mm,
96839 struct vm_area_struct *vma, struct vm_area_struct *prev,
96840 unsigned long start, unsigned long end);
96841@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
96842 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96843 *
96844 */
96845-pgprot_t protection_map[16] = {
96846+pgprot_t protection_map[16] __read_only = {
96847 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96848 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96849 };
96850
96851 pgprot_t vm_get_page_prot(unsigned long vm_flags)
96852 {
96853- return __pgprot(pgprot_val(protection_map[vm_flags &
96854+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96855 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96856 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96857+
96858+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96859+ if (!nx_enabled &&
96860+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96861+ (vm_flags & (VM_READ | VM_WRITE)))
96862+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96863+#endif
96864+
96865+ return prot;
96866 }
96867 EXPORT_SYMBOL(vm_get_page_prot);
96868
96869 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
96870 int sysctl_overcommit_ratio = 50; /* default is 50% */
96871 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96872+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96873 struct percpu_counter vm_committed_as;
96874
96875 /*
96876@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96877 struct vm_area_struct *next = vma->vm_next;
96878
96879 might_sleep();
96880+ BUG_ON(vma->vm_mirror);
96881 if (vma->vm_ops && vma->vm_ops->close)
96882 vma->vm_ops->close(vma);
96883 if (vma->vm_file) {
96884@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96885 * not page aligned -Ram Gupta
96886 */
96887 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
96888+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
96889 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
96890 (mm->end_data - mm->start_data) > rlim)
96891 goto out;
96892@@ -704,6 +726,12 @@ static int
96893 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96894 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96895 {
96896+
96897+#ifdef CONFIG_PAX_SEGMEXEC
96898+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96899+ return 0;
96900+#endif
96901+
96902 if (is_mergeable_vma(vma, file, vm_flags) &&
96903 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96904 if (vma->vm_pgoff == vm_pgoff)
96905@@ -723,6 +751,12 @@ static int
96906 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96907 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96908 {
96909+
96910+#ifdef CONFIG_PAX_SEGMEXEC
96911+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96912+ return 0;
96913+#endif
96914+
96915 if (is_mergeable_vma(vma, file, vm_flags) &&
96916 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96917 pgoff_t vm_pglen;
96918@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96919 struct vm_area_struct *vma_merge(struct mm_struct *mm,
96920 struct vm_area_struct *prev, unsigned long addr,
96921 unsigned long end, unsigned long vm_flags,
96922- struct anon_vma *anon_vma, struct file *file,
96923+ struct anon_vma *anon_vma, struct file *file,
96924 pgoff_t pgoff, struct mempolicy *policy)
96925 {
96926 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
96927 struct vm_area_struct *area, *next;
96928
96929+#ifdef CONFIG_PAX_SEGMEXEC
96930+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96931+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96932+
96933+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96934+#endif
96935+
96936 /*
96937 * We later require that vma->vm_flags == vm_flags,
96938 * so this tests vma->vm_flags & VM_SPECIAL, too.
96939@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96940 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96941 next = next->vm_next;
96942
96943+#ifdef CONFIG_PAX_SEGMEXEC
96944+ if (prev)
96945+ prev_m = pax_find_mirror_vma(prev);
96946+ if (area)
96947+ area_m = pax_find_mirror_vma(area);
96948+ if (next)
96949+ next_m = pax_find_mirror_vma(next);
96950+#endif
96951+
96952 /*
96953 * Can it merge with the predecessor?
96954 */
96955@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96956 /* cases 1, 6 */
96957 vma_adjust(prev, prev->vm_start,
96958 next->vm_end, prev->vm_pgoff, NULL);
96959- } else /* cases 2, 5, 7 */
96960+
96961+#ifdef CONFIG_PAX_SEGMEXEC
96962+ if (prev_m)
96963+ vma_adjust(prev_m, prev_m->vm_start,
96964+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96965+#endif
96966+
96967+ } else { /* cases 2, 5, 7 */
96968 vma_adjust(prev, prev->vm_start,
96969 end, prev->vm_pgoff, NULL);
96970+
96971+#ifdef CONFIG_PAX_SEGMEXEC
96972+ if (prev_m)
96973+ vma_adjust(prev_m, prev_m->vm_start,
96974+ end_m, prev_m->vm_pgoff, NULL);
96975+#endif
96976+
96977+ }
96978 return prev;
96979 }
96980
96981@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96982 mpol_equal(policy, vma_policy(next)) &&
96983 can_vma_merge_before(next, vm_flags,
96984 anon_vma, file, pgoff+pglen)) {
96985- if (prev && addr < prev->vm_end) /* case 4 */
96986+ if (prev && addr < prev->vm_end) { /* case 4 */
96987 vma_adjust(prev, prev->vm_start,
96988 addr, prev->vm_pgoff, NULL);
96989- else /* cases 3, 8 */
96990+
96991+#ifdef CONFIG_PAX_SEGMEXEC
96992+ if (prev_m)
96993+ vma_adjust(prev_m, prev_m->vm_start,
96994+ addr_m, prev_m->vm_pgoff, NULL);
96995+#endif
96996+
96997+ } else { /* cases 3, 8 */
96998 vma_adjust(area, addr, next->vm_end,
96999 next->vm_pgoff - pglen, NULL);
97000+
97001+#ifdef CONFIG_PAX_SEGMEXEC
97002+ if (area_m)
97003+ vma_adjust(area_m, addr_m, next_m->vm_end,
97004+ next_m->vm_pgoff - pglen, NULL);
97005+#endif
97006+
97007+ }
97008 return area;
97009 }
97010
97011@@ -898,14 +978,11 @@ none:
97012 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97013 struct file *file, long pages)
97014 {
97015- const unsigned long stack_flags
97016- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97017-
97018 if (file) {
97019 mm->shared_vm += pages;
97020 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97021 mm->exec_vm += pages;
97022- } else if (flags & stack_flags)
97023+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97024 mm->stack_vm += pages;
97025 if (flags & (VM_RESERVED|VM_IO))
97026 mm->reserved_vm += pages;
97027@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97028 * (the exception is when the underlying filesystem is noexec
97029 * mounted, in which case we dont add PROT_EXEC.)
97030 */
97031- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97032+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97033 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97034 prot |= PROT_EXEC;
97035
97036@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97037 /* Obtain the address to map to. we verify (or select) it and ensure
97038 * that it represents a valid section of the address space.
97039 */
97040- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97041+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97042 if (addr & ~PAGE_MASK)
97043 return addr;
97044
97045@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97046 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97047 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97048
97049+#ifdef CONFIG_PAX_MPROTECT
97050+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97051+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97052+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97053+ gr_log_rwxmmap(file);
97054+
97055+#ifdef CONFIG_PAX_EMUPLT
97056+ vm_flags &= ~VM_EXEC;
97057+#else
97058+ return -EPERM;
97059+#endif
97060+
97061+ }
97062+
97063+ if (!(vm_flags & VM_EXEC))
97064+ vm_flags &= ~VM_MAYEXEC;
97065+#else
97066+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97067+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97068+#endif
97069+ else
97070+ vm_flags &= ~VM_MAYWRITE;
97071+ }
97072+#endif
97073+
97074+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97075+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97076+ vm_flags &= ~VM_PAGEEXEC;
97077+#endif
97078+
97079 if (flags & MAP_LOCKED)
97080 if (!can_do_mlock())
97081 return -EPERM;
97082@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97083 locked += mm->locked_vm;
97084 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97085 lock_limit >>= PAGE_SHIFT;
97086+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97087 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97088 return -EAGAIN;
97089 }
97090@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97091 if (error)
97092 return error;
97093
97094+ if (!gr_acl_handle_mmap(file, prot))
97095+ return -EACCES;
97096+
97097 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
97098 }
97099 EXPORT_SYMBOL(do_mmap_pgoff);
97100@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
97101 */
97102 int vma_wants_writenotify(struct vm_area_struct *vma)
97103 {
97104- unsigned int vm_flags = vma->vm_flags;
97105+ unsigned long vm_flags = vma->vm_flags;
97106
97107 /* If it was private or non-writable, the write bit is already clear */
97108- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97109+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97110 return 0;
97111
97112 /* The backer wishes to know when pages are first written to? */
97113@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97114 unsigned long charged = 0;
97115 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
97116
97117+#ifdef CONFIG_PAX_SEGMEXEC
97118+ struct vm_area_struct *vma_m = NULL;
97119+#endif
97120+
97121+ /*
97122+ * mm->mmap_sem is required to protect against another thread
97123+ * changing the mappings in case we sleep.
97124+ */
97125+ verify_mm_writelocked(mm);
97126+
97127 /* Clear old maps */
97128 error = -ENOMEM;
97129-munmap_back:
97130 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97131 if (vma && vma->vm_start < addr + len) {
97132 if (do_munmap(mm, addr, len))
97133 return -ENOMEM;
97134- goto munmap_back;
97135+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97136+ BUG_ON(vma && vma->vm_start < addr + len);
97137 }
97138
97139 /* Check against address space limit. */
97140@@ -1173,6 +1294,16 @@ munmap_back:
97141 goto unacct_error;
97142 }
97143
97144+#ifdef CONFIG_PAX_SEGMEXEC
97145+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97146+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97147+ if (!vma_m) {
97148+ error = -ENOMEM;
97149+ goto free_vma;
97150+ }
97151+ }
97152+#endif
97153+
97154 vma->vm_mm = mm;
97155 vma->vm_start = addr;
97156 vma->vm_end = addr + len;
97157@@ -1180,8 +1311,9 @@ munmap_back:
97158 vma->vm_page_prot = vm_get_page_prot(vm_flags);
97159 vma->vm_pgoff = pgoff;
97160
97161+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
97162+
97163 if (file) {
97164- error = -EINVAL;
97165 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
97166 goto free_vma;
97167 if (vm_flags & VM_DENYWRITE) {
97168@@ -1195,6 +1327,19 @@ munmap_back:
97169 error = file->f_op->mmap(file, vma);
97170 if (error)
97171 goto unmap_and_free_vma;
97172+
97173+#ifdef CONFIG_PAX_SEGMEXEC
97174+ if (vma_m && (vm_flags & VM_EXECUTABLE))
97175+ added_exe_file_vma(mm);
97176+#endif
97177+
97178+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97179+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97180+ vma->vm_flags |= VM_PAGEEXEC;
97181+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97182+ }
97183+#endif
97184+
97185 if (vm_flags & VM_EXECUTABLE)
97186 added_exe_file_vma(mm);
97187
97188@@ -1207,6 +1352,8 @@ munmap_back:
97189 pgoff = vma->vm_pgoff;
97190 vm_flags = vma->vm_flags;
97191 } else if (vm_flags & VM_SHARED) {
97192+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
97193+ goto free_vma;
97194 error = shmem_zero_setup(vma);
97195 if (error)
97196 goto free_vma;
97197@@ -1218,6 +1365,11 @@ munmap_back:
97198 vma_link(mm, vma, prev, rb_link, rb_parent);
97199 file = vma->vm_file;
97200
97201+#ifdef CONFIG_PAX_SEGMEXEC
97202+ if (vma_m)
97203+ pax_mirror_vma(vma_m, vma);
97204+#endif
97205+
97206 /* Once vma denies write, undo our temporary denial count */
97207 if (correct_wcount)
97208 atomic_inc(&inode->i_writecount);
97209@@ -1226,6 +1378,7 @@ out:
97210
97211 mm->total_vm += len >> PAGE_SHIFT;
97212 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97213+ track_exec_limit(mm, addr, addr + len, vm_flags);
97214 if (vm_flags & VM_LOCKED) {
97215 /*
97216 * makes pages present; downgrades, drops, reacquires mmap_sem
97217@@ -1248,6 +1401,12 @@ unmap_and_free_vma:
97218 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
97219 charged = 0;
97220 free_vma:
97221+
97222+#ifdef CONFIG_PAX_SEGMEXEC
97223+ if (vma_m)
97224+ kmem_cache_free(vm_area_cachep, vma_m);
97225+#endif
97226+
97227 kmem_cache_free(vm_area_cachep, vma);
97228 unacct_error:
97229 if (charged)
97230@@ -1255,6 +1414,44 @@ unacct_error:
97231 return error;
97232 }
97233
97234+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
97235+{
97236+ if (!vma) {
97237+#ifdef CONFIG_STACK_GROWSUP
97238+ if (addr > sysctl_heap_stack_gap)
97239+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97240+ else
97241+ vma = find_vma(current->mm, 0);
97242+ if (vma && (vma->vm_flags & VM_GROWSUP))
97243+ return false;
97244+#endif
97245+ return true;
97246+ }
97247+
97248+ if (addr + len > vma->vm_start)
97249+ return false;
97250+
97251+ if (vma->vm_flags & VM_GROWSDOWN)
97252+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97253+#ifdef CONFIG_STACK_GROWSUP
97254+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97255+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
97256+#endif
97257+
97258+ return true;
97259+}
97260+
97261+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
97262+{
97263+ if (vma->vm_start < len)
97264+ return -ENOMEM;
97265+ if (!(vma->vm_flags & VM_GROWSDOWN))
97266+ return vma->vm_start - len;
97267+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97268+ return vma->vm_start - len - sysctl_heap_stack_gap;
97269+ return -ENOMEM;
97270+}
97271+
97272 /* Get an address range which is currently unmapped.
97273 * For shmat() with addr=0.
97274 *
97275@@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97276 if (flags & MAP_FIXED)
97277 return addr;
97278
97279+#ifdef CONFIG_PAX_RANDMMAP
97280+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97281+#endif
97282+
97283 if (addr) {
97284 addr = PAGE_ALIGN(addr);
97285- vma = find_vma(mm, addr);
97286- if (TASK_SIZE - len >= addr &&
97287- (!vma || addr + len <= vma->vm_start))
97288- return addr;
97289+ if (TASK_SIZE - len >= addr) {
97290+ vma = find_vma(mm, addr);
97291+ if (check_heap_stack_gap(vma, addr, len))
97292+ return addr;
97293+ }
97294 }
97295 if (len > mm->cached_hole_size) {
97296- start_addr = addr = mm->free_area_cache;
97297+ start_addr = addr = mm->free_area_cache;
97298 } else {
97299- start_addr = addr = TASK_UNMAPPED_BASE;
97300- mm->cached_hole_size = 0;
97301+ start_addr = addr = mm->mmap_base;
97302+ mm->cached_hole_size = 0;
97303 }
97304
97305 full_search:
97306@@ -1303,34 +1505,40 @@ full_search:
97307 * Start a new search - just in case we missed
97308 * some holes.
97309 */
97310- if (start_addr != TASK_UNMAPPED_BASE) {
97311- addr = TASK_UNMAPPED_BASE;
97312- start_addr = addr;
97313+ if (start_addr != mm->mmap_base) {
97314+ start_addr = addr = mm->mmap_base;
97315 mm->cached_hole_size = 0;
97316 goto full_search;
97317 }
97318 return -ENOMEM;
97319 }
97320- if (!vma || addr + len <= vma->vm_start) {
97321- /*
97322- * Remember the place where we stopped the search:
97323- */
97324- mm->free_area_cache = addr + len;
97325- return addr;
97326- }
97327+ if (check_heap_stack_gap(vma, addr, len))
97328+ break;
97329 if (addr + mm->cached_hole_size < vma->vm_start)
97330 mm->cached_hole_size = vma->vm_start - addr;
97331 addr = vma->vm_end;
97332 }
97333+
97334+ /*
97335+ * Remember the place where we stopped the search:
97336+ */
97337+ mm->free_area_cache = addr + len;
97338+ return addr;
97339 }
97340 #endif
97341
97342 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
97343 {
97344+
97345+#ifdef CONFIG_PAX_SEGMEXEC
97346+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97347+ return;
97348+#endif
97349+
97350 /*
97351 * Is this a new hole at the lowest possible address?
97352 */
97353- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
97354+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
97355 mm->free_area_cache = addr;
97356 mm->cached_hole_size = ~0UL;
97357 }
97358@@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97359 {
97360 struct vm_area_struct *vma;
97361 struct mm_struct *mm = current->mm;
97362- unsigned long addr = addr0;
97363+ unsigned long base = mm->mmap_base, addr = addr0;
97364
97365 /* requested length too big for entire address space */
97366 if (len > TASK_SIZE)
97367@@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97368 if (flags & MAP_FIXED)
97369 return addr;
97370
97371+#ifdef CONFIG_PAX_RANDMMAP
97372+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97373+#endif
97374+
97375 /* requesting a specific address */
97376 if (addr) {
97377 addr = PAGE_ALIGN(addr);
97378- vma = find_vma(mm, addr);
97379- if (TASK_SIZE - len >= addr &&
97380- (!vma || addr + len <= vma->vm_start))
97381- return addr;
97382+ if (TASK_SIZE - len >= addr) {
97383+ vma = find_vma(mm, addr);
97384+ if (check_heap_stack_gap(vma, addr, len))
97385+ return addr;
97386+ }
97387 }
97388
97389 /* check if free_area_cache is useful for us */
97390@@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97391 /* make sure it can fit in the remaining address space */
97392 if (addr > len) {
97393 vma = find_vma(mm, addr-len);
97394- if (!vma || addr <= vma->vm_start)
97395+ if (check_heap_stack_gap(vma, addr - len, len))
97396 /* remember the address as a hint for next time */
97397 return (mm->free_area_cache = addr-len);
97398 }
97399@@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97400 * return with success:
97401 */
97402 vma = find_vma(mm, addr);
97403- if (!vma || addr+len <= vma->vm_start)
97404+ if (check_heap_stack_gap(vma, addr, len))
97405 /* remember the address as a hint for next time */
97406 return (mm->free_area_cache = addr);
97407
97408@@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97409 mm->cached_hole_size = vma->vm_start - addr;
97410
97411 /* try just below the current vma->vm_start */
97412- addr = vma->vm_start-len;
97413- } while (len < vma->vm_start);
97414+ addr = skip_heap_stack_gap(vma, len);
97415+ } while (!IS_ERR_VALUE(addr));
97416
97417 bottomup:
97418 /*
97419@@ -1414,13 +1627,21 @@ bottomup:
97420 * can happen with large stack limits and large mmap()
97421 * allocations.
97422 */
97423+ mm->mmap_base = TASK_UNMAPPED_BASE;
97424+
97425+#ifdef CONFIG_PAX_RANDMMAP
97426+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97427+ mm->mmap_base += mm->delta_mmap;
97428+#endif
97429+
97430+ mm->free_area_cache = mm->mmap_base;
97431 mm->cached_hole_size = ~0UL;
97432- mm->free_area_cache = TASK_UNMAPPED_BASE;
97433 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
97434 /*
97435 * Restore the topdown base:
97436 */
97437- mm->free_area_cache = mm->mmap_base;
97438+ mm->mmap_base = base;
97439+ mm->free_area_cache = base;
97440 mm->cached_hole_size = ~0UL;
97441
97442 return addr;
97443@@ -1429,6 +1650,12 @@ bottomup:
97444
97445 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97446 {
97447+
97448+#ifdef CONFIG_PAX_SEGMEXEC
97449+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97450+ return;
97451+#endif
97452+
97453 /*
97454 * Is this a new hole at the highest possible address?
97455 */
97456@@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97457 mm->free_area_cache = addr;
97458
97459 /* dont allow allocations above current base */
97460- if (mm->free_area_cache > mm->mmap_base)
97461+ if (mm->free_area_cache > mm->mmap_base) {
97462 mm->free_area_cache = mm->mmap_base;
97463+ mm->cached_hole_size = ~0UL;
97464+ }
97465 }
97466
97467 unsigned long
97468@@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
97469
97470 EXPORT_SYMBOL(find_vma);
97471
97472-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
97473+/*
97474+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
97475+ */
97476 struct vm_area_struct *
97477 find_vma_prev(struct mm_struct *mm, unsigned long addr,
97478 struct vm_area_struct **pprev)
97479 {
97480- struct vm_area_struct *vma = NULL, *prev = NULL;
97481- struct rb_node *rb_node;
97482- if (!mm)
97483- goto out;
97484-
97485- /* Guard against addr being lower than the first VMA */
97486- vma = mm->mmap;
97487-
97488- /* Go through the RB tree quickly. */
97489- rb_node = mm->mm_rb.rb_node;
97490-
97491- while (rb_node) {
97492- struct vm_area_struct *vma_tmp;
97493- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97494-
97495- if (addr < vma_tmp->vm_end) {
97496- rb_node = rb_node->rb_left;
97497- } else {
97498- prev = vma_tmp;
97499- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
97500- break;
97501+ struct vm_area_struct *vma;
97502+
97503+ vma = find_vma(mm, addr);
97504+ if (vma) {
97505+ *pprev = vma->vm_prev;
97506+ } else {
97507+ struct rb_node *rb_node = mm->mm_rb.rb_node;
97508+ *pprev = NULL;
97509+ while (rb_node) {
97510+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97511 rb_node = rb_node->rb_right;
97512 }
97513 }
97514+ return vma;
97515+}
97516+
97517+#ifdef CONFIG_PAX_SEGMEXEC
97518+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97519+{
97520+ struct vm_area_struct *vma_m;
97521
97522-out:
97523- *pprev = prev;
97524- return prev ? prev->vm_next : vma;
97525+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97526+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97527+ BUG_ON(vma->vm_mirror);
97528+ return NULL;
97529+ }
97530+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97531+ vma_m = vma->vm_mirror;
97532+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97533+ BUG_ON(vma->vm_file != vma_m->vm_file);
97534+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97535+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
97536+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
97537+ return vma_m;
97538 }
97539+#endif
97540
97541 /*
97542 * Verify that the stack growth is acceptable and
97543@@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97544 return -ENOMEM;
97545
97546 /* Stack limit test */
97547+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
97548 if (size > rlim[RLIMIT_STACK].rlim_cur)
97549 return -ENOMEM;
97550
97551@@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97552 unsigned long limit;
97553 locked = mm->locked_vm + grow;
97554 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
97555+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97556 if (locked > limit && !capable(CAP_IPC_LOCK))
97557 return -ENOMEM;
97558 }
97559@@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97560 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97561 * vma is the last one with address > vma->vm_end. Have to extend vma.
97562 */
97563+#ifndef CONFIG_IA64
97564+static
97565+#endif
97566 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97567 {
97568 int error;
97569+ bool locknext;
97570
97571 if (!(vma->vm_flags & VM_GROWSUP))
97572 return -EFAULT;
97573
97574+ /* Also guard against wrapping around to address 0. */
97575+ if (address < PAGE_ALIGN(address+1))
97576+ address = PAGE_ALIGN(address+1);
97577+ else
97578+ return -ENOMEM;
97579+
97580 /*
97581 * We must make sure the anon_vma is allocated
97582 * so that the anon_vma locking is not a noop.
97583 */
97584 if (unlikely(anon_vma_prepare(vma)))
97585 return -ENOMEM;
97586+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97587+ if (locknext && anon_vma_prepare(vma->vm_next))
97588+ return -ENOMEM;
97589 anon_vma_lock(vma);
97590+ if (locknext)
97591+ anon_vma_lock(vma->vm_next);
97592
97593 /*
97594 * vma->vm_start/vm_end cannot change under us because the caller
97595 * is required to hold the mmap_sem in read mode. We need the
97596- * anon_vma lock to serialize against concurrent expand_stacks.
97597- * Also guard against wrapping around to address 0.
97598+ * anon_vma locks to serialize against concurrent expand_stacks
97599+ * and expand_upwards.
97600 */
97601- if (address < PAGE_ALIGN(address+4))
97602- address = PAGE_ALIGN(address+4);
97603- else {
97604- anon_vma_unlock(vma);
97605- return -ENOMEM;
97606- }
97607 error = 0;
97608
97609 /* Somebody else might have raced and expanded it already */
97610- if (address > vma->vm_end) {
97611+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97612+ error = -ENOMEM;
97613+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97614 unsigned long size, grow;
97615
97616 size = address - vma->vm_start;
97617@@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97618 vma->vm_end = address;
97619 }
97620 }
97621+ if (locknext)
97622+ anon_vma_unlock(vma->vm_next);
97623 anon_vma_unlock(vma);
97624 return error;
97625 }
97626@@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
97627 unsigned long address)
97628 {
97629 int error;
97630+ bool lockprev = false;
97631+ struct vm_area_struct *prev;
97632
97633 /*
97634 * We must make sure the anon_vma is allocated
97635@@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
97636 if (error)
97637 return error;
97638
97639+ prev = vma->vm_prev;
97640+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97641+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97642+#endif
97643+ if (lockprev && anon_vma_prepare(prev))
97644+ return -ENOMEM;
97645+ if (lockprev)
97646+ anon_vma_lock(prev);
97647+
97648 anon_vma_lock(vma);
97649
97650 /*
97651@@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
97652 */
97653
97654 /* Somebody else might have raced and expanded it already */
97655- if (address < vma->vm_start) {
97656+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97657+ error = -ENOMEM;
97658+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97659 unsigned long size, grow;
97660
97661+#ifdef CONFIG_PAX_SEGMEXEC
97662+ struct vm_area_struct *vma_m;
97663+
97664+ vma_m = pax_find_mirror_vma(vma);
97665+#endif
97666+
97667 size = vma->vm_end - address;
97668 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97669
97670@@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
97671 if (!error) {
97672 vma->vm_start = address;
97673 vma->vm_pgoff -= grow;
97674+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97675+
97676+#ifdef CONFIG_PAX_SEGMEXEC
97677+ if (vma_m) {
97678+ vma_m->vm_start -= grow << PAGE_SHIFT;
97679+ vma_m->vm_pgoff -= grow;
97680+ }
97681+#endif
97682+
97683+
97684 }
97685 }
97686 }
97687 anon_vma_unlock(vma);
97688+ if (lockprev)
97689+ anon_vma_unlock(prev);
97690 return error;
97691 }
97692
97693@@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97694 do {
97695 long nrpages = vma_pages(vma);
97696
97697+#ifdef CONFIG_PAX_SEGMEXEC
97698+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97699+ vma = remove_vma(vma);
97700+ continue;
97701+ }
97702+#endif
97703+
97704 mm->total_vm -= nrpages;
97705 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97706 vma = remove_vma(vma);
97707@@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97708 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97709 vma->vm_prev = NULL;
97710 do {
97711+
97712+#ifdef CONFIG_PAX_SEGMEXEC
97713+ if (vma->vm_mirror) {
97714+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97715+ vma->vm_mirror->vm_mirror = NULL;
97716+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97717+ vma->vm_mirror = NULL;
97718+ }
97719+#endif
97720+
97721 rb_erase(&vma->vm_rb, &mm->mm_rb);
97722 mm->map_count--;
97723 tail_vma = vma;
97724@@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97725 struct mempolicy *pol;
97726 struct vm_area_struct *new;
97727
97728+#ifdef CONFIG_PAX_SEGMEXEC
97729+ struct vm_area_struct *vma_m, *new_m = NULL;
97730+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97731+#endif
97732+
97733 if (is_vm_hugetlb_page(vma) && (addr &
97734 ~(huge_page_mask(hstate_vma(vma)))))
97735 return -EINVAL;
97736
97737+#ifdef CONFIG_PAX_SEGMEXEC
97738+ vma_m = pax_find_mirror_vma(vma);
97739+
97740+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97741+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97742+ if (mm->map_count >= sysctl_max_map_count-1)
97743+ return -ENOMEM;
97744+ } else
97745+#endif
97746+
97747 if (mm->map_count >= sysctl_max_map_count)
97748 return -ENOMEM;
97749
97750@@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97751 if (!new)
97752 return -ENOMEM;
97753
97754+#ifdef CONFIG_PAX_SEGMEXEC
97755+ if (vma_m) {
97756+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97757+ if (!new_m) {
97758+ kmem_cache_free(vm_area_cachep, new);
97759+ return -ENOMEM;
97760+ }
97761+ }
97762+#endif
97763+
97764 /* most fields are the same, copy all, and then fixup */
97765 *new = *vma;
97766
97767@@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97768 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97769 }
97770
97771+#ifdef CONFIG_PAX_SEGMEXEC
97772+ if (vma_m) {
97773+ *new_m = *vma_m;
97774+ new_m->vm_mirror = new;
97775+ new->vm_mirror = new_m;
97776+
97777+ if (new_below)
97778+ new_m->vm_end = addr_m;
97779+ else {
97780+ new_m->vm_start = addr_m;
97781+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97782+ }
97783+ }
97784+#endif
97785+
97786 pol = mpol_dup(vma_policy(vma));
97787 if (IS_ERR(pol)) {
97788+
97789+#ifdef CONFIG_PAX_SEGMEXEC
97790+ if (new_m)
97791+ kmem_cache_free(vm_area_cachep, new_m);
97792+#endif
97793+
97794 kmem_cache_free(vm_area_cachep, new);
97795 return PTR_ERR(pol);
97796 }
97797@@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97798 else
97799 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97800
97801+#ifdef CONFIG_PAX_SEGMEXEC
97802+ if (vma_m) {
97803+ mpol_get(pol);
97804+ vma_set_policy(new_m, pol);
97805+
97806+ if (new_m->vm_file) {
97807+ get_file(new_m->vm_file);
97808+ if (vma_m->vm_flags & VM_EXECUTABLE)
97809+ added_exe_file_vma(mm);
97810+ }
97811+
97812+ if (new_m->vm_ops && new_m->vm_ops->open)
97813+ new_m->vm_ops->open(new_m);
97814+
97815+ if (new_below)
97816+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97817+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97818+ else
97819+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97820+ }
97821+#endif
97822+
97823 return 0;
97824 }
97825
97826@@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97827 * work. This now handles partial unmappings.
97828 * Jeremy Fitzhardinge <jeremy@goop.org>
97829 */
97830+#ifdef CONFIG_PAX_SEGMEXEC
97831 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97832 {
97833+ int ret = __do_munmap(mm, start, len);
97834+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97835+ return ret;
97836+
97837+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97838+}
97839+
97840+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97841+#else
97842+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97843+#endif
97844+{
97845 unsigned long end;
97846 struct vm_area_struct *vma, *prev, *last;
97847
97848+ /*
97849+ * mm->mmap_sem is required to protect against another thread
97850+ * changing the mappings in case we sleep.
97851+ */
97852+ verify_mm_writelocked(mm);
97853+
97854 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97855 return -EINVAL;
97856
97857@@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97858 /* Fix up all other VM information */
97859 remove_vma_list(mm, vma);
97860
97861+ track_exec_limit(mm, start, end, 0UL);
97862+
97863 return 0;
97864 }
97865
97866@@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97867
97868 profile_munmap(addr);
97869
97870+#ifdef CONFIG_PAX_SEGMEXEC
97871+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97872+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
97873+ return -EINVAL;
97874+#endif
97875+
97876 down_write(&mm->mmap_sem);
97877 ret = do_munmap(mm, addr, len);
97878 up_write(&mm->mmap_sem);
97879 return ret;
97880 }
97881
97882-static inline void verify_mm_writelocked(struct mm_struct *mm)
97883-{
97884-#ifdef CONFIG_DEBUG_VM
97885- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97886- WARN_ON(1);
97887- up_read(&mm->mmap_sem);
97888- }
97889-#endif
97890-}
97891-
97892 /*
97893 * this is really a simplified "do_mmap". it only handles
97894 * anonymous maps. eventually we may be able to do some
97895@@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97896 struct rb_node ** rb_link, * rb_parent;
97897 pgoff_t pgoff = addr >> PAGE_SHIFT;
97898 int error;
97899+ unsigned long charged;
97900
97901 len = PAGE_ALIGN(len);
97902 if (!len)
97903@@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97904
97905 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97906
97907+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97908+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97909+ flags &= ~VM_EXEC;
97910+
97911+#ifdef CONFIG_PAX_MPROTECT
97912+ if (mm->pax_flags & MF_PAX_MPROTECT)
97913+ flags &= ~VM_MAYEXEC;
97914+#endif
97915+
97916+ }
97917+#endif
97918+
97919 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97920 if (error & ~PAGE_MASK)
97921 return error;
97922
97923+ charged = len >> PAGE_SHIFT;
97924+
97925 /*
97926 * mlock MCL_FUTURE?
97927 */
97928 if (mm->def_flags & VM_LOCKED) {
97929 unsigned long locked, lock_limit;
97930- locked = len >> PAGE_SHIFT;
97931+ locked = charged;
97932 locked += mm->locked_vm;
97933 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97934 lock_limit >>= PAGE_SHIFT;
97935@@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97936 /*
97937 * Clear old maps. this also does some error checking for us
97938 */
97939- munmap_back:
97940 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97941 if (vma && vma->vm_start < addr + len) {
97942 if (do_munmap(mm, addr, len))
97943 return -ENOMEM;
97944- goto munmap_back;
97945+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97946+ BUG_ON(vma && vma->vm_start < addr + len);
97947 }
97948
97949 /* Check against address space limits *after* clearing old maps... */
97950- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97951+ if (!may_expand_vm(mm, charged))
97952 return -ENOMEM;
97953
97954 if (mm->map_count > sysctl_max_map_count)
97955 return -ENOMEM;
97956
97957- if (security_vm_enough_memory(len >> PAGE_SHIFT))
97958+ if (security_vm_enough_memory(charged))
97959 return -ENOMEM;
97960
97961 /* Can we just expand an old private anonymous mapping? */
97962@@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97963 */
97964 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97965 if (!vma) {
97966- vm_unacct_memory(len >> PAGE_SHIFT);
97967+ vm_unacct_memory(charged);
97968 return -ENOMEM;
97969 }
97970
97971@@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97972 vma->vm_page_prot = vm_get_page_prot(flags);
97973 vma_link(mm, vma, prev, rb_link, rb_parent);
97974 out:
97975- mm->total_vm += len >> PAGE_SHIFT;
97976+ mm->total_vm += charged;
97977 if (flags & VM_LOCKED) {
97978 if (!mlock_vma_pages_range(vma, addr, addr + len))
97979- mm->locked_vm += (len >> PAGE_SHIFT);
97980+ mm->locked_vm += charged;
97981 }
97982+ track_exec_limit(mm, addr, addr + len, flags);
97983 return addr;
97984 }
97985
97986@@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
97987 * Walk the list again, actually closing and freeing it,
97988 * with preemption enabled, without holding any MM locks.
97989 */
97990- while (vma)
97991+ while (vma) {
97992+ vma->vm_mirror = NULL;
97993 vma = remove_vma(vma);
97994+ }
97995
97996 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
97997 }
97998@@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
97999 struct vm_area_struct * __vma, * prev;
98000 struct rb_node ** rb_link, * rb_parent;
98001
98002+#ifdef CONFIG_PAX_SEGMEXEC
98003+ struct vm_area_struct *vma_m = NULL;
98004+#endif
98005+
98006 /*
98007 * The vm_pgoff of a purely anonymous vma should be irrelevant
98008 * until its first write fault, when page's anon_vma and index
98009@@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
98010 if ((vma->vm_flags & VM_ACCOUNT) &&
98011 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98012 return -ENOMEM;
98013+
98014+#ifdef CONFIG_PAX_SEGMEXEC
98015+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98016+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98017+ if (!vma_m)
98018+ return -ENOMEM;
98019+ }
98020+#endif
98021+
98022 vma_link(mm, vma, prev, rb_link, rb_parent);
98023+
98024+#ifdef CONFIG_PAX_SEGMEXEC
98025+ if (vma_m)
98026+ pax_mirror_vma(vma_m, vma);
98027+#endif
98028+
98029 return 0;
98030 }
98031
98032@@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98033 struct rb_node **rb_link, *rb_parent;
98034 struct mempolicy *pol;
98035
98036+ BUG_ON(vma->vm_mirror);
98037+
98038 /*
98039 * If anonymous vma has not yet been faulted, update new pgoff
98040 * to match new location, to increase its chance of merging.
98041@@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98042 return new_vma;
98043 }
98044
98045+#ifdef CONFIG_PAX_SEGMEXEC
98046+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98047+{
98048+ struct vm_area_struct *prev_m;
98049+ struct rb_node **rb_link_m, *rb_parent_m;
98050+ struct mempolicy *pol_m;
98051+
98052+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98053+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98054+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98055+ *vma_m = *vma;
98056+ pol_m = vma_policy(vma_m);
98057+ mpol_get(pol_m);
98058+ vma_set_policy(vma_m, pol_m);
98059+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98060+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98061+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98062+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98063+ if (vma_m->vm_file)
98064+ get_file(vma_m->vm_file);
98065+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98066+ vma_m->vm_ops->open(vma_m);
98067+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
98068+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98069+ vma_m->vm_mirror = vma;
98070+ vma->vm_mirror = vma_m;
98071+}
98072+#endif
98073+
98074 /*
98075 * Return true if the calling process may expand its vm space by the passed
98076 * number of pages
98077@@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98078 unsigned long lim;
98079
98080 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
98081-
98082+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98083 if (cur + npages > lim)
98084 return 0;
98085 return 1;
98086@@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
98087 vma->vm_start = addr;
98088 vma->vm_end = addr + len;
98089
98090+#ifdef CONFIG_PAX_MPROTECT
98091+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98092+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98093+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98094+ return -EPERM;
98095+ if (!(vm_flags & VM_EXEC))
98096+ vm_flags &= ~VM_MAYEXEC;
98097+#else
98098+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98099+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98100+#endif
98101+ else
98102+ vm_flags &= ~VM_MAYWRITE;
98103+ }
98104+#endif
98105+
98106 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
98107 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98108
98109diff --git a/mm/mprotect.c b/mm/mprotect.c
98110index 1737c7e..c7faeb4 100644
98111--- a/mm/mprotect.c
98112+++ b/mm/mprotect.c
98113@@ -24,10 +24,16 @@
98114 #include <linux/mmu_notifier.h>
98115 #include <linux/migrate.h>
98116 #include <linux/perf_event.h>
98117+
98118+#ifdef CONFIG_PAX_MPROTECT
98119+#include <linux/elf.h>
98120+#endif
98121+
98122 #include <asm/uaccess.h>
98123 #include <asm/pgtable.h>
98124 #include <asm/cacheflush.h>
98125 #include <asm/tlbflush.h>
98126+#include <asm/mmu_context.h>
98127
98128 #ifndef pgprot_modify
98129 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98130@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
98131 flush_tlb_range(vma, start, end);
98132 }
98133
98134+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98135+/* called while holding the mmap semaphor for writing except stack expansion */
98136+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98137+{
98138+ unsigned long oldlimit, newlimit = 0UL;
98139+
98140+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
98141+ return;
98142+
98143+ spin_lock(&mm->page_table_lock);
98144+ oldlimit = mm->context.user_cs_limit;
98145+ if ((prot & VM_EXEC) && oldlimit < end)
98146+ /* USER_CS limit moved up */
98147+ newlimit = end;
98148+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98149+ /* USER_CS limit moved down */
98150+ newlimit = start;
98151+
98152+ if (newlimit) {
98153+ mm->context.user_cs_limit = newlimit;
98154+
98155+#ifdef CONFIG_SMP
98156+ wmb();
98157+ cpus_clear(mm->context.cpu_user_cs_mask);
98158+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98159+#endif
98160+
98161+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98162+ }
98163+ spin_unlock(&mm->page_table_lock);
98164+ if (newlimit == end) {
98165+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98166+
98167+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98168+ if (is_vm_hugetlb_page(vma))
98169+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98170+ else
98171+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
98172+ }
98173+}
98174+#endif
98175+
98176 int
98177 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98178 unsigned long start, unsigned long end, unsigned long newflags)
98179@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98180 int error;
98181 int dirty_accountable = 0;
98182
98183+#ifdef CONFIG_PAX_SEGMEXEC
98184+ struct vm_area_struct *vma_m = NULL;
98185+ unsigned long start_m, end_m;
98186+
98187+ start_m = start + SEGMEXEC_TASK_SIZE;
98188+ end_m = end + SEGMEXEC_TASK_SIZE;
98189+#endif
98190+
98191 if (newflags == oldflags) {
98192 *pprev = vma;
98193 return 0;
98194 }
98195
98196+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98197+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98198+
98199+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98200+ return -ENOMEM;
98201+
98202+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98203+ return -ENOMEM;
98204+ }
98205+
98206 /*
98207 * If we make a private mapping writable we increase our commit;
98208 * but (without finer accounting) cannot reduce our commit if we
98209@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98210 }
98211 }
98212
98213+#ifdef CONFIG_PAX_SEGMEXEC
98214+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98215+ if (start != vma->vm_start) {
98216+ error = split_vma(mm, vma, start, 1);
98217+ if (error)
98218+ goto fail;
98219+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98220+ *pprev = (*pprev)->vm_next;
98221+ }
98222+
98223+ if (end != vma->vm_end) {
98224+ error = split_vma(mm, vma, end, 0);
98225+ if (error)
98226+ goto fail;
98227+ }
98228+
98229+ if (pax_find_mirror_vma(vma)) {
98230+ error = __do_munmap(mm, start_m, end_m - start_m);
98231+ if (error)
98232+ goto fail;
98233+ } else {
98234+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98235+ if (!vma_m) {
98236+ error = -ENOMEM;
98237+ goto fail;
98238+ }
98239+ vma->vm_flags = newflags;
98240+ pax_mirror_vma(vma_m, vma);
98241+ }
98242+ }
98243+#endif
98244+
98245 /*
98246 * First try to merge with previous and/or next vma.
98247 */
98248@@ -195,9 +293,21 @@ success:
98249 * vm_flags and vm_page_prot are protected by the mmap_sem
98250 * held in write mode.
98251 */
98252+
98253+#ifdef CONFIG_PAX_SEGMEXEC
98254+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98255+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98256+#endif
98257+
98258 vma->vm_flags = newflags;
98259+
98260+#ifdef CONFIG_PAX_MPROTECT
98261+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98262+ mm->binfmt->handle_mprotect(vma, newflags);
98263+#endif
98264+
98265 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
98266- vm_get_page_prot(newflags));
98267+ vm_get_page_prot(vma->vm_flags));
98268
98269 if (vma_wants_writenotify(vma)) {
98270 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
98271@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98272 end = start + len;
98273 if (end <= start)
98274 return -ENOMEM;
98275+
98276+#ifdef CONFIG_PAX_SEGMEXEC
98277+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98278+ if (end > SEGMEXEC_TASK_SIZE)
98279+ return -EINVAL;
98280+ } else
98281+#endif
98282+
98283+ if (end > TASK_SIZE)
98284+ return -EINVAL;
98285+
98286 if (!arch_validate_prot(prot))
98287 return -EINVAL;
98288
98289@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98290 /*
98291 * Does the application expect PROT_READ to imply PROT_EXEC:
98292 */
98293- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98294+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98295 prot |= PROT_EXEC;
98296
98297 vm_flags = calc_vm_prot_bits(prot);
98298@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98299 if (start > vma->vm_start)
98300 prev = vma;
98301
98302+#ifdef CONFIG_PAX_MPROTECT
98303+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98304+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98305+#endif
98306+
98307 for (nstart = start ; ; ) {
98308 unsigned long newflags;
98309
98310@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98311
98312 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98313 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98314+ if (prot & (PROT_WRITE | PROT_EXEC))
98315+ gr_log_rwxmprotect(vma->vm_file);
98316+
98317+ error = -EACCES;
98318+ goto out;
98319+ }
98320+
98321+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98322 error = -EACCES;
98323 goto out;
98324 }
98325@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98326 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98327 if (error)
98328 goto out;
98329+
98330+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98331+
98332 nstart = tmp;
98333
98334 if (nstart < prev->vm_end)
98335diff --git a/mm/mremap.c b/mm/mremap.c
98336index 3e98d79..1706cec 100644
98337--- a/mm/mremap.c
98338+++ b/mm/mremap.c
98339@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98340 continue;
98341 pte = ptep_clear_flush(vma, old_addr, old_pte);
98342 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98343+
98344+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98345+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98346+ pte = pte_exprotect(pte);
98347+#endif
98348+
98349 set_pte_at(mm, new_addr, new_pte, pte);
98350 }
98351
98352@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98353 if (is_vm_hugetlb_page(vma))
98354 goto Einval;
98355
98356+#ifdef CONFIG_PAX_SEGMEXEC
98357+ if (pax_find_mirror_vma(vma))
98358+ goto Einval;
98359+#endif
98360+
98361 /* We can't remap across vm area boundaries */
98362 if (old_len > vma->vm_end - addr)
98363 goto Efault;
98364@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
98365 unsigned long ret = -EINVAL;
98366 unsigned long charged = 0;
98367 unsigned long map_flags;
98368+ unsigned long pax_task_size = TASK_SIZE;
98369
98370 if (new_addr & ~PAGE_MASK)
98371 goto out;
98372
98373- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98374+#ifdef CONFIG_PAX_SEGMEXEC
98375+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98376+ pax_task_size = SEGMEXEC_TASK_SIZE;
98377+#endif
98378+
98379+ pax_task_size -= PAGE_SIZE;
98380+
98381+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98382 goto out;
98383
98384 /* Check if the location we're moving into overlaps the
98385 * old location at all, and fail if it does.
98386 */
98387- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98388- goto out;
98389-
98390- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98391+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98392 goto out;
98393
98394 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98395@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
98396 struct vm_area_struct *vma;
98397 unsigned long ret = -EINVAL;
98398 unsigned long charged = 0;
98399+ unsigned long pax_task_size = TASK_SIZE;
98400
98401 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98402 goto out;
98403@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
98404 if (!new_len)
98405 goto out;
98406
98407+#ifdef CONFIG_PAX_SEGMEXEC
98408+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98409+ pax_task_size = SEGMEXEC_TASK_SIZE;
98410+#endif
98411+
98412+ pax_task_size -= PAGE_SIZE;
98413+
98414+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98415+ old_len > pax_task_size || addr > pax_task_size-old_len)
98416+ goto out;
98417+
98418 if (flags & MREMAP_FIXED) {
98419 if (flags & MREMAP_MAYMOVE)
98420 ret = mremap_to(addr, old_len, new_addr, new_len);
98421@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
98422 addr + new_len);
98423 }
98424 ret = addr;
98425+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98426 goto out;
98427 }
98428 }
98429@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
98430 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98431 if (ret)
98432 goto out;
98433+
98434+ map_flags = vma->vm_flags;
98435 ret = move_vma(vma, addr, old_len, new_len, new_addr);
98436+ if (!(ret & ~PAGE_MASK)) {
98437+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98438+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98439+ }
98440 }
98441 out:
98442 if (ret & ~PAGE_MASK)
98443diff --git a/mm/nommu.c b/mm/nommu.c
98444index 406e8d4..53970d3 100644
98445--- a/mm/nommu.c
98446+++ b/mm/nommu.c
98447@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
98448 int sysctl_overcommit_ratio = 50; /* default is 50% */
98449 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98450 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98451-int heap_stack_gap = 0;
98452
98453 atomic_long_t mmap_pages_allocated;
98454
98455@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98456 EXPORT_SYMBOL(find_vma);
98457
98458 /*
98459- * find a VMA
98460- * - we don't extend stack VMAs under NOMMU conditions
98461- */
98462-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98463-{
98464- return find_vma(mm, addr);
98465-}
98466-
98467-/*
98468 * expand a stack to a given address
98469 * - not supported under NOMMU conditions
98470 */
98471diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98472index 3ecab7e..594a471 100644
98473--- a/mm/page_alloc.c
98474+++ b/mm/page_alloc.c
98475@@ -289,7 +289,7 @@ out:
98476 * This usage means that zero-order pages may not be compound.
98477 */
98478
98479-static void free_compound_page(struct page *page)
98480+void free_compound_page(struct page *page)
98481 {
98482 __free_pages_ok(page, compound_order(page));
98483 }
98484@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98485 int bad = 0;
98486 int wasMlocked = __TestClearPageMlocked(page);
98487
98488+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98489+ unsigned long index = 1UL << order;
98490+#endif
98491+
98492 kmemcheck_free_shadow(page, order);
98493
98494 for (i = 0 ; i < (1 << order) ; ++i)
98495@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98496 debug_check_no_obj_freed(page_address(page),
98497 PAGE_SIZE << order);
98498 }
98499+
98500+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98501+ for (; index; --index)
98502+ sanitize_highpage(page + index - 1);
98503+#endif
98504+
98505 arch_free_page(page, order);
98506 kernel_map_pages(page, 1 << order, 0);
98507
98508@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
98509 arch_alloc_page(page, order);
98510 kernel_map_pages(page, 1 << order, 1);
98511
98512+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98513 if (gfp_flags & __GFP_ZERO)
98514 prep_zero_page(page, order, gfp_flags);
98515+#endif
98516
98517 if (order && (gfp_flags & __GFP_COMP))
98518 prep_compound_page(page, order);
98519@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
98520 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
98521 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
98522 }
98523+
98524+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98525+ sanitize_highpage(page);
98526+#endif
98527+
98528 arch_free_page(page, 0);
98529 kernel_map_pages(page, 1, 0);
98530
98531@@ -2179,6 +2196,8 @@ void show_free_areas(void)
98532 int cpu;
98533 struct zone *zone;
98534
98535+ pax_track_stack();
98536+
98537 for_each_populated_zone(zone) {
98538 show_node(zone);
98539 printk("%s per-cpu:\n", zone->name);
98540@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
98541 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
98542 }
98543 #else
98544-static void inline setup_usemap(struct pglist_data *pgdat,
98545+static inline void setup_usemap(struct pglist_data *pgdat,
98546 struct zone *zone, unsigned long zonesize) {}
98547 #endif /* CONFIG_SPARSEMEM */
98548
98549diff --git a/mm/percpu.c b/mm/percpu.c
98550index c90614a..5f7b7b8 100644
98551--- a/mm/percpu.c
98552+++ b/mm/percpu.c
98553@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98554 static unsigned int pcpu_high_unit_cpu __read_mostly;
98555
98556 /* the address of the first chunk which starts with the kernel static area */
98557-void *pcpu_base_addr __read_mostly;
98558+void *pcpu_base_addr __read_only;
98559 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98560
98561 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98562diff --git a/mm/rmap.c b/mm/rmap.c
98563index dd43373..d848cd7 100644
98564--- a/mm/rmap.c
98565+++ b/mm/rmap.c
98566@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98567 /* page_table_lock to protect against threads */
98568 spin_lock(&mm->page_table_lock);
98569 if (likely(!vma->anon_vma)) {
98570+
98571+#ifdef CONFIG_PAX_SEGMEXEC
98572+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98573+
98574+ if (vma_m) {
98575+ BUG_ON(vma_m->anon_vma);
98576+ vma_m->anon_vma = anon_vma;
98577+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
98578+ }
98579+#endif
98580+
98581 vma->anon_vma = anon_vma;
98582 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
98583 allocated = NULL;
98584diff --git a/mm/shmem.c b/mm/shmem.c
98585index 3e0005b..1d659a8 100644
98586--- a/mm/shmem.c
98587+++ b/mm/shmem.c
98588@@ -31,7 +31,7 @@
98589 #include <linux/swap.h>
98590 #include <linux/ima.h>
98591
98592-static struct vfsmount *shm_mnt;
98593+struct vfsmount *shm_mnt;
98594
98595 #ifdef CONFIG_SHMEM
98596 /*
98597@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
98598 goto unlock;
98599 }
98600 entry = shmem_swp_entry(info, index, NULL);
98601+ if (!entry)
98602+ goto unlock;
98603 if (entry->val) {
98604 /*
98605 * The more uptodate page coming down from a stacked
98606@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
98607 struct vm_area_struct pvma;
98608 struct page *page;
98609
98610+ pax_track_stack();
98611+
98612 spol = mpol_cond_copy(&mpol,
98613 mpol_shared_policy_lookup(&info->policy, idx));
98614
98615@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
98616
98617 info = SHMEM_I(inode);
98618 inode->i_size = len-1;
98619- if (len <= (char *)inode - (char *)info) {
98620+ if (len <= (char *)inode - (char *)info && len <= 64) {
98621 /* do it inline */
98622 memcpy(info, symname, len);
98623 inode->i_op = &shmem_symlink_inline_operations;
98624@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98625 int err = -ENOMEM;
98626
98627 /* Round up to L1_CACHE_BYTES to resist false sharing */
98628- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98629- L1_CACHE_BYTES), GFP_KERNEL);
98630+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98631 if (!sbinfo)
98632 return -ENOMEM;
98633
98634diff --git a/mm/slab.c b/mm/slab.c
98635index c8d466a..909e01e 100644
98636--- a/mm/slab.c
98637+++ b/mm/slab.c
98638@@ -174,7 +174,7 @@
98639
98640 /* Legal flag mask for kmem_cache_create(). */
98641 #if DEBUG
98642-# define CREATE_MASK (SLAB_RED_ZONE | \
98643+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
98644 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
98645 SLAB_CACHE_DMA | \
98646 SLAB_STORE_USER | \
98647@@ -182,7 +182,7 @@
98648 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98649 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
98650 #else
98651-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
98652+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
98653 SLAB_CACHE_DMA | \
98654 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
98655 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98656@@ -308,7 +308,7 @@ struct kmem_list3 {
98657 * Need this for bootstrapping a per node allocator.
98658 */
98659 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
98660-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
98661+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
98662 #define CACHE_CACHE 0
98663 #define SIZE_AC MAX_NUMNODES
98664 #define SIZE_L3 (2 * MAX_NUMNODES)
98665@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
98666 if ((x)->max_freeable < i) \
98667 (x)->max_freeable = i; \
98668 } while (0)
98669-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98670-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98671-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98672-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98673+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98674+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98675+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98676+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98677 #else
98678 #define STATS_INC_ACTIVE(x) do { } while (0)
98679 #define STATS_DEC_ACTIVE(x) do { } while (0)
98680@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
98681 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98682 */
98683 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98684- const struct slab *slab, void *obj)
98685+ const struct slab *slab, const void *obj)
98686 {
98687 u32 offset = (obj - slab->s_mem);
98688 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98689@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
98690 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
98691 sizes[INDEX_AC].cs_size,
98692 ARCH_KMALLOC_MINALIGN,
98693- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98694+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98695 NULL);
98696
98697 if (INDEX_AC != INDEX_L3) {
98698@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
98699 kmem_cache_create(names[INDEX_L3].name,
98700 sizes[INDEX_L3].cs_size,
98701 ARCH_KMALLOC_MINALIGN,
98702- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98703+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98704 NULL);
98705 }
98706
98707@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
98708 sizes->cs_cachep = kmem_cache_create(names->name,
98709 sizes->cs_size,
98710 ARCH_KMALLOC_MINALIGN,
98711- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98712+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98713 NULL);
98714 }
98715 #ifdef CONFIG_ZONE_DMA
98716@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
98717 }
98718 /* cpu stats */
98719 {
98720- unsigned long allochit = atomic_read(&cachep->allochit);
98721- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98722- unsigned long freehit = atomic_read(&cachep->freehit);
98723- unsigned long freemiss = atomic_read(&cachep->freemiss);
98724+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98725+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98726+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98727+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98728
98729 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98730 allochit, allocmiss, freehit, freemiss);
98731@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
98732
98733 static int __init slab_proc_init(void)
98734 {
98735- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
98736+ mode_t gr_mode = S_IRUGO;
98737+
98738+#ifdef CONFIG_GRKERNSEC_PROC_ADD
98739+ gr_mode = S_IRUSR;
98740+#endif
98741+
98742+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
98743 #ifdef CONFIG_DEBUG_SLAB_LEAK
98744- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98745+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
98746 #endif
98747 return 0;
98748 }
98749 module_init(slab_proc_init);
98750 #endif
98751
98752+void check_object_size(const void *ptr, unsigned long n, bool to)
98753+{
98754+
98755+#ifdef CONFIG_PAX_USERCOPY
98756+ struct page *page;
98757+ struct kmem_cache *cachep = NULL;
98758+ struct slab *slabp;
98759+ unsigned int objnr;
98760+ unsigned long offset;
98761+ const char *type;
98762+
98763+ if (!n)
98764+ return;
98765+
98766+ type = "<null>";
98767+ if (ZERO_OR_NULL_PTR(ptr))
98768+ goto report;
98769+
98770+ if (!virt_addr_valid(ptr))
98771+ return;
98772+
98773+ page = virt_to_head_page(ptr);
98774+
98775+ type = "<process stack>";
98776+ if (!PageSlab(page)) {
98777+ if (object_is_on_stack(ptr, n) == -1)
98778+ goto report;
98779+ return;
98780+ }
98781+
98782+ cachep = page_get_cache(page);
98783+ type = cachep->name;
98784+ if (!(cachep->flags & SLAB_USERCOPY))
98785+ goto report;
98786+
98787+ slabp = page_get_slab(page);
98788+ objnr = obj_to_index(cachep, slabp, ptr);
98789+ BUG_ON(objnr >= cachep->num);
98790+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
98791+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
98792+ return;
98793+
98794+report:
98795+ pax_report_usercopy(ptr, n, to, type);
98796+#endif
98797+
98798+}
98799+EXPORT_SYMBOL(check_object_size);
98800+
98801 /**
98802 * ksize - get the actual amount of memory allocated for a given object
98803 * @objp: Pointer to the object
98804diff --git a/mm/slob.c b/mm/slob.c
98805index 837ebd6..0bd23bc 100644
98806--- a/mm/slob.c
98807+++ b/mm/slob.c
98808@@ -29,7 +29,7 @@
98809 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
98810 * alloc_pages() directly, allocating compound pages so the page order
98811 * does not have to be separately tracked, and also stores the exact
98812- * allocation size in page->private so that it can be used to accurately
98813+ * allocation size in slob_page->size so that it can be used to accurately
98814 * provide ksize(). These objects are detected in kfree() because slob_page()
98815 * is false for them.
98816 *
98817@@ -58,6 +58,7 @@
98818 */
98819
98820 #include <linux/kernel.h>
98821+#include <linux/sched.h>
98822 #include <linux/slab.h>
98823 #include <linux/mm.h>
98824 #include <linux/swap.h> /* struct reclaim_state */
98825@@ -100,7 +101,8 @@ struct slob_page {
98826 unsigned long flags; /* mandatory */
98827 atomic_t _count; /* mandatory */
98828 slobidx_t units; /* free units left in page */
98829- unsigned long pad[2];
98830+ unsigned long pad[1];
98831+ unsigned long size; /* size when >=PAGE_SIZE */
98832 slob_t *free; /* first free slob_t in page */
98833 struct list_head list; /* linked list of free pages */
98834 };
98835@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
98836 */
98837 static inline int is_slob_page(struct slob_page *sp)
98838 {
98839- return PageSlab((struct page *)sp);
98840+ return PageSlab((struct page *)sp) && !sp->size;
98841 }
98842
98843 static inline void set_slob_page(struct slob_page *sp)
98844@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
98845
98846 static inline struct slob_page *slob_page(const void *addr)
98847 {
98848- return (struct slob_page *)virt_to_page(addr);
98849+ return (struct slob_page *)virt_to_head_page(addr);
98850 }
98851
98852 /*
98853@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
98854 /*
98855 * Return the size of a slob block.
98856 */
98857-static slobidx_t slob_units(slob_t *s)
98858+static slobidx_t slob_units(const slob_t *s)
98859 {
98860 if (s->units > 0)
98861 return s->units;
98862@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
98863 /*
98864 * Return the next free slob block pointer after this one.
98865 */
98866-static slob_t *slob_next(slob_t *s)
98867+static slob_t *slob_next(const slob_t *s)
98868 {
98869 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
98870 slobidx_t next;
98871@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
98872 /*
98873 * Returns true if s is the last free block in its page.
98874 */
98875-static int slob_last(slob_t *s)
98876+static int slob_last(const slob_t *s)
98877 {
98878 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
98879 }
98880@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
98881 if (!page)
98882 return NULL;
98883
98884+ set_slob_page(page);
98885 return page_address(page);
98886 }
98887
98888@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
98889 if (!b)
98890 return NULL;
98891 sp = slob_page(b);
98892- set_slob_page(sp);
98893
98894 spin_lock_irqsave(&slob_lock, flags);
98895 sp->units = SLOB_UNITS(PAGE_SIZE);
98896 sp->free = b;
98897+ sp->size = 0;
98898 INIT_LIST_HEAD(&sp->list);
98899 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
98900 set_slob_page_free(sp, slob_list);
98901@@ -475,10 +478,9 @@ out:
98902 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
98903 #endif
98904
98905-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98906+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
98907 {
98908- unsigned int *m;
98909- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98910+ slob_t *m;
98911 void *ret;
98912
98913 lockdep_trace_alloc(gfp);
98914@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98915
98916 if (!m)
98917 return NULL;
98918- *m = size;
98919+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
98920+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
98921+ m[0].units = size;
98922+ m[1].units = align;
98923 ret = (void *)m + align;
98924
98925 trace_kmalloc_node(_RET_IP_, ret,
98926@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98927
98928 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
98929 if (ret) {
98930- struct page *page;
98931- page = virt_to_page(ret);
98932- page->private = size;
98933+ struct slob_page *sp;
98934+ sp = slob_page(ret);
98935+ sp->size = size;
98936 }
98937
98938 trace_kmalloc_node(_RET_IP_, ret,
98939 size, PAGE_SIZE << order, gfp, node);
98940 }
98941
98942- kmemleak_alloc(ret, size, 1, gfp);
98943+ return ret;
98944+}
98945+
98946+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98947+{
98948+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98949+ void *ret = __kmalloc_node_align(size, gfp, node, align);
98950+
98951+ if (!ZERO_OR_NULL_PTR(ret))
98952+ kmemleak_alloc(ret, size, 1, gfp);
98953 return ret;
98954 }
98955 EXPORT_SYMBOL(__kmalloc_node);
98956@@ -528,13 +542,92 @@ void kfree(const void *block)
98957 sp = slob_page(block);
98958 if (is_slob_page(sp)) {
98959 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98960- unsigned int *m = (unsigned int *)(block - align);
98961- slob_free(m, *m + align);
98962- } else
98963+ slob_t *m = (slob_t *)(block - align);
98964+ slob_free(m, m[0].units + align);
98965+ } else {
98966+ clear_slob_page(sp);
98967+ free_slob_page(sp);
98968+ sp->size = 0;
98969 put_page(&sp->page);
98970+ }
98971 }
98972 EXPORT_SYMBOL(kfree);
98973
98974+void check_object_size(const void *ptr, unsigned long n, bool to)
98975+{
98976+
98977+#ifdef CONFIG_PAX_USERCOPY
98978+ struct slob_page *sp;
98979+ const slob_t *free;
98980+ const void *base;
98981+ unsigned long flags;
98982+ const char *type;
98983+
98984+ if (!n)
98985+ return;
98986+
98987+ type = "<null>";
98988+ if (ZERO_OR_NULL_PTR(ptr))
98989+ goto report;
98990+
98991+ if (!virt_addr_valid(ptr))
98992+ return;
98993+
98994+ type = "<process stack>";
98995+ sp = slob_page(ptr);
98996+ if (!PageSlab((struct page *)sp)) {
98997+ if (object_is_on_stack(ptr, n) == -1)
98998+ goto report;
98999+ return;
99000+ }
99001+
99002+ type = "<slob>";
99003+ if (sp->size) {
99004+ base = page_address(&sp->page);
99005+ if (base <= ptr && n <= sp->size - (ptr - base))
99006+ return;
99007+ goto report;
99008+ }
99009+
99010+ /* some tricky double walking to find the chunk */
99011+ spin_lock_irqsave(&slob_lock, flags);
99012+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99013+ free = sp->free;
99014+
99015+ while (!slob_last(free) && (void *)free <= ptr) {
99016+ base = free + slob_units(free);
99017+ free = slob_next(free);
99018+ }
99019+
99020+ while (base < (void *)free) {
99021+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99022+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99023+ int offset;
99024+
99025+ if (ptr < base + align)
99026+ break;
99027+
99028+ offset = ptr - base - align;
99029+ if (offset >= m) {
99030+ base += size;
99031+ continue;
99032+ }
99033+
99034+ if (n > m - offset)
99035+ break;
99036+
99037+ spin_unlock_irqrestore(&slob_lock, flags);
99038+ return;
99039+ }
99040+
99041+ spin_unlock_irqrestore(&slob_lock, flags);
99042+report:
99043+ pax_report_usercopy(ptr, n, to, type);
99044+#endif
99045+
99046+}
99047+EXPORT_SYMBOL(check_object_size);
99048+
99049 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99050 size_t ksize(const void *block)
99051 {
99052@@ -547,10 +640,10 @@ size_t ksize(const void *block)
99053 sp = slob_page(block);
99054 if (is_slob_page(sp)) {
99055 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99056- unsigned int *m = (unsigned int *)(block - align);
99057- return SLOB_UNITS(*m) * SLOB_UNIT;
99058+ slob_t *m = (slob_t *)(block - align);
99059+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99060 } else
99061- return sp->page.private;
99062+ return sp->size;
99063 }
99064 EXPORT_SYMBOL(ksize);
99065
99066@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99067 {
99068 struct kmem_cache *c;
99069
99070+#ifdef CONFIG_PAX_USERCOPY
99071+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
99072+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
99073+#else
99074 c = slob_alloc(sizeof(struct kmem_cache),
99075 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
99076+#endif
99077
99078 if (c) {
99079 c->name = name;
99080@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99081 {
99082 void *b;
99083
99084+#ifdef CONFIG_PAX_USERCOPY
99085+ b = __kmalloc_node_align(c->size, flags, node, c->align);
99086+#else
99087 if (c->size < PAGE_SIZE) {
99088 b = slob_alloc(c->size, flags, c->align, node);
99089 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99090 SLOB_UNITS(c->size) * SLOB_UNIT,
99091 flags, node);
99092 } else {
99093+ struct slob_page *sp;
99094+
99095 b = slob_new_pages(flags, get_order(c->size), node);
99096+ sp = slob_page(b);
99097+ sp->size = c->size;
99098 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99099 PAGE_SIZE << get_order(c->size),
99100 flags, node);
99101 }
99102+#endif
99103
99104 if (c->ctor)
99105 c->ctor(b);
99106@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
99107
99108 static void __kmem_cache_free(void *b, int size)
99109 {
99110- if (size < PAGE_SIZE)
99111+ struct slob_page *sp = slob_page(b);
99112+
99113+ if (is_slob_page(sp))
99114 slob_free(b, size);
99115- else
99116+ else {
99117+ clear_slob_page(sp);
99118+ free_slob_page(sp);
99119+ sp->size = 0;
99120 slob_free_pages(b, get_order(size));
99121+ }
99122 }
99123
99124 static void kmem_rcu_free(struct rcu_head *head)
99125@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
99126
99127 void kmem_cache_free(struct kmem_cache *c, void *b)
99128 {
99129+ int size = c->size;
99130+
99131+#ifdef CONFIG_PAX_USERCOPY
99132+ if (size + c->align < PAGE_SIZE) {
99133+ size += c->align;
99134+ b -= c->align;
99135+ }
99136+#endif
99137+
99138 kmemleak_free_recursive(b, c->flags);
99139 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99140 struct slob_rcu *slob_rcu;
99141- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99142+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99143 INIT_RCU_HEAD(&slob_rcu->head);
99144- slob_rcu->size = c->size;
99145+ slob_rcu->size = size;
99146 call_rcu(&slob_rcu->head, kmem_rcu_free);
99147 } else {
99148- __kmem_cache_free(b, c->size);
99149+ __kmem_cache_free(b, size);
99150 }
99151
99152+#ifdef CONFIG_PAX_USERCOPY
99153+ trace_kfree(_RET_IP_, b);
99154+#else
99155 trace_kmem_cache_free(_RET_IP_, b);
99156+#endif
99157+
99158 }
99159 EXPORT_SYMBOL(kmem_cache_free);
99160
99161diff --git a/mm/slub.c b/mm/slub.c
99162index 4996fc7..87e01d0 100644
99163--- a/mm/slub.c
99164+++ b/mm/slub.c
99165@@ -201,7 +201,7 @@ struct track {
99166
99167 enum track_item { TRACK_ALLOC, TRACK_FREE };
99168
99169-#ifdef CONFIG_SLUB_DEBUG
99170+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99171 static int sysfs_slab_add(struct kmem_cache *);
99172 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99173 static void sysfs_slab_remove(struct kmem_cache *);
99174@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
99175 if (!t->addr)
99176 return;
99177
99178- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99179+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99180 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99181 }
99182
99183@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
99184
99185 page = virt_to_head_page(x);
99186
99187+ BUG_ON(!PageSlab(page));
99188+
99189 slab_free(s, page, x, _RET_IP_);
99190
99191 trace_kmem_cache_free(_RET_IP_, x);
99192@@ -1937,7 +1939,7 @@ static int slub_min_objects;
99193 * Merge control. If this is set then no merging of slab caches will occur.
99194 * (Could be removed. This was introduced to pacify the merge skeptics.)
99195 */
99196-static int slub_nomerge;
99197+static int slub_nomerge = 1;
99198
99199 /*
99200 * Calculate the order of allocation given an slab object size.
99201@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
99202 * list to avoid pounding the page allocator excessively.
99203 */
99204 set_min_partial(s, ilog2(s->size));
99205- s->refcount = 1;
99206+ atomic_set(&s->refcount, 1);
99207 #ifdef CONFIG_NUMA
99208 s->remote_node_defrag_ratio = 1000;
99209 #endif
99210@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
99211 void kmem_cache_destroy(struct kmem_cache *s)
99212 {
99213 down_write(&slub_lock);
99214- s->refcount--;
99215- if (!s->refcount) {
99216+ if (atomic_dec_and_test(&s->refcount)) {
99217 list_del(&s->list);
99218 up_write(&slub_lock);
99219 if (kmem_cache_close(s)) {
99220@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
99221 __setup("slub_nomerge", setup_slub_nomerge);
99222
99223 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
99224- const char *name, int size, gfp_t gfp_flags)
99225+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
99226 {
99227- unsigned int flags = 0;
99228-
99229 if (gfp_flags & SLUB_DMA)
99230- flags = SLAB_CACHE_DMA;
99231+ flags |= SLAB_CACHE_DMA;
99232
99233 /*
99234 * This function is called with IRQs disabled during early-boot on
99235@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99236 EXPORT_SYMBOL(__kmalloc_node);
99237 #endif
99238
99239+void check_object_size(const void *ptr, unsigned long n, bool to)
99240+{
99241+
99242+#ifdef CONFIG_PAX_USERCOPY
99243+ struct page *page;
99244+ struct kmem_cache *s = NULL;
99245+ unsigned long offset;
99246+ const char *type;
99247+
99248+ if (!n)
99249+ return;
99250+
99251+ type = "<null>";
99252+ if (ZERO_OR_NULL_PTR(ptr))
99253+ goto report;
99254+
99255+ if (!virt_addr_valid(ptr))
99256+ return;
99257+
99258+ page = get_object_page(ptr);
99259+
99260+ type = "<process stack>";
99261+ if (!page) {
99262+ if (object_is_on_stack(ptr, n) == -1)
99263+ goto report;
99264+ return;
99265+ }
99266+
99267+ s = page->slab;
99268+ type = s->name;
99269+ if (!(s->flags & SLAB_USERCOPY))
99270+ goto report;
99271+
99272+ offset = (ptr - page_address(page)) % s->size;
99273+ if (offset <= s->objsize && n <= s->objsize - offset)
99274+ return;
99275+
99276+report:
99277+ pax_report_usercopy(ptr, n, to, type);
99278+#endif
99279+
99280+}
99281+EXPORT_SYMBOL(check_object_size);
99282+
99283 size_t ksize(const void *object)
99284 {
99285 struct page *page;
99286@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
99287 * kmem_cache_open for slab_state == DOWN.
99288 */
99289 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
99290- sizeof(struct kmem_cache_node), GFP_NOWAIT);
99291- kmalloc_caches[0].refcount = -1;
99292+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
99293+ atomic_set(&kmalloc_caches[0].refcount, -1);
99294 caches++;
99295
99296 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
99297@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
99298 /* Caches that are not of the two-to-the-power-of size */
99299 if (KMALLOC_MIN_SIZE <= 32) {
99300 create_kmalloc_cache(&kmalloc_caches[1],
99301- "kmalloc-96", 96, GFP_NOWAIT);
99302+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
99303 caches++;
99304 }
99305 if (KMALLOC_MIN_SIZE <= 64) {
99306 create_kmalloc_cache(&kmalloc_caches[2],
99307- "kmalloc-192", 192, GFP_NOWAIT);
99308+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
99309 caches++;
99310 }
99311
99312 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
99313 create_kmalloc_cache(&kmalloc_caches[i],
99314- "kmalloc", 1 << i, GFP_NOWAIT);
99315+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
99316 caches++;
99317 }
99318
99319@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
99320 /*
99321 * We may have set a slab to be unmergeable during bootstrap.
99322 */
99323- if (s->refcount < 0)
99324+ if (atomic_read(&s->refcount) < 0)
99325 return 1;
99326
99327 return 0;
99328@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99329 if (s) {
99330 int cpu;
99331
99332- s->refcount++;
99333+ atomic_inc(&s->refcount);
99334 /*
99335 * Adjust the object sizes so that we clear
99336 * the complete object on kzalloc.
99337@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99338
99339 if (sysfs_slab_alias(s, name)) {
99340 down_write(&slub_lock);
99341- s->refcount--;
99342+ atomic_dec(&s->refcount);
99343 up_write(&slub_lock);
99344 goto err;
99345 }
99346@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
99347
99348 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99349 {
99350- return sprintf(buf, "%d\n", s->refcount - 1);
99351+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
99352 }
99353 SLAB_ATTR_RO(aliases);
99354
99355@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
99356 kfree(s);
99357 }
99358
99359-static struct sysfs_ops slab_sysfs_ops = {
99360+static const struct sysfs_ops slab_sysfs_ops = {
99361 .show = slab_attr_show,
99362 .store = slab_attr_store,
99363 };
99364@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
99365 return 0;
99366 }
99367
99368-static struct kset_uevent_ops slab_uevent_ops = {
99369+static const struct kset_uevent_ops slab_uevent_ops = {
99370 .filter = uevent_filter,
99371 };
99372
99373@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
99374 return name;
99375 }
99376
99377+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99378 static int sysfs_slab_add(struct kmem_cache *s)
99379 {
99380 int err;
99381@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
99382 kobject_del(&s->kobj);
99383 kobject_put(&s->kobj);
99384 }
99385+#endif
99386
99387 /*
99388 * Need to buffer aliases during bootup until sysfs becomes
99389@@ -4632,6 +4677,7 @@ struct saved_alias {
99390
99391 static struct saved_alias *alias_list;
99392
99393+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99394 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99395 {
99396 struct saved_alias *al;
99397@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99398 alias_list = al;
99399 return 0;
99400 }
99401+#endif
99402
99403 static int __init slab_sysfs_init(void)
99404 {
99405@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
99406
99407 static int __init slab_proc_init(void)
99408 {
99409- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
99410+ mode_t gr_mode = S_IRUGO;
99411+
99412+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99413+ gr_mode = S_IRUSR;
99414+#endif
99415+
99416+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
99417 return 0;
99418 }
99419 module_init(slab_proc_init);
99420diff --git a/mm/swap.c b/mm/swap.c
99421index 308e57d..5de19c0 100644
99422--- a/mm/swap.c
99423+++ b/mm/swap.c
99424@@ -30,6 +30,7 @@
99425 #include <linux/notifier.h>
99426 #include <linux/backing-dev.h>
99427 #include <linux/memcontrol.h>
99428+#include <linux/hugetlb.h>
99429
99430 #include "internal.h"
99431
99432@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
99433 compound_page_dtor *dtor;
99434
99435 dtor = get_compound_page_dtor(page);
99436+ if (!PageHuge(page))
99437+ BUG_ON(dtor != free_compound_page);
99438 (*dtor)(page);
99439 }
99440 }
99441diff --git a/mm/util.c b/mm/util.c
99442index e48b493..24a601d 100644
99443--- a/mm/util.c
99444+++ b/mm/util.c
99445@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
99446 void arch_pick_mmap_layout(struct mm_struct *mm)
99447 {
99448 mm->mmap_base = TASK_UNMAPPED_BASE;
99449+
99450+#ifdef CONFIG_PAX_RANDMMAP
99451+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99452+ mm->mmap_base += mm->delta_mmap;
99453+#endif
99454+
99455 mm->get_unmapped_area = arch_get_unmapped_area;
99456 mm->unmap_area = arch_unmap_area;
99457 }
99458diff --git a/mm/vmalloc.c b/mm/vmalloc.c
99459index f34ffd0..90d7407 100644
99460--- a/mm/vmalloc.c
99461+++ b/mm/vmalloc.c
99462@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
99463
99464 pte = pte_offset_kernel(pmd, addr);
99465 do {
99466- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99467- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99468+
99469+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99470+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
99471+ BUG_ON(!pte_exec(*pte));
99472+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
99473+ continue;
99474+ }
99475+#endif
99476+
99477+ {
99478+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99479+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99480+ }
99481 } while (pte++, addr += PAGE_SIZE, addr != end);
99482 }
99483
99484@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99485 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
99486 {
99487 pte_t *pte;
99488+ int ret = -ENOMEM;
99489
99490 /*
99491 * nr is a running index into the array which helps higher level
99492@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99493 pte = pte_alloc_kernel(pmd, addr);
99494 if (!pte)
99495 return -ENOMEM;
99496+
99497+ pax_open_kernel();
99498 do {
99499 struct page *page = pages[*nr];
99500
99501- if (WARN_ON(!pte_none(*pte)))
99502- return -EBUSY;
99503- if (WARN_ON(!page))
99504- return -ENOMEM;
99505+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99506+ if (!(pgprot_val(prot) & _PAGE_NX))
99507+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
99508+ else
99509+#endif
99510+
99511+ if (WARN_ON(!pte_none(*pte))) {
99512+ ret = -EBUSY;
99513+ goto out;
99514+ }
99515+ if (WARN_ON(!page)) {
99516+ ret = -ENOMEM;
99517+ goto out;
99518+ }
99519 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
99520 (*nr)++;
99521 } while (pte++, addr += PAGE_SIZE, addr != end);
99522- return 0;
99523+ ret = 0;
99524+out:
99525+ pax_close_kernel();
99526+ return ret;
99527 }
99528
99529 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
99530@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
99531 * and fall back on vmalloc() if that fails. Others
99532 * just put it in the vmalloc space.
99533 */
99534-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
99535+#ifdef CONFIG_MODULES
99536+#ifdef MODULES_VADDR
99537 unsigned long addr = (unsigned long)x;
99538 if (addr >= MODULES_VADDR && addr < MODULES_END)
99539 return 1;
99540 #endif
99541+
99542+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99543+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
99544+ return 1;
99545+#endif
99546+
99547+#endif
99548+
99549 return is_vmalloc_addr(x);
99550 }
99551
99552@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
99553
99554 if (!pgd_none(*pgd)) {
99555 pud_t *pud = pud_offset(pgd, addr);
99556+#ifdef CONFIG_X86
99557+ if (!pud_large(*pud))
99558+#endif
99559 if (!pud_none(*pud)) {
99560 pmd_t *pmd = pmd_offset(pud, addr);
99561+#ifdef CONFIG_X86
99562+ if (!pmd_large(*pmd))
99563+#endif
99564 if (!pmd_none(*pmd)) {
99565 pte_t *ptep, pte;
99566
99567@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
99568 struct rb_node *tmp;
99569
99570 while (*p) {
99571- struct vmap_area *tmp;
99572+ struct vmap_area *varea;
99573
99574 parent = *p;
99575- tmp = rb_entry(parent, struct vmap_area, rb_node);
99576- if (va->va_start < tmp->va_end)
99577+ varea = rb_entry(parent, struct vmap_area, rb_node);
99578+ if (va->va_start < varea->va_end)
99579 p = &(*p)->rb_left;
99580- else if (va->va_end > tmp->va_start)
99581+ else if (va->va_end > varea->va_start)
99582 p = &(*p)->rb_right;
99583 else
99584 BUG();
99585@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
99586 struct vm_struct *area;
99587
99588 BUG_ON(in_interrupt());
99589+
99590+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99591+ if (flags & VM_KERNEXEC) {
99592+ if (start != VMALLOC_START || end != VMALLOC_END)
99593+ return NULL;
99594+ start = (unsigned long)MODULES_EXEC_VADDR;
99595+ end = (unsigned long)MODULES_EXEC_END;
99596+ }
99597+#endif
99598+
99599 if (flags & VM_IOREMAP) {
99600 int bit = fls(size);
99601
99602@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
99603 if (count > totalram_pages)
99604 return NULL;
99605
99606+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99607+ if (!(pgprot_val(prot) & _PAGE_NX))
99608+ flags |= VM_KERNEXEC;
99609+#endif
99610+
99611 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
99612 __builtin_return_address(0));
99613 if (!area)
99614@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
99615 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
99616 return NULL;
99617
99618+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99619+ if (!(pgprot_val(prot) & _PAGE_NX))
99620+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
99621+ VMALLOC_START, VMALLOC_END, node,
99622+ gfp_mask, caller);
99623+ else
99624+#endif
99625+
99626 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
99627 VMALLOC_START, VMALLOC_END, node,
99628 gfp_mask, caller);
99629@@ -1698,10 +1763,9 @@ EXPORT_SYMBOL(vmalloc_node);
99630 * For tight control over page level allocator and protection flags
99631 * use __vmalloc() instead.
99632 */
99633-
99634 void *vmalloc_exec(unsigned long size)
99635 {
99636- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
99637+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
99638 -1, __builtin_return_address(0));
99639 }
99640
99641@@ -1998,6 +2062,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
99642 unsigned long uaddr = vma->vm_start;
99643 unsigned long usize = vma->vm_end - vma->vm_start;
99644
99645+ BUG_ON(vma->vm_mirror);
99646+
99647 if ((PAGE_SIZE-1) & (unsigned long)addr)
99648 return -EINVAL;
99649
99650diff --git a/mm/vmstat.c b/mm/vmstat.c
99651index 42d76c6..5643dc4 100644
99652--- a/mm/vmstat.c
99653+++ b/mm/vmstat.c
99654@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
99655 *
99656 * vm_stat contains the global counters
99657 */
99658-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99659+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99660 EXPORT_SYMBOL(vm_stat);
99661
99662 #ifdef CONFIG_SMP
99663@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
99664 v = p->vm_stat_diff[i];
99665 p->vm_stat_diff[i] = 0;
99666 local_irq_restore(flags);
99667- atomic_long_add(v, &zone->vm_stat[i]);
99668+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
99669 global_diff[i] += v;
99670 #ifdef CONFIG_NUMA
99671 /* 3 seconds idle till flush */
99672@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
99673
99674 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
99675 if (global_diff[i])
99676- atomic_long_add(global_diff[i], &vm_stat[i]);
99677+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
99678 }
99679
99680 #endif
99681@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
99682 start_cpu_timer(cpu);
99683 #endif
99684 #ifdef CONFIG_PROC_FS
99685- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
99686- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
99687- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
99688- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
99689+ {
99690+ mode_t gr_mode = S_IRUGO;
99691+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99692+ gr_mode = S_IRUSR;
99693+#endif
99694+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
99695+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
99696+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
99697+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
99698+#else
99699+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
99700+#endif
99701+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
99702+ }
99703 #endif
99704 return 0;
99705 }
99706diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
99707index a29c5ab..6143f20 100644
99708--- a/net/8021q/vlan.c
99709+++ b/net/8021q/vlan.c
99710@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
99711 err = -EPERM;
99712 if (!capable(CAP_NET_ADMIN))
99713 break;
99714- if ((args.u.name_type >= 0) &&
99715- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
99716+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
99717 struct vlan_net *vn;
99718
99719 vn = net_generic(net, vlan_net_id);
99720diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
99721index a2d2984..f9eb711 100644
99722--- a/net/9p/trans_fd.c
99723+++ b/net/9p/trans_fd.c
99724@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
99725 oldfs = get_fs();
99726 set_fs(get_ds());
99727 /* The cast to a user pointer is valid due to the set_fs() */
99728- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
99729+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
99730 set_fs(oldfs);
99731
99732 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
99733diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
99734index 02cc7e7..4514f1b 100644
99735--- a/net/atm/atm_misc.c
99736+++ b/net/atm/atm_misc.c
99737@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
99738 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
99739 return 1;
99740 atm_return(vcc,truesize);
99741- atomic_inc(&vcc->stats->rx_drop);
99742+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99743 return 0;
99744 }
99745
99746@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
99747 }
99748 }
99749 atm_return(vcc,guess);
99750- atomic_inc(&vcc->stats->rx_drop);
99751+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99752 return NULL;
99753 }
99754
99755@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
99756
99757 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99758 {
99759-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99760+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99761 __SONET_ITEMS
99762 #undef __HANDLE_ITEM
99763 }
99764@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99765
99766 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99767 {
99768-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
99769+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
99770 __SONET_ITEMS
99771 #undef __HANDLE_ITEM
99772 }
99773diff --git a/net/atm/lec.h b/net/atm/lec.h
99774index 9d14d19..5c145f3 100644
99775--- a/net/atm/lec.h
99776+++ b/net/atm/lec.h
99777@@ -48,7 +48,7 @@ struct lane2_ops {
99778 const u8 *tlvs, u32 sizeoftlvs);
99779 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
99780 const u8 *tlvs, u32 sizeoftlvs);
99781-};
99782+} __no_const;
99783
99784 /*
99785 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
99786diff --git a/net/atm/mpc.h b/net/atm/mpc.h
99787index 0919a88..a23d54e 100644
99788--- a/net/atm/mpc.h
99789+++ b/net/atm/mpc.h
99790@@ -33,7 +33,7 @@ struct mpoa_client {
99791 struct mpc_parameters parameters; /* parameters for this client */
99792
99793 const struct net_device_ops *old_ops;
99794- struct net_device_ops new_ops;
99795+ net_device_ops_no_const new_ops;
99796 };
99797
99798
99799diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
99800index 4504a4b..1733f1e 100644
99801--- a/net/atm/mpoa_caches.c
99802+++ b/net/atm/mpoa_caches.c
99803@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
99804 struct timeval now;
99805 struct k_message msg;
99806
99807+ pax_track_stack();
99808+
99809 do_gettimeofday(&now);
99810
99811 write_lock_irq(&client->egress_lock);
99812diff --git a/net/atm/proc.c b/net/atm/proc.c
99813index ab8419a..aa91497 100644
99814--- a/net/atm/proc.c
99815+++ b/net/atm/proc.c
99816@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
99817 const struct k_atm_aal_stats *stats)
99818 {
99819 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
99820- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
99821- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
99822- atomic_read(&stats->rx_drop));
99823+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
99824+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
99825+ atomic_read_unchecked(&stats->rx_drop));
99826 }
99827
99828 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
99829@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
99830 {
99831 struct sock *sk = sk_atm(vcc);
99832
99833+#ifdef CONFIG_GRKERNSEC_HIDESYM
99834+ seq_printf(seq, "%p ", NULL);
99835+#else
99836 seq_printf(seq, "%p ", vcc);
99837+#endif
99838+
99839 if (!vcc->dev)
99840 seq_printf(seq, "Unassigned ");
99841 else
99842@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
99843 {
99844 if (!vcc->dev)
99845 seq_printf(seq, sizeof(void *) == 4 ?
99846+#ifdef CONFIG_GRKERNSEC_HIDESYM
99847+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
99848+#else
99849 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
99850+#endif
99851 else
99852 seq_printf(seq, "%3d %3d %5d ",
99853 vcc->dev->number, vcc->vpi, vcc->vci);
99854diff --git a/net/atm/resources.c b/net/atm/resources.c
99855index 56b7322..c48b84e 100644
99856--- a/net/atm/resources.c
99857+++ b/net/atm/resources.c
99858@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
99859 static void copy_aal_stats(struct k_atm_aal_stats *from,
99860 struct atm_aal_stats *to)
99861 {
99862-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99863+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99864 __AAL_STAT_ITEMS
99865 #undef __HANDLE_ITEM
99866 }
99867@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
99868 static void subtract_aal_stats(struct k_atm_aal_stats *from,
99869 struct atm_aal_stats *to)
99870 {
99871-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
99872+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
99873 __AAL_STAT_ITEMS
99874 #undef __HANDLE_ITEM
99875 }
99876diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
99877index 8567d47..bba2292 100644
99878--- a/net/bridge/br_private.h
99879+++ b/net/bridge/br_private.h
99880@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
99881
99882 #ifdef CONFIG_SYSFS
99883 /* br_sysfs_if.c */
99884-extern struct sysfs_ops brport_sysfs_ops;
99885+extern const struct sysfs_ops brport_sysfs_ops;
99886 extern int br_sysfs_addif(struct net_bridge_port *p);
99887
99888 /* br_sysfs_br.c */
99889diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
99890index 9a52ac5..c97538e 100644
99891--- a/net/bridge/br_stp_if.c
99892+++ b/net/bridge/br_stp_if.c
99893@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
99894 char *envp[] = { NULL };
99895
99896 if (br->stp_enabled == BR_USER_STP) {
99897- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
99898+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
99899 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
99900 br->dev->name, r);
99901
99902diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
99903index 820643a..ce77fb3 100644
99904--- a/net/bridge/br_sysfs_if.c
99905+++ b/net/bridge/br_sysfs_if.c
99906@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
99907 return ret;
99908 }
99909
99910-struct sysfs_ops brport_sysfs_ops = {
99911+const struct sysfs_ops brport_sysfs_ops = {
99912 .show = brport_show,
99913 .store = brport_store,
99914 };
99915diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
99916index d73d47f..72df42a 100644
99917--- a/net/bridge/netfilter/ebtables.c
99918+++ b/net/bridge/netfilter/ebtables.c
99919@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
99920 unsigned int entries_size, nentries;
99921 char *entries;
99922
99923+ pax_track_stack();
99924+
99925 if (cmd == EBT_SO_GET_ENTRIES) {
99926 entries_size = t->private->entries_size;
99927 nentries = t->private->nentries;
99928diff --git a/net/can/bcm.c b/net/can/bcm.c
99929index 2ffd2e0..72a7486 100644
99930--- a/net/can/bcm.c
99931+++ b/net/can/bcm.c
99932@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
99933 struct bcm_sock *bo = bcm_sk(sk);
99934 struct bcm_op *op;
99935
99936+#ifdef CONFIG_GRKERNSEC_HIDESYM
99937+ seq_printf(m, ">>> socket %p", NULL);
99938+ seq_printf(m, " / sk %p", NULL);
99939+ seq_printf(m, " / bo %p", NULL);
99940+#else
99941 seq_printf(m, ">>> socket %p", sk->sk_socket);
99942 seq_printf(m, " / sk %p", sk);
99943 seq_printf(m, " / bo %p", bo);
99944+#endif
99945 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
99946 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
99947 seq_printf(m, " <<<\n");
99948diff --git a/net/compat.c b/net/compat.c
99949index 9559afc..ccd74e1 100644
99950--- a/net/compat.c
99951+++ b/net/compat.c
99952@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
99953 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
99954 __get_user(kmsg->msg_flags, &umsg->msg_flags))
99955 return -EFAULT;
99956- kmsg->msg_name = compat_ptr(tmp1);
99957- kmsg->msg_iov = compat_ptr(tmp2);
99958- kmsg->msg_control = compat_ptr(tmp3);
99959+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
99960+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
99961+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
99962 return 0;
99963 }
99964
99965@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99966 kern_msg->msg_name = NULL;
99967
99968 tot_len = iov_from_user_compat_to_kern(kern_iov,
99969- (struct compat_iovec __user *)kern_msg->msg_iov,
99970+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
99971 kern_msg->msg_iovlen);
99972 if (tot_len >= 0)
99973 kern_msg->msg_iov = kern_iov;
99974@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99975
99976 #define CMSG_COMPAT_FIRSTHDR(msg) \
99977 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
99978- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
99979+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
99980 (struct compat_cmsghdr __user *)NULL)
99981
99982 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
99983 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
99984 (ucmlen) <= (unsigned long) \
99985 ((mhdr)->msg_controllen - \
99986- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
99987+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
99988
99989 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
99990 struct compat_cmsghdr __user *cmsg, int cmsg_len)
99991 {
99992 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
99993- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
99994+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
99995 msg->msg_controllen)
99996 return NULL;
99997 return (struct compat_cmsghdr __user *)ptr;
99998@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
99999 {
100000 struct compat_timeval ctv;
100001 struct compat_timespec cts[3];
100002- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
100003+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
100004 struct compat_cmsghdr cmhdr;
100005 int cmlen;
100006
100007@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
100008
100009 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
100010 {
100011- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
100012+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
100013 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
100014 int fdnum = scm->fp->count;
100015 struct file **fp = scm->fp->fp;
100016@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
100017 len = sizeof(ktime);
100018 old_fs = get_fs();
100019 set_fs(KERNEL_DS);
100020- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
100021+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
100022 set_fs(old_fs);
100023
100024 if (!err) {
100025@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100026 case MCAST_JOIN_GROUP:
100027 case MCAST_LEAVE_GROUP:
100028 {
100029- struct compat_group_req __user *gr32 = (void *)optval;
100030+ struct compat_group_req __user *gr32 = (void __user *)optval;
100031 struct group_req __user *kgr =
100032 compat_alloc_user_space(sizeof(struct group_req));
100033 u32 interface;
100034@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100035 case MCAST_BLOCK_SOURCE:
100036 case MCAST_UNBLOCK_SOURCE:
100037 {
100038- struct compat_group_source_req __user *gsr32 = (void *)optval;
100039+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
100040 struct group_source_req __user *kgsr = compat_alloc_user_space(
100041 sizeof(struct group_source_req));
100042 u32 interface;
100043@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
100044 }
100045 case MCAST_MSFILTER:
100046 {
100047- struct compat_group_filter __user *gf32 = (void *)optval;
100048+ struct compat_group_filter __user *gf32 = (void __user *)optval;
100049 struct group_filter __user *kgf;
100050 u32 interface, fmode, numsrc;
100051
100052diff --git a/net/core/dev.c b/net/core/dev.c
100053index 84a0705..575db4c 100644
100054--- a/net/core/dev.c
100055+++ b/net/core/dev.c
100056@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
100057 if (no_module && capable(CAP_NET_ADMIN))
100058 no_module = request_module("netdev-%s", name);
100059 if (no_module && capable(CAP_SYS_MODULE)) {
100060+#ifdef CONFIG_GRKERNSEC_MODHARDEN
100061+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
100062+#else
100063 if (!request_module("%s", name))
100064 pr_err("Loading kernel module for a network device "
100065 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
100066 "instead\n", name);
100067+#endif
100068 }
100069 }
100070 EXPORT_SYMBOL(dev_load);
100071@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
100072
100073 struct dev_gso_cb {
100074 void (*destructor)(struct sk_buff *skb);
100075-};
100076+} __no_const;
100077
100078 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
100079
100080@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
100081 }
100082 EXPORT_SYMBOL(netif_rx_ni);
100083
100084-static void net_tx_action(struct softirq_action *h)
100085+static void net_tx_action(void)
100086 {
100087 struct softnet_data *sd = &__get_cpu_var(softnet_data);
100088
100089@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
100090 EXPORT_SYMBOL(netif_napi_del);
100091
100092
100093-static void net_rx_action(struct softirq_action *h)
100094+static void net_rx_action(void)
100095 {
100096 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
100097 unsigned long time_limit = jiffies + 2;
100098diff --git a/net/core/flow.c b/net/core/flow.c
100099index 9601587..8c4824e 100644
100100--- a/net/core/flow.c
100101+++ b/net/core/flow.c
100102@@ -35,11 +35,11 @@ struct flow_cache_entry {
100103 atomic_t *object_ref;
100104 };
100105
100106-atomic_t flow_cache_genid = ATOMIC_INIT(0);
100107+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
100108
100109 static u32 flow_hash_shift;
100110 #define flow_hash_size (1 << flow_hash_shift)
100111-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
100112+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
100113
100114 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
100115
100116@@ -52,7 +52,7 @@ struct flow_percpu_info {
100117 u32 hash_rnd;
100118 int count;
100119 };
100120-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
100121+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
100122
100123 #define flow_hash_rnd_recalc(cpu) \
100124 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
100125@@ -69,7 +69,7 @@ struct flow_flush_info {
100126 atomic_t cpuleft;
100127 struct completion completion;
100128 };
100129-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
100130+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
100131
100132 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
100133
100134@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
100135 if (fle->family == family &&
100136 fle->dir == dir &&
100137 flow_key_compare(key, &fle->key) == 0) {
100138- if (fle->genid == atomic_read(&flow_cache_genid)) {
100139+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
100140 void *ret = fle->object;
100141
100142 if (ret)
100143@@ -228,7 +228,7 @@ nocache:
100144 err = resolver(net, key, family, dir, &obj, &obj_ref);
100145
100146 if (fle && !err) {
100147- fle->genid = atomic_read(&flow_cache_genid);
100148+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
100149
100150 if (fle->object)
100151 atomic_dec(fle->object_ref);
100152@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
100153
100154 fle = flow_table(cpu)[i];
100155 for (; fle; fle = fle->next) {
100156- unsigned genid = atomic_read(&flow_cache_genid);
100157+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
100158
100159 if (!fle->object || fle->genid == genid)
100160 continue;
100161diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
100162index d4fd895..ac9b1e6 100644
100163--- a/net/core/rtnetlink.c
100164+++ b/net/core/rtnetlink.c
100165@@ -57,7 +57,7 @@ struct rtnl_link
100166 {
100167 rtnl_doit_func doit;
100168 rtnl_dumpit_func dumpit;
100169-};
100170+} __no_const;
100171
100172 static DEFINE_MUTEX(rtnl_mutex);
100173
100174diff --git a/net/core/scm.c b/net/core/scm.c
100175index d98eafc..1a190a9 100644
100176--- a/net/core/scm.c
100177+++ b/net/core/scm.c
100178@@ -191,7 +191,7 @@ error:
100179 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100180 {
100181 struct cmsghdr __user *cm
100182- = (__force struct cmsghdr __user *)msg->msg_control;
100183+ = (struct cmsghdr __force_user *)msg->msg_control;
100184 struct cmsghdr cmhdr;
100185 int cmlen = CMSG_LEN(len);
100186 int err;
100187@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100188 err = -EFAULT;
100189 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
100190 goto out;
100191- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
100192+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
100193 goto out;
100194 cmlen = CMSG_SPACE(len);
100195 if (msg->msg_controllen < cmlen)
100196@@ -229,7 +229,7 @@ out:
100197 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100198 {
100199 struct cmsghdr __user *cm
100200- = (__force struct cmsghdr __user*)msg->msg_control;
100201+ = (struct cmsghdr __force_user *)msg->msg_control;
100202
100203 int fdmax = 0;
100204 int fdnum = scm->fp->count;
100205@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100206 if (fdnum < fdmax)
100207 fdmax = fdnum;
100208
100209- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
100210+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
100211 i++, cmfptr++)
100212 {
100213 int new_fd;
100214diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
100215index 45329d7..626aaa6 100644
100216--- a/net/core/secure_seq.c
100217+++ b/net/core/secure_seq.c
100218@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
100219 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
100220
100221 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100222- __be16 dport)
100223+ __be16 dport)
100224 {
100225 u32 secret[MD5_MESSAGE_BYTES / 4];
100226 u32 hash[MD5_DIGEST_WORDS];
100227@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100228 secret[i] = net_secret[i];
100229
100230 md5_transform(hash, secret);
100231-
100232 return hash[0];
100233 }
100234 #endif
100235diff --git a/net/core/skbuff.c b/net/core/skbuff.c
100236index 025f924..70a71c4 100644
100237--- a/net/core/skbuff.c
100238+++ b/net/core/skbuff.c
100239@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
100240 struct sk_buff *frag_iter;
100241 struct sock *sk = skb->sk;
100242
100243+ pax_track_stack();
100244+
100245 /*
100246 * __skb_splice_bits() only fails if the output has no room left,
100247 * so no point in going over the frag_list for the error case.
100248diff --git a/net/core/sock.c b/net/core/sock.c
100249index 6605e75..3acebda 100644
100250--- a/net/core/sock.c
100251+++ b/net/core/sock.c
100252@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
100253 break;
100254
100255 case SO_PEERCRED:
100256+ {
100257+ struct ucred peercred;
100258 if (len > sizeof(sk->sk_peercred))
100259 len = sizeof(sk->sk_peercred);
100260- if (copy_to_user(optval, &sk->sk_peercred, len))
100261+ peercred = sk->sk_peercred;
100262+ if (copy_to_user(optval, &peercred, len))
100263 return -EFAULT;
100264 goto lenout;
100265+ }
100266
100267 case SO_PEERNAME:
100268 {
100269@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
100270 */
100271 smp_wmb();
100272 atomic_set(&sk->sk_refcnt, 1);
100273- atomic_set(&sk->sk_drops, 0);
100274+ atomic_set_unchecked(&sk->sk_drops, 0);
100275 }
100276 EXPORT_SYMBOL(sock_init_data);
100277
100278diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
100279index 2036568..c55883d 100644
100280--- a/net/decnet/sysctl_net_decnet.c
100281+++ b/net/decnet/sysctl_net_decnet.c
100282@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
100283
100284 if (len > *lenp) len = *lenp;
100285
100286- if (copy_to_user(buffer, addr, len))
100287+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
100288 return -EFAULT;
100289
100290 *lenp = len;
100291@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
100292
100293 if (len > *lenp) len = *lenp;
100294
100295- if (copy_to_user(buffer, devname, len))
100296+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
100297 return -EFAULT;
100298
100299 *lenp = len;
100300diff --git a/net/econet/Kconfig b/net/econet/Kconfig
100301index 39a2d29..f39c0fe 100644
100302--- a/net/econet/Kconfig
100303+++ b/net/econet/Kconfig
100304@@ -4,7 +4,7 @@
100305
100306 config ECONET
100307 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
100308- depends on EXPERIMENTAL && INET
100309+ depends on EXPERIMENTAL && INET && BROKEN
100310 ---help---
100311 Econet is a fairly old and slow networking protocol mainly used by
100312 Acorn computers to access file and print servers. It uses native
100313diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
100314index a413b1b..380849c 100644
100315--- a/net/ieee802154/dgram.c
100316+++ b/net/ieee802154/dgram.c
100317@@ -318,7 +318,7 @@ out:
100318 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
100319 {
100320 if (sock_queue_rcv_skb(sk, skb) < 0) {
100321- atomic_inc(&sk->sk_drops);
100322+ atomic_inc_unchecked(&sk->sk_drops);
100323 kfree_skb(skb);
100324 return NET_RX_DROP;
100325 }
100326diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
100327index 30e74ee..bfc6ee0 100644
100328--- a/net/ieee802154/raw.c
100329+++ b/net/ieee802154/raw.c
100330@@ -206,7 +206,7 @@ out:
100331 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
100332 {
100333 if (sock_queue_rcv_skb(sk, skb) < 0) {
100334- atomic_inc(&sk->sk_drops);
100335+ atomic_inc_unchecked(&sk->sk_drops);
100336 kfree_skb(skb);
100337 return NET_RX_DROP;
100338 }
100339diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
100340index dba56d2..acee5d6 100644
100341--- a/net/ipv4/inet_diag.c
100342+++ b/net/ipv4/inet_diag.c
100343@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
100344 r->idiag_retrans = 0;
100345
100346 r->id.idiag_if = sk->sk_bound_dev_if;
100347+#ifdef CONFIG_GRKERNSEC_HIDESYM
100348+ r->id.idiag_cookie[0] = 0;
100349+ r->id.idiag_cookie[1] = 0;
100350+#else
100351 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
100352 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
100353+#endif
100354
100355 r->id.idiag_sport = inet->sport;
100356 r->id.idiag_dport = inet->dport;
100357@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
100358 r->idiag_family = tw->tw_family;
100359 r->idiag_retrans = 0;
100360 r->id.idiag_if = tw->tw_bound_dev_if;
100361+
100362+#ifdef CONFIG_GRKERNSEC_HIDESYM
100363+ r->id.idiag_cookie[0] = 0;
100364+ r->id.idiag_cookie[1] = 0;
100365+#else
100366 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
100367 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
100368+#endif
100369+
100370 r->id.idiag_sport = tw->tw_sport;
100371 r->id.idiag_dport = tw->tw_dport;
100372 r->id.idiag_src[0] = tw->tw_rcv_saddr;
100373@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
100374 if (sk == NULL)
100375 goto unlock;
100376
100377+#ifndef CONFIG_GRKERNSEC_HIDESYM
100378 err = -ESTALE;
100379 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
100380 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
100381 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
100382 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
100383 goto out;
100384+#endif
100385
100386 err = -ENOMEM;
100387 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
100388@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
100389 r->idiag_retrans = req->retrans;
100390
100391 r->id.idiag_if = sk->sk_bound_dev_if;
100392+
100393+#ifdef CONFIG_GRKERNSEC_HIDESYM
100394+ r->id.idiag_cookie[0] = 0;
100395+ r->id.idiag_cookie[1] = 0;
100396+#else
100397 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
100398 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
100399+#endif
100400
100401 tmo = req->expires - jiffies;
100402 if (tmo < 0)
100403diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
100404index d717267..56de7e7 100644
100405--- a/net/ipv4/inet_hashtables.c
100406+++ b/net/ipv4/inet_hashtables.c
100407@@ -18,12 +18,15 @@
100408 #include <linux/sched.h>
100409 #include <linux/slab.h>
100410 #include <linux/wait.h>
100411+#include <linux/security.h>
100412
100413 #include <net/inet_connection_sock.h>
100414 #include <net/inet_hashtables.h>
100415 #include <net/secure_seq.h>
100416 #include <net/ip.h>
100417
100418+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
100419+
100420 /*
100421 * Allocate and initialize a new local port bind bucket.
100422 * The bindhash mutex for snum's hash chain must be held here.
100423@@ -491,6 +494,8 @@ ok:
100424 }
100425 spin_unlock(&head->lock);
100426
100427+ gr_update_task_in_ip_table(current, inet_sk(sk));
100428+
100429 if (tw) {
100430 inet_twsk_deschedule(tw, death_row);
100431 inet_twsk_put(tw);
100432diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
100433index 13b229f..6956484 100644
100434--- a/net/ipv4/inetpeer.c
100435+++ b/net/ipv4/inetpeer.c
100436@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100437 struct inet_peer *p, *n;
100438 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
100439
100440+ pax_track_stack();
100441+
100442 /* Look up for the address quickly. */
100443 read_lock_bh(&peer_pool_lock);
100444 p = lookup(daddr, NULL);
100445@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100446 return NULL;
100447 n->v4daddr = daddr;
100448 atomic_set(&n->refcnt, 1);
100449- atomic_set(&n->rid, 0);
100450+ atomic_set_unchecked(&n->rid, 0);
100451 n->ip_id_count = secure_ip_id(daddr);
100452 n->tcp_ts_stamp = 0;
100453
100454diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
100455index d3fe10b..feeafc9 100644
100456--- a/net/ipv4/ip_fragment.c
100457+++ b/net/ipv4/ip_fragment.c
100458@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
100459 return 0;
100460
100461 start = qp->rid;
100462- end = atomic_inc_return(&peer->rid);
100463+ end = atomic_inc_return_unchecked(&peer->rid);
100464 qp->rid = end;
100465
100466 rc = qp->q.fragments && (end - start) > max;
100467diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
100468index e982b5c..f079d75 100644
100469--- a/net/ipv4/ip_sockglue.c
100470+++ b/net/ipv4/ip_sockglue.c
100471@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100472 int val;
100473 int len;
100474
100475+ pax_track_stack();
100476+
100477 if (level != SOL_IP)
100478 return -EOPNOTSUPP;
100479
100480@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100481 if (sk->sk_type != SOCK_STREAM)
100482 return -ENOPROTOOPT;
100483
100484- msg.msg_control = optval;
100485+ msg.msg_control = (void __force_kernel *)optval;
100486 msg.msg_controllen = len;
100487 msg.msg_flags = 0;
100488
100489diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
100490index f8d04c2..c1188f2 100644
100491--- a/net/ipv4/ipconfig.c
100492+++ b/net/ipv4/ipconfig.c
100493@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
100494
100495 mm_segment_t oldfs = get_fs();
100496 set_fs(get_ds());
100497- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100498+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100499 set_fs(oldfs);
100500 return res;
100501 }
100502@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
100503
100504 mm_segment_t oldfs = get_fs();
100505 set_fs(get_ds());
100506- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100507+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100508 set_fs(oldfs);
100509 return res;
100510 }
100511@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
100512
100513 mm_segment_t oldfs = get_fs();
100514 set_fs(get_ds());
100515- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
100516+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
100517 set_fs(oldfs);
100518 return res;
100519 }
100520diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
100521index c8b0cc3..05e4007 100644
100522--- a/net/ipv4/netfilter/arp_tables.c
100523+++ b/net/ipv4/netfilter/arp_tables.c
100524@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100525 private = &tmp;
100526 }
100527 #endif
100528+ memset(&info, 0, sizeof(info));
100529 info.valid_hooks = t->valid_hooks;
100530 memcpy(info.hook_entry, private->hook_entry,
100531 sizeof(info.hook_entry));
100532@@ -1003,6 +1004,11 @@ static int __do_replace(struct net *net, const char *name,
100533 unsigned int valid_hooks,
100534 struct xt_table_info *newinfo,
100535 unsigned int num_counters,
100536+ void __user *counters_ptr) __size_overflow(5);
100537+static int __do_replace(struct net *net, const char *name,
100538+ unsigned int valid_hooks,
100539+ struct xt_table_info *newinfo,
100540+ unsigned int num_counters,
100541 void __user *counters_ptr)
100542 {
100543 int ret;
100544@@ -1135,6 +1141,8 @@ add_counter_to_entry(struct arpt_entry *e,
100545 }
100546
100547 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100548+ int compat) __size_overflow(3);
100549+static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100550 int compat)
100551 {
100552 unsigned int i, curcpu;
100553diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
100554index c156db2..e772975 100644
100555--- a/net/ipv4/netfilter/ip_queue.c
100556+++ b/net/ipv4/netfilter/ip_queue.c
100557@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
100558
100559 if (v->data_len < sizeof(*user_iph))
100560 return 0;
100561+ if (v->data_len > 65535)
100562+ return -EMSGSIZE;
100563+
100564 diff = v->data_len - e->skb->len;
100565 if (diff < 0) {
100566 if (pskb_trim(e->skb, v->data_len))
100567@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
100568 static inline void
100569 __ipq_rcv_skb(struct sk_buff *skb)
100570 {
100571- int status, type, pid, flags, nlmsglen, skblen;
100572+ int status, type, pid, flags;
100573+ unsigned int nlmsglen, skblen;
100574 struct nlmsghdr *nlh;
100575
100576 skblen = skb->len;
100577diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
100578index 0606db1..918b88a 100644
100579--- a/net/ipv4/netfilter/ip_tables.c
100580+++ b/net/ipv4/netfilter/ip_tables.c
100581@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100582 private = &tmp;
100583 }
100584 #endif
100585+ memset(&info, 0, sizeof(info));
100586 info.valid_hooks = t->valid_hooks;
100587 memcpy(info.hook_entry, private->hook_entry,
100588 sizeof(info.hook_entry));
100589@@ -1208,6 +1209,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
100590 static int
100591 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100592 struct xt_table_info *newinfo, unsigned int num_counters,
100593+ void __user *counters_ptr) __size_overflow(5);
100594+static int
100595+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100596+ struct xt_table_info *newinfo, unsigned int num_counters,
100597 void __user *counters_ptr)
100598 {
100599 int ret;
100600@@ -1339,6 +1344,8 @@ add_counter_to_entry(struct ipt_entry *e,
100601 }
100602
100603 static int
100604+do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) __size_overflow(3);
100605+static int
100606 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
100607 {
100608 unsigned int i, curcpu;
100609diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100610index d9521f6..127fa44 100644
100611--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
100612+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100613@@ -436,6 +436,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
100614 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100615 unsigned char *eoc,
100616 unsigned long **oid,
100617+ unsigned int *len) __size_overflow(2);
100618+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100619+ unsigned char *eoc,
100620+ unsigned long **oid,
100621 unsigned int *len)
100622 {
100623 unsigned long subid;
100624diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
100625index ab996f9..3da5f96 100644
100626--- a/net/ipv4/raw.c
100627+++ b/net/ipv4/raw.c
100628@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100629 /* Charge it to the socket. */
100630
100631 if (sock_queue_rcv_skb(sk, skb) < 0) {
100632- atomic_inc(&sk->sk_drops);
100633+ atomic_inc_unchecked(&sk->sk_drops);
100634 kfree_skb(skb);
100635 return NET_RX_DROP;
100636 }
100637@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100638 int raw_rcv(struct sock *sk, struct sk_buff *skb)
100639 {
100640 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
100641- atomic_inc(&sk->sk_drops);
100642+ atomic_inc_unchecked(&sk->sk_drops);
100643 kfree_skb(skb);
100644 return NET_RX_DROP;
100645 }
100646@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
100647
100648 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
100649 {
100650+ struct icmp_filter filter;
100651+
100652+ if (optlen < 0)
100653+ return -EINVAL;
100654 if (optlen > sizeof(struct icmp_filter))
100655 optlen = sizeof(struct icmp_filter);
100656- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
100657+ if (copy_from_user(&filter, optval, optlen))
100658 return -EFAULT;
100659+ raw_sk(sk)->filter = filter;
100660+
100661 return 0;
100662 }
100663
100664 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
100665 {
100666 int len, ret = -EFAULT;
100667+ struct icmp_filter filter;
100668
100669 if (get_user(len, optlen))
100670 goto out;
100671@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
100672 if (len > sizeof(struct icmp_filter))
100673 len = sizeof(struct icmp_filter);
100674 ret = -EFAULT;
100675- if (put_user(len, optlen) ||
100676- copy_to_user(optval, &raw_sk(sk)->filter, len))
100677+ filter = raw_sk(sk)->filter;
100678+ if (put_user(len, optlen) || len > sizeof filter ||
100679+ copy_to_user(optval, &filter, len))
100680 goto out;
100681 ret = 0;
100682 out: return ret;
100683@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
100684 sk_wmem_alloc_get(sp),
100685 sk_rmem_alloc_get(sp),
100686 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100687- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
100688+ atomic_read(&sp->sk_refcnt),
100689+#ifdef CONFIG_GRKERNSEC_HIDESYM
100690+ NULL,
100691+#else
100692+ sp,
100693+#endif
100694+ atomic_read_unchecked(&sp->sk_drops));
100695 }
100696
100697 static int raw_seq_show(struct seq_file *seq, void *v)
100698diff --git a/net/ipv4/route.c b/net/ipv4/route.c
100699index 58f141b..b759702 100644
100700--- a/net/ipv4/route.c
100701+++ b/net/ipv4/route.c
100702@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
100703
100704 static inline int rt_genid(struct net *net)
100705 {
100706- return atomic_read(&net->ipv4.rt_genid);
100707+ return atomic_read_unchecked(&net->ipv4.rt_genid);
100708 }
100709
100710 #ifdef CONFIG_PROC_FS
100711@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
100712 unsigned char shuffle;
100713
100714 get_random_bytes(&shuffle, sizeof(shuffle));
100715- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
100716+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
100717 }
100718
100719 /*
100720@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
100721
100722 static __net_init int rt_secret_timer_init(struct net *net)
100723 {
100724- atomic_set(&net->ipv4.rt_genid,
100725+ atomic_set_unchecked(&net->ipv4.rt_genid,
100726 (int) ((num_physpages ^ (num_physpages>>8)) ^
100727 (jiffies ^ (jiffies >> 7))));
100728
100729diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
100730index f095659..adc892a 100644
100731--- a/net/ipv4/tcp.c
100732+++ b/net/ipv4/tcp.c
100733@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
100734 int val;
100735 int err = 0;
100736
100737+ pax_track_stack();
100738+
100739 /* This is a string value all the others are int's */
100740 if (optname == TCP_CONGESTION) {
100741 char name[TCP_CA_NAME_MAX];
100742@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
100743 struct tcp_sock *tp = tcp_sk(sk);
100744 int val, len;
100745
100746+ pax_track_stack();
100747+
100748 if (get_user(len, optlen))
100749 return -EFAULT;
100750
100751diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
100752index 6fc7961..33bad4a 100644
100753--- a/net/ipv4/tcp_ipv4.c
100754+++ b/net/ipv4/tcp_ipv4.c
100755@@ -85,6 +85,9 @@
100756 int sysctl_tcp_tw_reuse __read_mostly;
100757 int sysctl_tcp_low_latency __read_mostly;
100758
100759+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100760+extern int grsec_enable_blackhole;
100761+#endif
100762
100763 #ifdef CONFIG_TCP_MD5SIG
100764 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
100765@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
100766 return 0;
100767
100768 reset:
100769+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100770+ if (!grsec_enable_blackhole)
100771+#endif
100772 tcp_v4_send_reset(rsk, skb);
100773 discard:
100774 kfree_skb(skb);
100775@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
100776 TCP_SKB_CB(skb)->sacked = 0;
100777
100778 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100779- if (!sk)
100780+ if (!sk) {
100781+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100782+ ret = 1;
100783+#endif
100784 goto no_tcp_socket;
100785+ }
100786
100787 process:
100788- if (sk->sk_state == TCP_TIME_WAIT)
100789+ if (sk->sk_state == TCP_TIME_WAIT) {
100790+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100791+ ret = 2;
100792+#endif
100793 goto do_time_wait;
100794+ }
100795
100796 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
100797 goto discard_and_relse;
100798@@ -1651,6 +1665,10 @@ no_tcp_socket:
100799 bad_packet:
100800 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100801 } else {
100802+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100803+ if (!grsec_enable_blackhole || (ret == 1 &&
100804+ (skb->dev->flags & IFF_LOOPBACK)))
100805+#endif
100806 tcp_v4_send_reset(NULL, skb);
100807 }
100808
100809@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
100810 0, /* non standard timer */
100811 0, /* open_requests have no inode */
100812 atomic_read(&sk->sk_refcnt),
100813+#ifdef CONFIG_GRKERNSEC_HIDESYM
100814+ NULL,
100815+#else
100816 req,
100817+#endif
100818 len);
100819 }
100820
100821@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
100822 sock_i_uid(sk),
100823 icsk->icsk_probes_out,
100824 sock_i_ino(sk),
100825- atomic_read(&sk->sk_refcnt), sk,
100826+ atomic_read(&sk->sk_refcnt),
100827+#ifdef CONFIG_GRKERNSEC_HIDESYM
100828+ NULL,
100829+#else
100830+ sk,
100831+#endif
100832 jiffies_to_clock_t(icsk->icsk_rto),
100833 jiffies_to_clock_t(icsk->icsk_ack.ato),
100834 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
100835@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
100836 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
100837 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
100838 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
100839- atomic_read(&tw->tw_refcnt), tw, len);
100840+ atomic_read(&tw->tw_refcnt),
100841+#ifdef CONFIG_GRKERNSEC_HIDESYM
100842+ NULL,
100843+#else
100844+ tw,
100845+#endif
100846+ len);
100847 }
100848
100849 #define TMPSZ 150
100850diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
100851index 4c03598..e09a8e8 100644
100852--- a/net/ipv4/tcp_minisocks.c
100853+++ b/net/ipv4/tcp_minisocks.c
100854@@ -26,6 +26,10 @@
100855 #include <net/inet_common.h>
100856 #include <net/xfrm.h>
100857
100858+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100859+extern int grsec_enable_blackhole;
100860+#endif
100861+
100862 #ifdef CONFIG_SYSCTL
100863 #define SYNC_INIT 0 /* let the user enable it */
100864 #else
100865@@ -672,6 +676,10 @@ listen_overflow:
100866
100867 embryonic_reset:
100868 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
100869+
100870+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100871+ if (!grsec_enable_blackhole)
100872+#endif
100873 if (!(flg & TCP_FLAG_RST))
100874 req->rsk_ops->send_reset(sk, skb);
100875
100876diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
100877index af83bdf..ec91cb2 100644
100878--- a/net/ipv4/tcp_output.c
100879+++ b/net/ipv4/tcp_output.c
100880@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
100881 __u8 *md5_hash_location;
100882 int mss;
100883
100884+ pax_track_stack();
100885+
100886 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
100887 if (skb == NULL)
100888 return NULL;
100889diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
100890index 59f5b5e..193860f 100644
100891--- a/net/ipv4/tcp_probe.c
100892+++ b/net/ipv4/tcp_probe.c
100893@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
100894 if (cnt + width >= len)
100895 break;
100896
100897- if (copy_to_user(buf + cnt, tbuf, width))
100898+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
100899 return -EFAULT;
100900 cnt += width;
100901 }
100902diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
100903index 57d5501..a9ed13a 100644
100904--- a/net/ipv4/tcp_timer.c
100905+++ b/net/ipv4/tcp_timer.c
100906@@ -21,6 +21,10 @@
100907 #include <linux/module.h>
100908 #include <net/tcp.h>
100909
100910+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100911+extern int grsec_lastack_retries;
100912+#endif
100913+
100914 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
100915 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
100916 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
100917@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
100918 }
100919 }
100920
100921+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100922+ if ((sk->sk_state == TCP_LAST_ACK) &&
100923+ (grsec_lastack_retries > 0) &&
100924+ (grsec_lastack_retries < retry_until))
100925+ retry_until = grsec_lastack_retries;
100926+#endif
100927+
100928 if (retransmits_timed_out(sk, retry_until)) {
100929 /* Has it gone just too far? */
100930 tcp_write_err(sk);
100931diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
100932index 8e28770..72105c8 100644
100933--- a/net/ipv4/udp.c
100934+++ b/net/ipv4/udp.c
100935@@ -86,6 +86,7 @@
100936 #include <linux/types.h>
100937 #include <linux/fcntl.h>
100938 #include <linux/module.h>
100939+#include <linux/security.h>
100940 #include <linux/socket.h>
100941 #include <linux/sockios.h>
100942 #include <linux/igmp.h>
100943@@ -106,6 +107,10 @@
100944 #include <net/xfrm.h>
100945 #include "udp_impl.h"
100946
100947+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100948+extern int grsec_enable_blackhole;
100949+#endif
100950+
100951 struct udp_table udp_table;
100952 EXPORT_SYMBOL(udp_table);
100953
100954@@ -371,6 +376,9 @@ found:
100955 return s;
100956 }
100957
100958+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
100959+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
100960+
100961 /*
100962 * This routine is called by the ICMP module when it gets some
100963 * sort of error condition. If err < 0 then the socket should
100964@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
100965 dport = usin->sin_port;
100966 if (dport == 0)
100967 return -EINVAL;
100968+
100969+ err = gr_search_udp_sendmsg(sk, usin);
100970+ if (err)
100971+ return err;
100972 } else {
100973 if (sk->sk_state != TCP_ESTABLISHED)
100974 return -EDESTADDRREQ;
100975+
100976+ err = gr_search_udp_sendmsg(sk, NULL);
100977+ if (err)
100978+ return err;
100979+
100980 daddr = inet->daddr;
100981 dport = inet->dport;
100982 /* Open fast path for connected socket.
100983@@ -945,6 +962,10 @@ try_again:
100984 if (!skb)
100985 goto out;
100986
100987+ err = gr_search_udp_recvmsg(sk, skb);
100988+ if (err)
100989+ goto out_free;
100990+
100991 ulen = skb->len - sizeof(struct udphdr);
100992 copied = len;
100993 if (copied > ulen)
100994@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
100995 if (rc == -ENOMEM) {
100996 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
100997 is_udplite);
100998- atomic_inc(&sk->sk_drops);
100999+ atomic_inc_unchecked(&sk->sk_drops);
101000 }
101001 goto drop;
101002 }
101003@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101004 goto csum_error;
101005
101006 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
101007+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101008+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101009+#endif
101010 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
101011
101012 /*
101013@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
101014 sk_wmem_alloc_get(sp),
101015 sk_rmem_alloc_get(sp),
101016 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
101017- atomic_read(&sp->sk_refcnt), sp,
101018- atomic_read(&sp->sk_drops), len);
101019+ atomic_read(&sp->sk_refcnt),
101020+#ifdef CONFIG_GRKERNSEC_HIDESYM
101021+ NULL,
101022+#else
101023+ sp,
101024+#endif
101025+ atomic_read_unchecked(&sp->sk_drops), len);
101026 }
101027
101028 int udp4_seq_show(struct seq_file *seq, void *v)
101029diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
101030index 8ac3d09..fc58c5f 100644
101031--- a/net/ipv6/addrconf.c
101032+++ b/net/ipv6/addrconf.c
101033@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
101034 p.iph.ihl = 5;
101035 p.iph.protocol = IPPROTO_IPV6;
101036 p.iph.ttl = 64;
101037- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
101038+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
101039
101040 if (ops->ndo_do_ioctl) {
101041 mm_segment_t oldfs = get_fs();
101042diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
101043index cc4797d..7cfdfcc 100644
101044--- a/net/ipv6/inet6_connection_sock.c
101045+++ b/net/ipv6/inet6_connection_sock.c
101046@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
101047 #ifdef CONFIG_XFRM
101048 {
101049 struct rt6_info *rt = (struct rt6_info *)dst;
101050- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
101051+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
101052 }
101053 #endif
101054 }
101055@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
101056 #ifdef CONFIG_XFRM
101057 if (dst) {
101058 struct rt6_info *rt = (struct rt6_info *)dst;
101059- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
101060+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
101061 sk->sk_dst_cache = NULL;
101062 dst_release(dst);
101063 dst = NULL;
101064diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
101065index 093e9b2..f72cddb 100644
101066--- a/net/ipv6/inet6_hashtables.c
101067+++ b/net/ipv6/inet6_hashtables.c
101068@@ -119,7 +119,7 @@ out:
101069 }
101070 EXPORT_SYMBOL(__inet6_lookup_established);
101071
101072-static int inline compute_score(struct sock *sk, struct net *net,
101073+static inline int compute_score(struct sock *sk, struct net *net,
101074 const unsigned short hnum,
101075 const struct in6_addr *daddr,
101076 const int dif)
101077diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
101078index 4f7aaf6..f7acf45 100644
101079--- a/net/ipv6/ipv6_sockglue.c
101080+++ b/net/ipv6/ipv6_sockglue.c
101081@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
101082 int val, valbool;
101083 int retv = -ENOPROTOOPT;
101084
101085+ pax_track_stack();
101086+
101087 if (optval == NULL)
101088 val=0;
101089 else {
101090@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101091 int len;
101092 int val;
101093
101094+ pax_track_stack();
101095+
101096 if (ip6_mroute_opt(optname))
101097 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
101098
101099@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101100 if (sk->sk_type != SOCK_STREAM)
101101 return -ENOPROTOOPT;
101102
101103- msg.msg_control = optval;
101104+ msg.msg_control = (void __force_kernel *)optval;
101105 msg.msg_controllen = len;
101106 msg.msg_flags = 0;
101107
101108diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
101109index 1cf3f0c..1d4376f 100644
101110--- a/net/ipv6/netfilter/ip6_queue.c
101111+++ b/net/ipv6/netfilter/ip6_queue.c
101112@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
101113
101114 if (v->data_len < sizeof(*user_iph))
101115 return 0;
101116+ if (v->data_len > 65535)
101117+ return -EMSGSIZE;
101118+
101119 diff = v->data_len - e->skb->len;
101120 if (diff < 0) {
101121 if (pskb_trim(e->skb, v->data_len))
101122@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
101123 static inline void
101124 __ipq_rcv_skb(struct sk_buff *skb)
101125 {
101126- int status, type, pid, flags, nlmsglen, skblen;
101127+ int status, type, pid, flags;
101128+ unsigned int nlmsglen, skblen;
101129 struct nlmsghdr *nlh;
101130
101131 skblen = skb->len;
101132diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
101133index 78b5a36..2b9bb06 100644
101134--- a/net/ipv6/netfilter/ip6_tables.c
101135+++ b/net/ipv6/netfilter/ip6_tables.c
101136@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
101137 private = &tmp;
101138 }
101139 #endif
101140+ memset(&info, 0, sizeof(info));
101141 info.valid_hooks = t->valid_hooks;
101142 memcpy(info.hook_entry, private->hook_entry,
101143 sizeof(info.hook_entry));
101144@@ -1240,6 +1241,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
101145 static int
101146 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101147 struct xt_table_info *newinfo, unsigned int num_counters,
101148+ void __user *counters_ptr) __size_overflow(5);
101149+static int
101150+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101151+ struct xt_table_info *newinfo, unsigned int num_counters,
101152 void __user *counters_ptr)
101153 {
101154 int ret;
101155@@ -1373,6 +1378,9 @@ add_counter_to_entry(struct ip6t_entry *e,
101156
101157 static int
101158 do_add_counters(struct net *net, void __user *user, unsigned int len,
101159+ int compat) __size_overflow(3);
101160+static int
101161+do_add_counters(struct net *net, void __user *user, unsigned int len,
101162 int compat)
101163 {
101164 unsigned int i, curcpu;
101165diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
101166index 4f24570..b813b34 100644
101167--- a/net/ipv6/raw.c
101168+++ b/net/ipv6/raw.c
101169@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
101170 {
101171 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
101172 skb_checksum_complete(skb)) {
101173- atomic_inc(&sk->sk_drops);
101174+ atomic_inc_unchecked(&sk->sk_drops);
101175 kfree_skb(skb);
101176 return NET_RX_DROP;
101177 }
101178
101179 /* Charge it to the socket. */
101180 if (sock_queue_rcv_skb(sk,skb)<0) {
101181- atomic_inc(&sk->sk_drops);
101182+ atomic_inc_unchecked(&sk->sk_drops);
101183 kfree_skb(skb);
101184 return NET_RX_DROP;
101185 }
101186@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101187 struct raw6_sock *rp = raw6_sk(sk);
101188
101189 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
101190- atomic_inc(&sk->sk_drops);
101191+ atomic_inc_unchecked(&sk->sk_drops);
101192 kfree_skb(skb);
101193 return NET_RX_DROP;
101194 }
101195@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101196
101197 if (inet->hdrincl) {
101198 if (skb_checksum_complete(skb)) {
101199- atomic_inc(&sk->sk_drops);
101200+ atomic_inc_unchecked(&sk->sk_drops);
101201 kfree_skb(skb);
101202 return NET_RX_DROP;
101203 }
101204@@ -518,7 +518,7 @@ csum_copy_err:
101205 as some normal condition.
101206 */
101207 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
101208- atomic_inc(&sk->sk_drops);
101209+ atomic_inc_unchecked(&sk->sk_drops);
101210 goto out;
101211 }
101212
101213@@ -600,7 +600,7 @@ out:
101214 return err;
101215 }
101216
101217-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
101218+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
101219 struct flowi *fl, struct rt6_info *rt,
101220 unsigned int flags)
101221 {
101222@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
101223 u16 proto;
101224 int err;
101225
101226+ pax_track_stack();
101227+
101228 /* Rough check on arithmetic overflow,
101229 better check is made in ip6_append_data().
101230 */
101231@@ -916,12 +918,17 @@ do_confirm:
101232 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
101233 char __user *optval, int optlen)
101234 {
101235+ struct icmp6_filter filter;
101236+
101237 switch (optname) {
101238 case ICMPV6_FILTER:
101239+ if (optlen < 0)
101240+ return -EINVAL;
101241 if (optlen > sizeof(struct icmp6_filter))
101242 optlen = sizeof(struct icmp6_filter);
101243- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
101244+ if (copy_from_user(&filter, optval, optlen))
101245 return -EFAULT;
101246+ raw6_sk(sk)->filter = filter;
101247 return 0;
101248 default:
101249 return -ENOPROTOOPT;
101250@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101251 char __user *optval, int __user *optlen)
101252 {
101253 int len;
101254+ struct icmp6_filter filter;
101255
101256 switch (optname) {
101257 case ICMPV6_FILTER:
101258@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101259 len = sizeof(struct icmp6_filter);
101260 if (put_user(len, optlen))
101261 return -EFAULT;
101262- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
101263+ filter = raw6_sk(sk)->filter;
101264+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
101265 return -EFAULT;
101266 return 0;
101267 default:
101268@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
101269 0, 0L, 0,
101270 sock_i_uid(sp), 0,
101271 sock_i_ino(sp),
101272- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
101273+ atomic_read(&sp->sk_refcnt),
101274+#ifdef CONFIG_GRKERNSEC_HIDESYM
101275+ NULL,
101276+#else
101277+ sp,
101278+#endif
101279+ atomic_read_unchecked(&sp->sk_drops));
101280 }
101281
101282 static int raw6_seq_show(struct seq_file *seq, void *v)
101283diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
101284index faae6df..d4430c1 100644
101285--- a/net/ipv6/tcp_ipv6.c
101286+++ b/net/ipv6/tcp_ipv6.c
101287@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
101288 }
101289 #endif
101290
101291+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101292+extern int grsec_enable_blackhole;
101293+#endif
101294+
101295 static void tcp_v6_hash(struct sock *sk)
101296 {
101297 if (sk->sk_state != TCP_CLOSE) {
101298@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
101299 return 0;
101300
101301 reset:
101302+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101303+ if (!grsec_enable_blackhole)
101304+#endif
101305 tcp_v6_send_reset(sk, skb);
101306 discard:
101307 if (opt_skb)
101308@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
101309 TCP_SKB_CB(skb)->sacked = 0;
101310
101311 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
101312- if (!sk)
101313+ if (!sk) {
101314+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101315+ ret = 1;
101316+#endif
101317 goto no_tcp_socket;
101318+ }
101319
101320 process:
101321- if (sk->sk_state == TCP_TIME_WAIT)
101322+ if (sk->sk_state == TCP_TIME_WAIT) {
101323+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101324+ ret = 2;
101325+#endif
101326 goto do_time_wait;
101327+ }
101328
101329 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
101330 goto discard_and_relse;
101331@@ -1701,6 +1716,10 @@ no_tcp_socket:
101332 bad_packet:
101333 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
101334 } else {
101335+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101336+ if (!grsec_enable_blackhole || (ret == 1 &&
101337+ (skb->dev->flags & IFF_LOOPBACK)))
101338+#endif
101339 tcp_v6_send_reset(NULL, skb);
101340 }
101341
101342@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
101343 uid,
101344 0, /* non standard timer */
101345 0, /* open_requests have no inode */
101346- 0, req);
101347+ 0,
101348+#ifdef CONFIG_GRKERNSEC_HIDESYM
101349+ NULL
101350+#else
101351+ req
101352+#endif
101353+ );
101354 }
101355
101356 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101357@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101358 sock_i_uid(sp),
101359 icsk->icsk_probes_out,
101360 sock_i_ino(sp),
101361- atomic_read(&sp->sk_refcnt), sp,
101362+ atomic_read(&sp->sk_refcnt),
101363+#ifdef CONFIG_GRKERNSEC_HIDESYM
101364+ NULL,
101365+#else
101366+ sp,
101367+#endif
101368 jiffies_to_clock_t(icsk->icsk_rto),
101369 jiffies_to_clock_t(icsk->icsk_ack.ato),
101370 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
101371@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
101372 dest->s6_addr32[2], dest->s6_addr32[3], destp,
101373 tw->tw_substate, 0, 0,
101374 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
101375- atomic_read(&tw->tw_refcnt), tw);
101376+ atomic_read(&tw->tw_refcnt),
101377+#ifdef CONFIG_GRKERNSEC_HIDESYM
101378+ NULL
101379+#else
101380+ tw
101381+#endif
101382+ );
101383 }
101384
101385 static int tcp6_seq_show(struct seq_file *seq, void *v)
101386diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
101387index 9cc6289..052c521 100644
101388--- a/net/ipv6/udp.c
101389+++ b/net/ipv6/udp.c
101390@@ -49,6 +49,10 @@
101391 #include <linux/seq_file.h>
101392 #include "udp_impl.h"
101393
101394+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101395+extern int grsec_enable_blackhole;
101396+#endif
101397+
101398 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
101399 {
101400 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
101401@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
101402 if (rc == -ENOMEM) {
101403 UDP6_INC_STATS_BH(sock_net(sk),
101404 UDP_MIB_RCVBUFERRORS, is_udplite);
101405- atomic_inc(&sk->sk_drops);
101406+ atomic_inc_unchecked(&sk->sk_drops);
101407 }
101408 goto drop;
101409 }
101410@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101411 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
101412 proto == IPPROTO_UDPLITE);
101413
101414+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101415+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101416+#endif
101417 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
101418
101419 kfree_skb(skb);
101420@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
101421 0, 0L, 0,
101422 sock_i_uid(sp), 0,
101423 sock_i_ino(sp),
101424- atomic_read(&sp->sk_refcnt), sp,
101425- atomic_read(&sp->sk_drops));
101426+ atomic_read(&sp->sk_refcnt),
101427+#ifdef CONFIG_GRKERNSEC_HIDESYM
101428+ NULL,
101429+#else
101430+ sp,
101431+#endif
101432+ atomic_read_unchecked(&sp->sk_drops));
101433 }
101434
101435 int udp6_seq_show(struct seq_file *seq, void *v)
101436diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
101437index 48bb1e3..5980e6e 100644
101438--- a/net/ipv6/xfrm6_tunnel.c
101439+++ b/net/ipv6/xfrm6_tunnel.c
101440@@ -258,7 +258,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
101441 __be32 spi;
101442
101443 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
101444- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
101445+ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
101446 }
101447
101448 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
101449diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
101450index 811984d..11f59b7 100644
101451--- a/net/irda/ircomm/ircomm_tty.c
101452+++ b/net/irda/ircomm/ircomm_tty.c
101453@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101454 add_wait_queue(&self->open_wait, &wait);
101455
101456 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
101457- __FILE__,__LINE__, tty->driver->name, self->open_count );
101458+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101459
101460 /* As far as I can see, we protect open_count - Jean II */
101461 spin_lock_irqsave(&self->spinlock, flags);
101462 if (!tty_hung_up_p(filp)) {
101463 extra_count = 1;
101464- self->open_count--;
101465+ local_dec(&self->open_count);
101466 }
101467 spin_unlock_irqrestore(&self->spinlock, flags);
101468- self->blocked_open++;
101469+ local_inc(&self->blocked_open);
101470
101471 while (1) {
101472 if (tty->termios->c_cflag & CBAUD) {
101473@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101474 }
101475
101476 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
101477- __FILE__,__LINE__, tty->driver->name, self->open_count );
101478+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101479
101480 schedule();
101481 }
101482@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101483 if (extra_count) {
101484 /* ++ is not atomic, so this should be protected - Jean II */
101485 spin_lock_irqsave(&self->spinlock, flags);
101486- self->open_count++;
101487+ local_inc(&self->open_count);
101488 spin_unlock_irqrestore(&self->spinlock, flags);
101489 }
101490- self->blocked_open--;
101491+ local_dec(&self->blocked_open);
101492
101493 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
101494- __FILE__,__LINE__, tty->driver->name, self->open_count);
101495+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
101496
101497 if (!retval)
101498 self->flags |= ASYNC_NORMAL_ACTIVE;
101499@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
101500 }
101501 /* ++ is not atomic, so this should be protected - Jean II */
101502 spin_lock_irqsave(&self->spinlock, flags);
101503- self->open_count++;
101504+ local_inc(&self->open_count);
101505
101506 tty->driver_data = self;
101507 self->tty = tty;
101508 spin_unlock_irqrestore(&self->spinlock, flags);
101509
101510 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
101511- self->line, self->open_count);
101512+ self->line, local_read(&self->open_count));
101513
101514 /* Not really used by us, but lets do it anyway */
101515 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
101516@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101517 return;
101518 }
101519
101520- if ((tty->count == 1) && (self->open_count != 1)) {
101521+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
101522 /*
101523 * Uh, oh. tty->count is 1, which means that the tty
101524 * structure will be freed. state->count should always
101525@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101526 */
101527 IRDA_DEBUG(0, "%s(), bad serial port count; "
101528 "tty->count is 1, state->count is %d\n", __func__ ,
101529- self->open_count);
101530- self->open_count = 1;
101531+ local_read(&self->open_count));
101532+ local_set(&self->open_count, 1);
101533 }
101534
101535- if (--self->open_count < 0) {
101536+ if (local_dec_return(&self->open_count) < 0) {
101537 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
101538- __func__, self->line, self->open_count);
101539- self->open_count = 0;
101540+ __func__, self->line, local_read(&self->open_count));
101541+ local_set(&self->open_count, 0);
101542 }
101543- if (self->open_count) {
101544+ if (local_read(&self->open_count)) {
101545 spin_unlock_irqrestore(&self->spinlock, flags);
101546
101547 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
101548@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101549 tty->closing = 0;
101550 self->tty = NULL;
101551
101552- if (self->blocked_open) {
101553+ if (local_read(&self->blocked_open)) {
101554 if (self->close_delay)
101555 schedule_timeout_interruptible(self->close_delay);
101556 wake_up_interruptible(&self->open_wait);
101557@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
101558 spin_lock_irqsave(&self->spinlock, flags);
101559 self->flags &= ~ASYNC_NORMAL_ACTIVE;
101560 self->tty = NULL;
101561- self->open_count = 0;
101562+ local_set(&self->open_count, 0);
101563 spin_unlock_irqrestore(&self->spinlock, flags);
101564
101565 wake_up_interruptible(&self->open_wait);
101566@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
101567 seq_putc(m, '\n');
101568
101569 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
101570- seq_printf(m, "Open count: %d\n", self->open_count);
101571+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
101572 seq_printf(m, "Max data size: %d\n", self->max_data_size);
101573 seq_printf(m, "Max header size: %d\n", self->max_header_size);
101574
101575diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
101576index bada1b9..f325943 100644
101577--- a/net/iucv/af_iucv.c
101578+++ b/net/iucv/af_iucv.c
101579@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
101580
101581 write_lock_bh(&iucv_sk_list.lock);
101582
101583- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
101584+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101585 while (__iucv_get_sock_by_name(name)) {
101586 sprintf(name, "%08x",
101587- atomic_inc_return(&iucv_sk_list.autobind_name));
101588+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101589 }
101590
101591 write_unlock_bh(&iucv_sk_list.lock);
101592diff --git a/net/key/af_key.c b/net/key/af_key.c
101593index 4e98193..439b449 100644
101594--- a/net/key/af_key.c
101595+++ b/net/key/af_key.c
101596@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
101597 struct xfrm_migrate m[XFRM_MAX_DEPTH];
101598 struct xfrm_kmaddress k;
101599
101600+ pax_track_stack();
101601+
101602 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
101603 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
101604 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
101605@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
101606 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
101607 else
101608 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
101609+#ifdef CONFIG_GRKERNSEC_HIDESYM
101610+ NULL,
101611+#else
101612 s,
101613+#endif
101614 atomic_read(&s->sk_refcnt),
101615 sk_rmem_alloc_get(s),
101616 sk_wmem_alloc_get(s),
101617diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
101618index bda96d1..c038b72 100644
101619--- a/net/lapb/lapb_iface.c
101620+++ b/net/lapb/lapb_iface.c
101621@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
101622 goto out;
101623
101624 lapb->dev = dev;
101625- lapb->callbacks = *callbacks;
101626+ lapb->callbacks = callbacks;
101627
101628 __lapb_insert_cb(lapb);
101629
101630@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
101631
101632 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
101633 {
101634- if (lapb->callbacks.connect_confirmation)
101635- lapb->callbacks.connect_confirmation(lapb->dev, reason);
101636+ if (lapb->callbacks->connect_confirmation)
101637+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
101638 }
101639
101640 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
101641 {
101642- if (lapb->callbacks.connect_indication)
101643- lapb->callbacks.connect_indication(lapb->dev, reason);
101644+ if (lapb->callbacks->connect_indication)
101645+ lapb->callbacks->connect_indication(lapb->dev, reason);
101646 }
101647
101648 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
101649 {
101650- if (lapb->callbacks.disconnect_confirmation)
101651- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
101652+ if (lapb->callbacks->disconnect_confirmation)
101653+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
101654 }
101655
101656 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
101657 {
101658- if (lapb->callbacks.disconnect_indication)
101659- lapb->callbacks.disconnect_indication(lapb->dev, reason);
101660+ if (lapb->callbacks->disconnect_indication)
101661+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
101662 }
101663
101664 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
101665 {
101666- if (lapb->callbacks.data_indication)
101667- return lapb->callbacks.data_indication(lapb->dev, skb);
101668+ if (lapb->callbacks->data_indication)
101669+ return lapb->callbacks->data_indication(lapb->dev, skb);
101670
101671 kfree_skb(skb);
101672 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
101673@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
101674 {
101675 int used = 0;
101676
101677- if (lapb->callbacks.data_transmit) {
101678- lapb->callbacks.data_transmit(lapb->dev, skb);
101679+ if (lapb->callbacks->data_transmit) {
101680+ lapb->callbacks->data_transmit(lapb->dev, skb);
101681 used = 1;
101682 }
101683
101684diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
101685index fe2d3f8..e57f683 100644
101686--- a/net/mac80211/cfg.c
101687+++ b/net/mac80211/cfg.c
101688@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
101689 return err;
101690 }
101691
101692-struct cfg80211_ops mac80211_config_ops = {
101693+const struct cfg80211_ops mac80211_config_ops = {
101694 .add_virtual_intf = ieee80211_add_iface,
101695 .del_virtual_intf = ieee80211_del_iface,
101696 .change_virtual_intf = ieee80211_change_iface,
101697diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
101698index 7d7879f..2d51f62 100644
101699--- a/net/mac80211/cfg.h
101700+++ b/net/mac80211/cfg.h
101701@@ -4,6 +4,6 @@
101702 #ifndef __CFG_H
101703 #define __CFG_H
101704
101705-extern struct cfg80211_ops mac80211_config_ops;
101706+extern const struct cfg80211_ops mac80211_config_ops;
101707
101708 #endif /* __CFG_H */
101709diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
101710index 99c7525..9cb4937 100644
101711--- a/net/mac80211/debugfs_key.c
101712+++ b/net/mac80211/debugfs_key.c
101713@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
101714 size_t count, loff_t *ppos)
101715 {
101716 struct ieee80211_key *key = file->private_data;
101717- int i, res, bufsize = 2 * key->conf.keylen + 2;
101718+ int i, bufsize = 2 * key->conf.keylen + 2;
101719 char *buf = kmalloc(bufsize, GFP_KERNEL);
101720 char *p = buf;
101721+ ssize_t res;
101722+
101723+ if (buf == NULL)
101724+ return -ENOMEM;
101725
101726 for (i = 0; i < key->conf.keylen; i++)
101727 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
101728diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
101729index 33a2e89..08650c8 100644
101730--- a/net/mac80211/debugfs_sta.c
101731+++ b/net/mac80211/debugfs_sta.c
101732@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
101733 int i;
101734 struct sta_info *sta = file->private_data;
101735
101736+ pax_track_stack();
101737+
101738 spin_lock_bh(&sta->lock);
101739 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
101740 sta->ampdu_mlme.dialog_token_allocator + 1);
101741diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
101742index ca62bfe..6657a03 100644
101743--- a/net/mac80211/ieee80211_i.h
101744+++ b/net/mac80211/ieee80211_i.h
101745@@ -25,6 +25,7 @@
101746 #include <linux/etherdevice.h>
101747 #include <net/cfg80211.h>
101748 #include <net/mac80211.h>
101749+#include <asm/local.h>
101750 #include "key.h"
101751 #include "sta_info.h"
101752
101753@@ -635,7 +636,7 @@ struct ieee80211_local {
101754 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
101755 spinlock_t queue_stop_reason_lock;
101756
101757- int open_count;
101758+ local_t open_count;
101759 int monitors, cooked_mntrs;
101760 /* number of interfaces with corresponding FIF_ flags */
101761 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
101762diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
101763index 079c500..eb3c6d4 100644
101764--- a/net/mac80211/iface.c
101765+++ b/net/mac80211/iface.c
101766@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
101767 break;
101768 }
101769
101770- if (local->open_count == 0) {
101771+ if (local_read(&local->open_count) == 0) {
101772 res = drv_start(local);
101773 if (res)
101774 goto err_del_bss;
101775@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
101776 * Validate the MAC address for this device.
101777 */
101778 if (!is_valid_ether_addr(dev->dev_addr)) {
101779- if (!local->open_count)
101780+ if (!local_read(&local->open_count))
101781 drv_stop(local);
101782 return -EADDRNOTAVAIL;
101783 }
101784@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
101785
101786 hw_reconf_flags |= __ieee80211_recalc_idle(local);
101787
101788- local->open_count++;
101789+ local_inc(&local->open_count);
101790 if (hw_reconf_flags) {
101791 ieee80211_hw_config(local, hw_reconf_flags);
101792 /*
101793@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
101794 err_del_interface:
101795 drv_remove_interface(local, &conf);
101796 err_stop:
101797- if (!local->open_count)
101798+ if (!local_read(&local->open_count))
101799 drv_stop(local);
101800 err_del_bss:
101801 sdata->bss = NULL;
101802@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
101803 WARN_ON(!list_empty(&sdata->u.ap.vlans));
101804 }
101805
101806- local->open_count--;
101807+ local_dec(&local->open_count);
101808
101809 switch (sdata->vif.type) {
101810 case NL80211_IFTYPE_AP_VLAN:
101811@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
101812
101813 ieee80211_recalc_ps(local, -1);
101814
101815- if (local->open_count == 0) {
101816+ if (local_read(&local->open_count) == 0) {
101817 ieee80211_clear_tx_pending(local);
101818 ieee80211_stop_device(local);
101819
101820diff --git a/net/mac80211/main.c b/net/mac80211/main.c
101821index 2dfe176..74e4388 100644
101822--- a/net/mac80211/main.c
101823+++ b/net/mac80211/main.c
101824@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
101825 local->hw.conf.power_level = power;
101826 }
101827
101828- if (changed && local->open_count) {
101829+ if (changed && local_read(&local->open_count)) {
101830 ret = drv_config(local, changed);
101831 /*
101832 * Goal:
101833diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
101834index e67eea7..fcc227e 100644
101835--- a/net/mac80211/mlme.c
101836+++ b/net/mac80211/mlme.c
101837@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
101838 bool have_higher_than_11mbit = false, newsta = false;
101839 u16 ap_ht_cap_flags;
101840
101841+ pax_track_stack();
101842+
101843 /*
101844 * AssocResp and ReassocResp have identical structure, so process both
101845 * of them in this function.
101846diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
101847index e535f1c..4d733d1 100644
101848--- a/net/mac80211/pm.c
101849+++ b/net/mac80211/pm.c
101850@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
101851 }
101852
101853 /* stop hardware - this must stop RX */
101854- if (local->open_count)
101855+ if (local_read(&local->open_count))
101856 ieee80211_stop_device(local);
101857
101858 local->suspended = true;
101859diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
101860index b33efc4..0a2efb6 100644
101861--- a/net/mac80211/rate.c
101862+++ b/net/mac80211/rate.c
101863@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
101864 struct rate_control_ref *ref, *old;
101865
101866 ASSERT_RTNL();
101867- if (local->open_count)
101868+ if (local_read(&local->open_count))
101869 return -EBUSY;
101870
101871 ref = rate_control_alloc(name, local);
101872diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
101873index b1d7904..57e4da7 100644
101874--- a/net/mac80211/tx.c
101875+++ b/net/mac80211/tx.c
101876@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
101877 return cpu_to_le16(dur);
101878 }
101879
101880-static int inline is_ieee80211_device(struct ieee80211_local *local,
101881+static inline int is_ieee80211_device(struct ieee80211_local *local,
101882 struct net_device *dev)
101883 {
101884 return local == wdev_priv(dev->ieee80211_ptr);
101885diff --git a/net/mac80211/util.c b/net/mac80211/util.c
101886index 31b1085..48fb26d 100644
101887--- a/net/mac80211/util.c
101888+++ b/net/mac80211/util.c
101889@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
101890 local->resuming = true;
101891
101892 /* restart hardware */
101893- if (local->open_count) {
101894+ if (local_read(&local->open_count)) {
101895 /*
101896 * Upon resume hardware can sometimes be goofy due to
101897 * various platform / driver / bus issues, so restarting
101898diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
101899index 634d14a..b35a608 100644
101900--- a/net/netfilter/Kconfig
101901+++ b/net/netfilter/Kconfig
101902@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
101903
101904 To compile it as a module, choose M here. If unsure, say N.
101905
101906+config NETFILTER_XT_MATCH_GRADM
101907+ tristate '"gradm" match support'
101908+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
101909+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
101910+ ---help---
101911+ The gradm match allows to match on grsecurity RBAC being enabled.
101912+ It is useful when iptables rules are applied early on bootup to
101913+ prevent connections to the machine (except from a trusted host)
101914+ while the RBAC system is disabled.
101915+
101916 config NETFILTER_XT_MATCH_HASHLIMIT
101917 tristate '"hashlimit" match support'
101918 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
101919diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
101920index 49f62ee..a17b2c6 100644
101921--- a/net/netfilter/Makefile
101922+++ b/net/netfilter/Makefile
101923@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
101924 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
101925 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
101926 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
101927+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
101928 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
101929 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
101930 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
101931diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
101932index 3c7e427..724043c 100644
101933--- a/net/netfilter/ipvs/ip_vs_app.c
101934+++ b/net/netfilter/ipvs/ip_vs_app.c
101935@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
101936 .open = ip_vs_app_open,
101937 .read = seq_read,
101938 .llseek = seq_lseek,
101939- .release = seq_release,
101940+ .release = seq_release_net,
101941 };
101942 #endif
101943
101944diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
101945index 95682e5..457dbac 100644
101946--- a/net/netfilter/ipvs/ip_vs_conn.c
101947+++ b/net/netfilter/ipvs/ip_vs_conn.c
101948@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
101949 /* if the connection is not template and is created
101950 * by sync, preserve the activity flag.
101951 */
101952- cp->flags |= atomic_read(&dest->conn_flags) &
101953+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
101954 (~IP_VS_CONN_F_INACTIVE);
101955 else
101956- cp->flags |= atomic_read(&dest->conn_flags);
101957+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
101958 cp->dest = dest;
101959
101960 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
101961@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
101962 atomic_set(&cp->refcnt, 1);
101963
101964 atomic_set(&cp->n_control, 0);
101965- atomic_set(&cp->in_pkts, 0);
101966+ atomic_set_unchecked(&cp->in_pkts, 0);
101967
101968 atomic_inc(&ip_vs_conn_count);
101969 if (flags & IP_VS_CONN_F_NO_CPORT)
101970@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
101971 .open = ip_vs_conn_open,
101972 .read = seq_read,
101973 .llseek = seq_lseek,
101974- .release = seq_release,
101975+ .release = seq_release_net,
101976 };
101977
101978 static const char *ip_vs_origin_name(unsigned flags)
101979@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
101980 .open = ip_vs_conn_sync_open,
101981 .read = seq_read,
101982 .llseek = seq_lseek,
101983- .release = seq_release,
101984+ .release = seq_release_net,
101985 };
101986
101987 #endif
101988@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
101989
101990 /* Don't drop the entry if its number of incoming packets is not
101991 located in [0, 8] */
101992- i = atomic_read(&cp->in_pkts);
101993+ i = atomic_read_unchecked(&cp->in_pkts);
101994 if (i > 8 || i < 0) return 0;
101995
101996 if (!todrop_rate[i]) return 0;
101997diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
101998index b95699f..5fee919 100644
101999--- a/net/netfilter/ipvs/ip_vs_core.c
102000+++ b/net/netfilter/ipvs/ip_vs_core.c
102001@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
102002 ret = cp->packet_xmit(skb, cp, pp);
102003 /* do not touch skb anymore */
102004
102005- atomic_inc(&cp->in_pkts);
102006+ atomic_inc_unchecked(&cp->in_pkts);
102007 ip_vs_conn_put(cp);
102008 return ret;
102009 }
102010@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
102011 * Sync connection if it is about to close to
102012 * encorage the standby servers to update the connections timeout
102013 */
102014- pkts = atomic_add_return(1, &cp->in_pkts);
102015+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
102016 if (af == AF_INET &&
102017 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
102018 (((cp->protocol != IPPROTO_TCP ||
102019diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
102020index 02b2610..2d89424 100644
102021--- a/net/netfilter/ipvs/ip_vs_ctl.c
102022+++ b/net/netfilter/ipvs/ip_vs_ctl.c
102023@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
102024 ip_vs_rs_hash(dest);
102025 write_unlock_bh(&__ip_vs_rs_lock);
102026 }
102027- atomic_set(&dest->conn_flags, conn_flags);
102028+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
102029
102030 /* bind the service */
102031 if (!dest->svc) {
102032@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
102033 " %-7s %-6d %-10d %-10d\n",
102034 &dest->addr.in6,
102035 ntohs(dest->port),
102036- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
102037+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
102038 atomic_read(&dest->weight),
102039 atomic_read(&dest->activeconns),
102040 atomic_read(&dest->inactconns));
102041@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
102042 "%-7s %-6d %-10d %-10d\n",
102043 ntohl(dest->addr.ip),
102044 ntohs(dest->port),
102045- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
102046+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
102047 atomic_read(&dest->weight),
102048 atomic_read(&dest->activeconns),
102049 atomic_read(&dest->inactconns));
102050@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
102051 .open = ip_vs_info_open,
102052 .read = seq_read,
102053 .llseek = seq_lseek,
102054- .release = seq_release_private,
102055+ .release = seq_release_net,
102056 };
102057
102058 #endif
102059@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
102060 .open = ip_vs_stats_seq_open,
102061 .read = seq_read,
102062 .llseek = seq_lseek,
102063- .release = single_release,
102064+ .release = single_release_net,
102065 };
102066
102067 #endif
102068@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
102069
102070 entry.addr = dest->addr.ip;
102071 entry.port = dest->port;
102072- entry.conn_flags = atomic_read(&dest->conn_flags);
102073+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
102074 entry.weight = atomic_read(&dest->weight);
102075 entry.u_threshold = dest->u_threshold;
102076 entry.l_threshold = dest->l_threshold;
102077@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102078 unsigned char arg[128];
102079 int ret = 0;
102080
102081+ pax_track_stack();
102082+
102083 if (!capable(CAP_NET_ADMIN))
102084 return -EPERM;
102085
102086@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
102087 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
102088
102089 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
102090- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102091+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102092 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
102093 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
102094 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
102095diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
102096index e177f0d..55e8581 100644
102097--- a/net/netfilter/ipvs/ip_vs_sync.c
102098+++ b/net/netfilter/ipvs/ip_vs_sync.c
102099@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
102100
102101 if (opt)
102102 memcpy(&cp->in_seq, opt, sizeof(*opt));
102103- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102104+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102105 cp->state = state;
102106 cp->old_state = cp->state;
102107 /*
102108diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
102109index 30b3189..e2e4b55 100644
102110--- a/net/netfilter/ipvs/ip_vs_xmit.c
102111+++ b/net/netfilter/ipvs/ip_vs_xmit.c
102112@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
102113 else
102114 rc = NF_ACCEPT;
102115 /* do not touch skb anymore */
102116- atomic_inc(&cp->in_pkts);
102117+ atomic_inc_unchecked(&cp->in_pkts);
102118 goto out;
102119 }
102120
102121@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
102122 else
102123 rc = NF_ACCEPT;
102124 /* do not touch skb anymore */
102125- atomic_inc(&cp->in_pkts);
102126+ atomic_inc_unchecked(&cp->in_pkts);
102127 goto out;
102128 }
102129
102130diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
102131index d521718..d0fd7a1 100644
102132--- a/net/netfilter/nf_conntrack_netlink.c
102133+++ b/net/netfilter/nf_conntrack_netlink.c
102134@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
102135 static int
102136 ctnetlink_parse_tuple(const struct nlattr * const cda[],
102137 struct nf_conntrack_tuple *tuple,
102138- enum ctattr_tuple type, u_int8_t l3num)
102139+ enum ctattr_type type, u_int8_t l3num)
102140 {
102141 struct nlattr *tb[CTA_TUPLE_MAX+1];
102142 int err;
102143diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
102144index f900dc3..5e45346 100644
102145--- a/net/netfilter/nfnetlink_log.c
102146+++ b/net/netfilter/nfnetlink_log.c
102147@@ -68,7 +68,7 @@ struct nfulnl_instance {
102148 };
102149
102150 static DEFINE_RWLOCK(instances_lock);
102151-static atomic_t global_seq;
102152+static atomic_unchecked_t global_seq;
102153
102154 #define INSTANCE_BUCKETS 16
102155 static struct hlist_head instance_table[INSTANCE_BUCKETS];
102156@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
102157 /* global sequence number */
102158 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
102159 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
102160- htonl(atomic_inc_return(&global_seq)));
102161+ htonl(atomic_inc_return_unchecked(&global_seq)));
102162
102163 if (data_len) {
102164 struct nlattr *nla;
102165diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
102166new file mode 100644
102167index 0000000..b1bac76
102168--- /dev/null
102169+++ b/net/netfilter/xt_gradm.c
102170@@ -0,0 +1,51 @@
102171+/*
102172+ * gradm match for netfilter
102173