]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-2.6.32.59-201203221943.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.59-201203221943.patch
CommitLineData
3cba718f
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..4e87324 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,20 @@
6 *.a
7 *.aux
8 *.bin
9+*.c.[012].*
10+*.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 *.eps
18 *.fw
19+*.gcno
20 *.gen.S
21 *.gif
22+*.gmo
23 *.grep
24 *.grp
25 *.gz
26@@ -38,8 +43,10 @@
27 *.tab.h
28 *.tex
29 *.ver
30+*.vim
31 *.xml
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 *.9
37@@ -49,11 +56,16 @@
38 53c700_d.h
39 CVS
40 ChangeSet
41+GPATH
42+GRTAGS
43+GSYMS
44+GTAGS
45 Image
46 Kerntypes
47 Module.markers
48 Module.symvers
49 PENDING
50+PERF*
51 SCCS
52 System.map*
53 TAGS
54@@ -76,7 +88,11 @@ btfixupprep
55 build
56 bvmlinux
57 bzImage*
58+capability_names.h
59+capflags.c
60 classlist.h*
61+clut_vga16.c
62+common-cmds.h
63 comp*.log
64 compile.h*
65 conf
66@@ -84,6 +100,8 @@ config
67 config-*
68 config_data.h*
69 config_data.gz*
70+config.c
71+config.tmp
72 conmakehash
73 consolemap_deftbl.c*
74 cpustr.h
75@@ -97,19 +115,23 @@ elfconfig.h*
76 fixdep
77 fore200e_mkfirm
78 fore200e_pca_fw.c*
79+gate.lds
80 gconf
81 gen-devlist
82 gen_crc32table
83 gen_init_cpio
84 genksyms
85 *_gray256.c
86+hash
87+hid-example
88 ihex2fw
89 ikconfig.h*
90 initramfs_data.cpio
91+initramfs_data.cpio.bz2
92 initramfs_data.cpio.gz
93 initramfs_list
94 kallsyms
95-kconfig
96+kern_constants.h
97 keywords.c
98 ksym.c*
99 ksym.h*
100@@ -117,6 +139,7 @@ kxgettext
101 lkc_defs.h
102 lex.c
103 lex.*.c
104+lib1funcs.S
105 logo_*.c
106 logo_*_clut224.c
107 logo_*_mono.c
108@@ -127,13 +150,16 @@ machtypes.h
109 map
110 maui_boot.h
111 mconf
112+mdp
113 miboot*
114 mk_elfconfig
115 mkboot
116 mkbugboot
117 mkcpustr
118 mkdep
119+mkpiggy
120 mkprep
121+mkregtable
122 mktables
123 mktree
124 modpost
125@@ -149,6 +175,7 @@ patches*
126 pca200e.bin
127 pca200e_ecd.bin2
128 piggy.gz
129+piggy.S
130 piggyback
131 pnmtologo
132 ppc_defs.h*
133@@ -157,12 +184,15 @@ qconf
134 raid6altivec*.c
135 raid6int*.c
136 raid6tables.c
137+regdb.c
138 relocs
139+rlim_names.h
140 series
141 setup
142 setup.bin
143 setup.elf
144 sImage
145+slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149@@ -171,6 +201,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153+user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157@@ -186,14 +217,20 @@ version.h*
158 vmlinux
159 vmlinux-*
160 vmlinux.aout
161+vmlinux.bin.all
162+vmlinux.bin.bz2
163 vmlinux.lds
164+vmlinux.relocs
165+voffset.h
166 vsyscall.lds
167 vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171+utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177+zoffset.h
178diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
179index c840e7d..f4c451c 100644
180--- a/Documentation/kernel-parameters.txt
181+++ b/Documentation/kernel-parameters.txt
182@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
183 the specified number of seconds. This is to be used if
184 your oopses keep scrolling off the screen.
185
186+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
187+ virtualization environments that don't cope well with the
188+ expand down segment used by UDEREF on X86-32 or the frequent
189+ page table updates on X86-64.
190+
191+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
192+
193 pcbit= [HW,ISDN]
194
195 pcd. [PARIDE]
196diff --git a/MAINTAINERS b/MAINTAINERS
197index 613da5d..4fe3eda 100644
198--- a/MAINTAINERS
199+++ b/MAINTAINERS
200@@ -5725,6 +5725,14 @@ L: netdev@vger.kernel.org
201 S: Maintained
202 F: drivers/net/vmxnet3/
203
204+VMware PVSCSI driver
205+M: Alok Kataria <akataria@vmware.com>
206+M: VMware PV-Drivers <pv-drivers@vmware.com>
207+L: linux-scsi@vger.kernel.org
208+S: Maintained
209+F: drivers/scsi/vmw_pvscsi.c
210+F: drivers/scsi/vmw_pvscsi.h
211+
212 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
213 M: Liam Girdwood <lrg@slimlogic.co.uk>
214 M: Mark Brown <broonie@opensource.wolfsonmicro.com>
215diff --git a/Makefile b/Makefile
216index 3a9a721..e5a22f7 100644
217--- a/Makefile
218+++ b/Makefile
219@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
220
221 HOSTCC = gcc
222 HOSTCXX = g++
223-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
224-HOSTCXXFLAGS = -O2
225+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
226+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
227+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
228
229 # Decide whether to build built-in, modular, or both.
230 # Normally, just do built-in.
231@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
232 # Rules shared between *config targets and build targets
233
234 # Basic helpers built in scripts/
235-PHONY += scripts_basic
236-scripts_basic:
237+PHONY += scripts_basic gcc-plugins
238+scripts_basic: gcc-plugins
239 $(Q)$(MAKE) $(build)=scripts/basic
240
241 # To avoid any implicit rule to kick in, define an empty command.
242@@ -403,7 +404,7 @@ endif
243 # of make so .config is not included in this case either (for *config).
244
245 no-dot-config-targets := clean mrproper distclean \
246- cscope TAGS tags help %docs check% \
247+ cscope gtags TAGS tags help %docs check% \
248 include/linux/version.h headers_% \
249 kernelrelease kernelversion
250
251@@ -526,6 +527,53 @@ else
252 KBUILD_CFLAGS += -O2
253 endif
254
255+ifndef DISABLE_PAX_PLUGINS
256+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
257+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
258+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
259+endif
260+ifdef CONFIG_PAX_MEMORY_STACKLEAK
261+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
262+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
263+endif
264+ifdef CONFIG_KALLOCSTAT_PLUGIN
265+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
266+endif
267+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
268+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
269+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
270+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
271+endif
272+ifdef CONFIG_CHECKER_PLUGIN
273+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
274+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
275+endif
276+endif
277+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
278+ifdef CONFIG_PAX_SIZE_OVERFLOW
279+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
280+endif
281+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
282+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
283+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
284+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
285+ifeq ($(KBUILD_EXTMOD),)
286+gcc-plugins:
287+ $(Q)$(MAKE) $(build)=tools/gcc
288+else
289+gcc-plugins: ;
290+endif
291+else
292+gcc-plugins:
293+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
294+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
295+else
296+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
297+endif
298+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
299+endif
300+endif
301+
302 include $(srctree)/arch/$(SRCARCH)/Makefile
303
304 ifneq ($(CONFIG_FRAME_WARN),0)
305@@ -647,7 +695,7 @@ export mod_strip_cmd
306
307
308 ifeq ($(KBUILD_EXTMOD),)
309-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
310+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
311
312 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
313 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
314@@ -868,6 +916,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
315
316 # The actual objects are generated when descending,
317 # make sure no implicit rule kicks in
318+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
319+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
320 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
321
322 # Handle descending into subdirectories listed in $(vmlinux-dirs)
323@@ -877,7 +927,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
324 # Error messages still appears in the original language
325
326 PHONY += $(vmlinux-dirs)
327-$(vmlinux-dirs): prepare scripts
328+$(vmlinux-dirs): gcc-plugins prepare scripts
329 $(Q)$(MAKE) $(build)=$@
330
331 # Build the kernel release string
332@@ -986,6 +1036,7 @@ prepare0: archprepare FORCE
333 $(Q)$(MAKE) $(build)=. missing-syscalls
334
335 # All the preparing..
336+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
337 prepare: prepare0
338
339 # The asm symlink changes when $(ARCH) changes.
340@@ -1127,6 +1178,8 @@ all: modules
341 # using awk while concatenating to the final file.
342
343 PHONY += modules
344+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
345+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
346 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
347 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
348 @$(kecho) ' Building modules, stage 2.';
349@@ -1136,7 +1189,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
350
351 # Target to prepare building external modules
352 PHONY += modules_prepare
353-modules_prepare: prepare scripts
354+modules_prepare: gcc-plugins prepare scripts
355
356 # Target to install modules
357 PHONY += modules_install
358@@ -1201,7 +1254,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
359 include/linux/autoconf.h include/linux/version.h \
360 include/linux/utsrelease.h \
361 include/linux/bounds.h include/asm*/asm-offsets.h \
362- Module.symvers Module.markers tags TAGS cscope*
363+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
364
365 # clean - Delete most, but leave enough to build external modules
366 #
367@@ -1245,7 +1298,7 @@ distclean: mrproper
368 @find $(srctree) $(RCS_FIND_IGNORE) \
369 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
370 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
371- -o -name '.*.rej' -o -size 0 \
372+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
373 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
374 -type f -print | xargs rm -f
375
376@@ -1292,6 +1345,7 @@ help:
377 @echo ' modules_prepare - Set up for building external modules'
378 @echo ' tags/TAGS - Generate tags file for editors'
379 @echo ' cscope - Generate cscope index'
380+ @echo ' gtags - Generate GNU GLOBAL index'
381 @echo ' kernelrelease - Output the release version string'
382 @echo ' kernelversion - Output the version stored in Makefile'
383 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
384@@ -1393,6 +1447,8 @@ PHONY += $(module-dirs) modules
385 $(module-dirs): crmodverdir $(objtree)/Module.symvers
386 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
387
388+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390 modules: $(module-dirs)
391 @$(kecho) ' Building modules, stage 2.';
392 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
393@@ -1448,7 +1504,7 @@ endif # KBUILD_EXTMOD
394 quiet_cmd_tags = GEN $@
395 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
396
397-tags TAGS cscope: FORCE
398+tags TAGS cscope gtags: FORCE
399 $(call cmd,tags)
400
401 # Scripts to check various things for consistency
402@@ -1513,17 +1569,21 @@ else
403 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
404 endif
405
406-%.s: %.c prepare scripts FORCE
407+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409+%.s: %.c gcc-plugins prepare scripts FORCE
410 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
411 %.i: %.c prepare scripts FORCE
412 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
413-%.o: %.c prepare scripts FORCE
414+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
415+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
416+%.o: %.c gcc-plugins prepare scripts FORCE
417 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
418 %.lst: %.c prepare scripts FORCE
419 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
420-%.s: %.S prepare scripts FORCE
421+%.s: %.S gcc-plugins prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423-%.o: %.S prepare scripts FORCE
424+%.o: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426 %.symtypes: %.c prepare scripts FORCE
427 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
428@@ -1533,11 +1593,15 @@ endif
429 $(cmd_crmodverdir)
430 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
431 $(build)=$(build-dir)
432-%/: prepare scripts FORCE
433+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
434+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
435+%/: gcc-plugins prepare scripts FORCE
436 $(cmd_crmodverdir)
437 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
438 $(build)=$(build-dir)
439-%.ko: prepare scripts FORCE
440+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
441+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
442+%.ko: gcc-plugins prepare scripts FORCE
443 $(cmd_crmodverdir)
444 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
445 $(build)=$(build-dir) $(@:.ko=.o)
446diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
447index 610dff4..f396854 100644
448--- a/arch/alpha/include/asm/atomic.h
449+++ b/arch/alpha/include/asm/atomic.h
450@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
451 #define atomic_dec(v) atomic_sub(1,(v))
452 #define atomic64_dec(v) atomic64_sub(1,(v))
453
454+#define atomic64_read_unchecked(v) atomic64_read(v)
455+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
456+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
457+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
458+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
459+#define atomic64_inc_unchecked(v) atomic64_inc(v)
460+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
461+#define atomic64_dec_unchecked(v) atomic64_dec(v)
462+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
463+
464 #define smp_mb__before_atomic_dec() smp_mb()
465 #define smp_mb__after_atomic_dec() smp_mb()
466 #define smp_mb__before_atomic_inc() smp_mb()
467diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
468index f199e69..af005f5 100644
469--- a/arch/alpha/include/asm/cache.h
470+++ b/arch/alpha/include/asm/cache.h
471@@ -4,19 +4,20 @@
472 #ifndef __ARCH_ALPHA_CACHE_H
473 #define __ARCH_ALPHA_CACHE_H
474
475+#include <linux/const.h>
476
477 /* Bytes per L1 (data) cache line. */
478 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
479-# define L1_CACHE_BYTES 64
480 # define L1_CACHE_SHIFT 6
481 #else
482 /* Both EV4 and EV5 are write-through, read-allocate,
483 direct-mapped, physical.
484 */
485-# define L1_CACHE_BYTES 32
486 # define L1_CACHE_SHIFT 5
487 #endif
488
489+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
490+
491 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
492 #define SMP_CACHE_BYTES L1_CACHE_BYTES
493
494diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
495index 5c75c1b..c82f878 100644
496--- a/arch/alpha/include/asm/elf.h
497+++ b/arch/alpha/include/asm/elf.h
498@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
499
500 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
501
502+#ifdef CONFIG_PAX_ASLR
503+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
504+
505+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
506+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
507+#endif
508+
509 /* $0 is set by ld.so to a pointer to a function which might be
510 registered using atexit. This provides a mean for the dynamic
511 linker to call DT_FINI functions for shared libraries that have
512diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
513index 3f0c59f..cf1e100 100644
514--- a/arch/alpha/include/asm/pgtable.h
515+++ b/arch/alpha/include/asm/pgtable.h
516@@ -101,6 +101,17 @@ struct vm_area_struct;
517 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
518 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
519 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
520+
521+#ifdef CONFIG_PAX_PAGEEXEC
522+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
523+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
524+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
525+#else
526+# define PAGE_SHARED_NOEXEC PAGE_SHARED
527+# define PAGE_COPY_NOEXEC PAGE_COPY
528+# define PAGE_READONLY_NOEXEC PAGE_READONLY
529+#endif
530+
531 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
532
533 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
534diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
535index ebc3c89..20cfa63 100644
536--- a/arch/alpha/kernel/module.c
537+++ b/arch/alpha/kernel/module.c
538@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
539
540 /* The small sections were sorted to the end of the segment.
541 The following should definitely cover them. */
542- gp = (u64)me->module_core + me->core_size - 0x8000;
543+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
544 got = sechdrs[me->arch.gotsecindex].sh_addr;
545
546 for (i = 0; i < n; i++) {
547diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
548index a94e49c..d71dd44 100644
549--- a/arch/alpha/kernel/osf_sys.c
550+++ b/arch/alpha/kernel/osf_sys.c
551@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
552 /* At this point: (!vma || addr < vma->vm_end). */
553 if (limit - len < addr)
554 return -ENOMEM;
555- if (!vma || addr + len <= vma->vm_start)
556+ if (check_heap_stack_gap(vma, addr, len))
557 return addr;
558 addr = vma->vm_end;
559 vma = vma->vm_next;
560@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
561 merely specific addresses, but regions of memory -- perhaps
562 this feature should be incorporated into all ports? */
563
564+#ifdef CONFIG_PAX_RANDMMAP
565+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
566+#endif
567+
568 if (addr) {
569 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
570 if (addr != (unsigned long) -ENOMEM)
571@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
572 }
573
574 /* Next, try allocating at TASK_UNMAPPED_BASE. */
575- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
576- len, limit);
577+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
578+
579 if (addr != (unsigned long) -ENOMEM)
580 return addr;
581
582diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
583index 00a31de..2ded0f2 100644
584--- a/arch/alpha/mm/fault.c
585+++ b/arch/alpha/mm/fault.c
586@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
587 __reload_thread(pcb);
588 }
589
590+#ifdef CONFIG_PAX_PAGEEXEC
591+/*
592+ * PaX: decide what to do with offenders (regs->pc = fault address)
593+ *
594+ * returns 1 when task should be killed
595+ * 2 when patched PLT trampoline was detected
596+ * 3 when unpatched PLT trampoline was detected
597+ */
598+static int pax_handle_fetch_fault(struct pt_regs *regs)
599+{
600+
601+#ifdef CONFIG_PAX_EMUPLT
602+ int err;
603+
604+ do { /* PaX: patched PLT emulation #1 */
605+ unsigned int ldah, ldq, jmp;
606+
607+ err = get_user(ldah, (unsigned int *)regs->pc);
608+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
609+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
610+
611+ if (err)
612+ break;
613+
614+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
615+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
616+ jmp == 0x6BFB0000U)
617+ {
618+ unsigned long r27, addr;
619+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
620+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
621+
622+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
623+ err = get_user(r27, (unsigned long *)addr);
624+ if (err)
625+ break;
626+
627+ regs->r27 = r27;
628+ regs->pc = r27;
629+ return 2;
630+ }
631+ } while (0);
632+
633+ do { /* PaX: patched PLT emulation #2 */
634+ unsigned int ldah, lda, br;
635+
636+ err = get_user(ldah, (unsigned int *)regs->pc);
637+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
638+ err |= get_user(br, (unsigned int *)(regs->pc+8));
639+
640+ if (err)
641+ break;
642+
643+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
644+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
645+ (br & 0xFFE00000U) == 0xC3E00000U)
646+ {
647+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
648+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
649+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
650+
651+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
652+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
653+ return 2;
654+ }
655+ } while (0);
656+
657+ do { /* PaX: unpatched PLT emulation */
658+ unsigned int br;
659+
660+ err = get_user(br, (unsigned int *)regs->pc);
661+
662+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
663+ unsigned int br2, ldq, nop, jmp;
664+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
665+
666+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
667+ err = get_user(br2, (unsigned int *)addr);
668+ err |= get_user(ldq, (unsigned int *)(addr+4));
669+ err |= get_user(nop, (unsigned int *)(addr+8));
670+ err |= get_user(jmp, (unsigned int *)(addr+12));
671+ err |= get_user(resolver, (unsigned long *)(addr+16));
672+
673+ if (err)
674+ break;
675+
676+ if (br2 == 0xC3600000U &&
677+ ldq == 0xA77B000CU &&
678+ nop == 0x47FF041FU &&
679+ jmp == 0x6B7B0000U)
680+ {
681+ regs->r28 = regs->pc+4;
682+ regs->r27 = addr+16;
683+ regs->pc = resolver;
684+ return 3;
685+ }
686+ }
687+ } while (0);
688+#endif
689+
690+ return 1;
691+}
692+
693+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
694+{
695+ unsigned long i;
696+
697+ printk(KERN_ERR "PAX: bytes at PC: ");
698+ for (i = 0; i < 5; i++) {
699+ unsigned int c;
700+ if (get_user(c, (unsigned int *)pc+i))
701+ printk(KERN_CONT "???????? ");
702+ else
703+ printk(KERN_CONT "%08x ", c);
704+ }
705+ printk("\n");
706+}
707+#endif
708
709 /*
710 * This routine handles page faults. It determines the address,
711@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
712 good_area:
713 si_code = SEGV_ACCERR;
714 if (cause < 0) {
715- if (!(vma->vm_flags & VM_EXEC))
716+ if (!(vma->vm_flags & VM_EXEC)) {
717+
718+#ifdef CONFIG_PAX_PAGEEXEC
719+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
720+ goto bad_area;
721+
722+ up_read(&mm->mmap_sem);
723+ switch (pax_handle_fetch_fault(regs)) {
724+
725+#ifdef CONFIG_PAX_EMUPLT
726+ case 2:
727+ case 3:
728+ return;
729+#endif
730+
731+ }
732+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
733+ do_group_exit(SIGKILL);
734+#else
735 goto bad_area;
736+#endif
737+
738+ }
739 } else if (!cause) {
740 /* Allow reads even for write-only mappings */
741 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
742diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
743index b68faef..6dd1496 100644
744--- a/arch/arm/Kconfig
745+++ b/arch/arm/Kconfig
746@@ -14,6 +14,7 @@ config ARM
747 select SYS_SUPPORTS_APM_EMULATION
748 select HAVE_OPROFILE
749 select HAVE_ARCH_KGDB
750+ select GENERIC_ATOMIC64
751 select HAVE_KPROBES if (!XIP_KERNEL)
752 select HAVE_KRETPROBES if (HAVE_KPROBES)
753 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
754diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
755index d0daeab..ca7e10e 100644
756--- a/arch/arm/include/asm/atomic.h
757+++ b/arch/arm/include/asm/atomic.h
758@@ -15,6 +15,10 @@
759 #include <linux/types.h>
760 #include <asm/system.h>
761
762+#ifdef CONFIG_GENERIC_ATOMIC64
763+#include <asm-generic/atomic64.h>
764+#endif
765+
766 #define ATOMIC_INIT(i) { (i) }
767
768 #ifdef __KERNEL__
769@@ -24,8 +28,16 @@
770 * strex/ldrex monitor on some implementations. The reason we can use it for
771 * atomic_set() is the clrex or dummy strex done on every exception return.
772 */
773-#define atomic_read(v) ((v)->counter)
774+#define atomic_read(v) (*(volatile int *)&(v)->counter)
775+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
776+{
777+ return v->counter;
778+}
779 #define atomic_set(v,i) (((v)->counter) = (i))
780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
781+{
782+ v->counter = i;
783+}
784
785 #if __LINUX_ARM_ARCH__ >= 6
786
787@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
788 int result;
789
790 __asm__ __volatile__("@ atomic_add\n"
791+"1: ldrex %1, [%2]\n"
792+" add %0, %1, %3\n"
793+
794+#ifdef CONFIG_PAX_REFCOUNT
795+" bvc 3f\n"
796+"2: bkpt 0xf103\n"
797+"3:\n"
798+#endif
799+
800+" strex %1, %0, [%2]\n"
801+" teq %1, #0\n"
802+" bne 1b"
803+
804+#ifdef CONFIG_PAX_REFCOUNT
805+"\n4:\n"
806+ _ASM_EXTABLE(2b, 4b)
807+#endif
808+
809+ : "=&r" (result), "=&r" (tmp)
810+ : "r" (&v->counter), "Ir" (i)
811+ : "cc");
812+}
813+
814+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
815+{
816+ unsigned long tmp;
817+ int result;
818+
819+ __asm__ __volatile__("@ atomic_add_unchecked\n"
820 "1: ldrex %0, [%2]\n"
821 " add %0, %0, %3\n"
822 " strex %1, %0, [%2]\n"
823@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
824 smp_mb();
825
826 __asm__ __volatile__("@ atomic_add_return\n"
827+"1: ldrex %1, [%2]\n"
828+" add %0, %1, %3\n"
829+
830+#ifdef CONFIG_PAX_REFCOUNT
831+" bvc 3f\n"
832+" mov %0, %1\n"
833+"2: bkpt 0xf103\n"
834+"3:\n"
835+#endif
836+
837+" strex %1, %0, [%2]\n"
838+" teq %1, #0\n"
839+" bne 1b"
840+
841+#ifdef CONFIG_PAX_REFCOUNT
842+"\n4:\n"
843+ _ASM_EXTABLE(2b, 4b)
844+#endif
845+
846+ : "=&r" (result), "=&r" (tmp)
847+ : "r" (&v->counter), "Ir" (i)
848+ : "cc");
849+
850+ smp_mb();
851+
852+ return result;
853+}
854+
855+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
856+{
857+ unsigned long tmp;
858+ int result;
859+
860+ smp_mb();
861+
862+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
863 "1: ldrex %0, [%2]\n"
864 " add %0, %0, %3\n"
865 " strex %1, %0, [%2]\n"
866@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
867 int result;
868
869 __asm__ __volatile__("@ atomic_sub\n"
870+"1: ldrex %1, [%2]\n"
871+" sub %0, %1, %3\n"
872+
873+#ifdef CONFIG_PAX_REFCOUNT
874+" bvc 3f\n"
875+"2: bkpt 0xf103\n"
876+"3:\n"
877+#endif
878+
879+" strex %1, %0, [%2]\n"
880+" teq %1, #0\n"
881+" bne 1b"
882+
883+#ifdef CONFIG_PAX_REFCOUNT
884+"\n4:\n"
885+ _ASM_EXTABLE(2b, 4b)
886+#endif
887+
888+ : "=&r" (result), "=&r" (tmp)
889+ : "r" (&v->counter), "Ir" (i)
890+ : "cc");
891+}
892+
893+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
894+{
895+ unsigned long tmp;
896+ int result;
897+
898+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
899 "1: ldrex %0, [%2]\n"
900 " sub %0, %0, %3\n"
901 " strex %1, %0, [%2]\n"
902@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
903 smp_mb();
904
905 __asm__ __volatile__("@ atomic_sub_return\n"
906-"1: ldrex %0, [%2]\n"
907-" sub %0, %0, %3\n"
908+"1: ldrex %1, [%2]\n"
909+" sub %0, %1, %3\n"
910+
911+#ifdef CONFIG_PAX_REFCOUNT
912+" bvc 3f\n"
913+" mov %0, %1\n"
914+"2: bkpt 0xf103\n"
915+"3:\n"
916+#endif
917+
918 " strex %1, %0, [%2]\n"
919 " teq %1, #0\n"
920 " bne 1b"
921+
922+#ifdef CONFIG_PAX_REFCOUNT
923+"\n4:\n"
924+ _ASM_EXTABLE(2b, 4b)
925+#endif
926+
927 : "=&r" (result), "=&r" (tmp)
928 : "r" (&v->counter), "Ir" (i)
929 : "cc");
930@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
931 return oldval;
932 }
933
934+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
935+{
936+ unsigned long oldval, res;
937+
938+ smp_mb();
939+
940+ do {
941+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
942+ "ldrex %1, [%2]\n"
943+ "mov %0, #0\n"
944+ "teq %1, %3\n"
945+ "strexeq %0, %4, [%2]\n"
946+ : "=&r" (res), "=&r" (oldval)
947+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
948+ : "cc");
949+ } while (res);
950+
951+ smp_mb();
952+
953+ return oldval;
954+}
955+
956 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
957 {
958 unsigned long tmp, tmp2;
959@@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
960 #endif /* __LINUX_ARM_ARCH__ */
961
962 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
963+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
964+{
965+ return xchg(&v->counter, new);
966+}
967
968 static inline int atomic_add_unless(atomic_t *v, int a, int u)
969 {
970@@ -220,11 +366,27 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
971 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
972
973 #define atomic_inc(v) atomic_add(1, v)
974+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
975+{
976+ atomic_add_unchecked(1, v);
977+}
978 #define atomic_dec(v) atomic_sub(1, v)
979+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
980+{
981+ atomic_sub_unchecked(1, v);
982+}
983
984 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
985+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
986+{
987+ return atomic_add_return_unchecked(1, v) == 0;
988+}
989 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
990 #define atomic_inc_return(v) (atomic_add_return(1, v))
991+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
992+{
993+ return atomic_add_return_unchecked(1, v);
994+}
995 #define atomic_dec_return(v) (atomic_sub_return(1, v))
996 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
997
998diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
999index 66c160b..bca1449 100644
1000--- a/arch/arm/include/asm/cache.h
1001+++ b/arch/arm/include/asm/cache.h
1002@@ -4,8 +4,10 @@
1003 #ifndef __ASMARM_CACHE_H
1004 #define __ASMARM_CACHE_H
1005
1006+#include <linux/const.h>
1007+
1008 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1009-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1010+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1011
1012 /*
1013 * Memory returned by kmalloc() may be used for DMA, so we must make
1014diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1015index 3d0cdd2..19957c5 100644
1016--- a/arch/arm/include/asm/cacheflush.h
1017+++ b/arch/arm/include/asm/cacheflush.h
1018@@ -216,13 +216,13 @@ struct cpu_cache_fns {
1019 void (*dma_inv_range)(const void *, const void *);
1020 void (*dma_clean_range)(const void *, const void *);
1021 void (*dma_flush_range)(const void *, const void *);
1022-};
1023+} __no_const;
1024
1025 struct outer_cache_fns {
1026 void (*inv_range)(unsigned long, unsigned long);
1027 void (*clean_range)(unsigned long, unsigned long);
1028 void (*flush_range)(unsigned long, unsigned long);
1029-};
1030+} __no_const;
1031
1032 /*
1033 * Select the calling method
1034diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1035index 6aac3f5..265536b 100644
1036--- a/arch/arm/include/asm/elf.h
1037+++ b/arch/arm/include/asm/elf.h
1038@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1039 the loader. We need to make sure that it is out of the way of the program
1040 that it will "exec", and that there is sufficient room for the brk. */
1041
1042-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1043+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1044+
1045+#ifdef CONFIG_PAX_ASLR
1046+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1047+
1048+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1049+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1050+#endif
1051
1052 /* When the program starts, a1 contains a pointer to a function to be
1053 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1054diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1055index c019949..388fdd1 100644
1056--- a/arch/arm/include/asm/kmap_types.h
1057+++ b/arch/arm/include/asm/kmap_types.h
1058@@ -19,6 +19,7 @@ enum km_type {
1059 KM_SOFTIRQ0,
1060 KM_SOFTIRQ1,
1061 KM_L2_CACHE,
1062+ KM_CLEARPAGE,
1063 KM_TYPE_NR
1064 };
1065
1066diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1067index 3a32af4..c8def8a 100644
1068--- a/arch/arm/include/asm/page.h
1069+++ b/arch/arm/include/asm/page.h
1070@@ -122,7 +122,7 @@ struct cpu_user_fns {
1071 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1072 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1073 unsigned long vaddr);
1074-};
1075+} __no_const;
1076
1077 #ifdef MULTI_USER
1078 extern struct cpu_user_fns cpu_user;
1079diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1080index d65b2f5..9d87555 100644
1081--- a/arch/arm/include/asm/system.h
1082+++ b/arch/arm/include/asm/system.h
1083@@ -86,6 +86,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
1084
1085 #define xchg(ptr,x) \
1086 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1087+#define xchg_unchecked(ptr,x) \
1088+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1089
1090 extern asmlinkage void __backtrace(void);
1091 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1092@@ -98,7 +100,7 @@ extern int cpu_architecture(void);
1093 extern void cpu_init(void);
1094
1095 void arm_machine_restart(char mode, const char *cmd);
1096-extern void (*arm_pm_restart)(char str, const char *cmd);
1097+extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1098
1099 #define UDBG_UNDEFINED (1 << 0)
1100 #define UDBG_SYSCALL (1 << 1)
1101@@ -505,6 +507,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1102
1103 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1104
1105+#define _ASM_EXTABLE(from, to) \
1106+" .pushsection __ex_table,\"a\"\n"\
1107+" .align 3\n" \
1108+" .long " #from ", " #to"\n" \
1109+" .popsection"
1110+
1111+
1112 #endif /* __ASSEMBLY__ */
1113
1114 #define arch_align_stack(x) (x)
1115diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1116index 1d6bd40..fba0cb9 100644
1117--- a/arch/arm/include/asm/uaccess.h
1118+++ b/arch/arm/include/asm/uaccess.h
1119@@ -22,6 +22,8 @@
1120 #define VERIFY_READ 0
1121 #define VERIFY_WRITE 1
1122
1123+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1124+
1125 /*
1126 * The exception table consists of pairs of addresses: the first is the
1127 * address of an instruction that is allowed to fault, and the second is
1128@@ -387,8 +389,23 @@ do { \
1129
1130
1131 #ifdef CONFIG_MMU
1132-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1133-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1134+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1135+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1136+
1137+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1138+{
1139+ if (!__builtin_constant_p(n))
1140+ check_object_size(to, n, false);
1141+ return ___copy_from_user(to, from, n);
1142+}
1143+
1144+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1145+{
1146+ if (!__builtin_constant_p(n))
1147+ check_object_size(from, n, true);
1148+ return ___copy_to_user(to, from, n);
1149+}
1150+
1151 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1152 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1153 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1154@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1155
1156 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1157 {
1158+ if ((long)n < 0)
1159+ return n;
1160+
1161 if (access_ok(VERIFY_READ, from, n))
1162 n = __copy_from_user(to, from, n);
1163 else /* security hole - plug it */
1164@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1165
1166 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1167 {
1168+ if ((long)n < 0)
1169+ return n;
1170+
1171 if (access_ok(VERIFY_WRITE, to, n))
1172 n = __copy_to_user(to, from, n);
1173 return n;
1174diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1175index 0e62770..e2c2cd6 100644
1176--- a/arch/arm/kernel/armksyms.c
1177+++ b/arch/arm/kernel/armksyms.c
1178@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1179 #ifdef CONFIG_MMU
1180 EXPORT_SYMBOL(copy_page);
1181
1182-EXPORT_SYMBOL(__copy_from_user);
1183-EXPORT_SYMBOL(__copy_to_user);
1184+EXPORT_SYMBOL(___copy_from_user);
1185+EXPORT_SYMBOL(___copy_to_user);
1186 EXPORT_SYMBOL(__clear_user);
1187
1188 EXPORT_SYMBOL(__get_user_1);
1189diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
1190index ba8ccfe..2dc34dc 100644
1191--- a/arch/arm/kernel/kgdb.c
1192+++ b/arch/arm/kernel/kgdb.c
1193@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
1194 * and we handle the normal undef case within the do_undefinstr
1195 * handler.
1196 */
1197-struct kgdb_arch arch_kgdb_ops = {
1198+const struct kgdb_arch arch_kgdb_ops = {
1199 #ifndef __ARMEB__
1200 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
1201 #else /* ! __ARMEB__ */
1202diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1203index 61f90d3..771ab27 100644
1204--- a/arch/arm/kernel/process.c
1205+++ b/arch/arm/kernel/process.c
1206@@ -83,7 +83,7 @@ static int __init hlt_setup(char *__unused)
1207 __setup("nohlt", nohlt_setup);
1208 __setup("hlt", hlt_setup);
1209
1210-void arm_machine_restart(char mode, const char *cmd)
1211+__noreturn void arm_machine_restart(char mode, const char *cmd)
1212 {
1213 /*
1214 * Clean and disable cache, and turn off interrupts
1215@@ -117,7 +117,7 @@ void arm_machine_restart(char mode, const char *cmd)
1216 void (*pm_power_off)(void);
1217 EXPORT_SYMBOL(pm_power_off);
1218
1219-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1220+void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1221 EXPORT_SYMBOL_GPL(arm_pm_restart);
1222
1223
1224@@ -195,6 +195,7 @@ __setup("reboot=", reboot_setup);
1225
1226 void machine_halt(void)
1227 {
1228+ BUG();
1229 }
1230
1231
1232@@ -202,6 +203,7 @@ void machine_power_off(void)
1233 {
1234 if (pm_power_off)
1235 pm_power_off();
1236+ BUG();
1237 }
1238
1239 void machine_restart(char *cmd)
1240diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1241index c6c57b6..0c3b29e 100644
1242--- a/arch/arm/kernel/setup.c
1243+++ b/arch/arm/kernel/setup.c
1244@@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
1245 struct processor processor;
1246 #endif
1247 #ifdef MULTI_TLB
1248-struct cpu_tlb_fns cpu_tlb;
1249+struct cpu_tlb_fns cpu_tlb __read_only;
1250 #endif
1251 #ifdef MULTI_USER
1252-struct cpu_user_fns cpu_user;
1253+struct cpu_user_fns cpu_user __read_only;
1254 #endif
1255 #ifdef MULTI_CACHE
1256-struct cpu_cache_fns cpu_cache;
1257+struct cpu_cache_fns cpu_cache __read_only;
1258 #endif
1259 #ifdef CONFIG_OUTER_CACHE
1260-struct outer_cache_fns outer_cache;
1261+struct outer_cache_fns outer_cache __read_only;
1262 #endif
1263
1264 struct stack {
1265diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1266index 3f361a7..6e806e1 100644
1267--- a/arch/arm/kernel/traps.c
1268+++ b/arch/arm/kernel/traps.c
1269@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
1270
1271 DEFINE_SPINLOCK(die_lock);
1272
1273+extern void gr_handle_kernel_exploit(void);
1274+
1275 /*
1276 * This function is protected against re-entrancy.
1277 */
1278@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
1279 if (panic_on_oops)
1280 panic("Fatal exception");
1281
1282+ gr_handle_kernel_exploit();
1283+
1284 do_exit(SIGSEGV);
1285 }
1286
1287diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
1288index aecf87df..bed731b 100644
1289--- a/arch/arm/kernel/vmlinux.lds.S
1290+++ b/arch/arm/kernel/vmlinux.lds.S
1291@@ -74,14 +74,18 @@ SECTIONS
1292 #ifndef CONFIG_XIP_KERNEL
1293 __init_begin = _stext;
1294 INIT_DATA
1295+ EXIT_TEXT
1296+ EXIT_DATA
1297 . = ALIGN(PAGE_SIZE);
1298 __init_end = .;
1299 #endif
1300 }
1301
1302 /DISCARD/ : { /* Exit code and data */
1303+#ifdef CONFIG_XIP_KERNEL
1304 EXIT_TEXT
1305 EXIT_DATA
1306+#endif
1307 *(.exitcall.exit)
1308 *(.discard)
1309 *(.ARM.exidx.exit.text)
1310diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1311index e4fe124..0fc246b 100644
1312--- a/arch/arm/lib/copy_from_user.S
1313+++ b/arch/arm/lib/copy_from_user.S
1314@@ -16,7 +16,7 @@
1315 /*
1316 * Prototype:
1317 *
1318- * size_t __copy_from_user(void *to, const void *from, size_t n)
1319+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1320 *
1321 * Purpose:
1322 *
1323@@ -84,11 +84,11 @@
1324
1325 .text
1326
1327-ENTRY(__copy_from_user)
1328+ENTRY(___copy_from_user)
1329
1330 #include "copy_template.S"
1331
1332-ENDPROC(__copy_from_user)
1333+ENDPROC(___copy_from_user)
1334
1335 .section .fixup,"ax"
1336 .align 0
1337diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1338index 6ee2f67..d1cce76 100644
1339--- a/arch/arm/lib/copy_page.S
1340+++ b/arch/arm/lib/copy_page.S
1341@@ -10,6 +10,7 @@
1342 * ASM optimised string functions
1343 */
1344 #include <linux/linkage.h>
1345+#include <linux/const.h>
1346 #include <asm/assembler.h>
1347 #include <asm/asm-offsets.h>
1348 #include <asm/cache.h>
1349diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1350index 1a71e15..ac7b258 100644
1351--- a/arch/arm/lib/copy_to_user.S
1352+++ b/arch/arm/lib/copy_to_user.S
1353@@ -16,7 +16,7 @@
1354 /*
1355 * Prototype:
1356 *
1357- * size_t __copy_to_user(void *to, const void *from, size_t n)
1358+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1359 *
1360 * Purpose:
1361 *
1362@@ -88,11 +88,11 @@
1363 .text
1364
1365 ENTRY(__copy_to_user_std)
1366-WEAK(__copy_to_user)
1367+WEAK(___copy_to_user)
1368
1369 #include "copy_template.S"
1370
1371-ENDPROC(__copy_to_user)
1372+ENDPROC(___copy_to_user)
1373
1374 .section .fixup,"ax"
1375 .align 0
1376diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1377index ffdd274..91017b6 100644
1378--- a/arch/arm/lib/uaccess.S
1379+++ b/arch/arm/lib/uaccess.S
1380@@ -19,7 +19,7 @@
1381
1382 #define PAGE_SHIFT 12
1383
1384-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1385+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1386 * Purpose : copy a block to user memory from kernel memory
1387 * Params : to - user memory
1388 * : from - kernel memory
1389@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
1390 sub r2, r2, ip
1391 b .Lc2u_dest_aligned
1392
1393-ENTRY(__copy_to_user)
1394+ENTRY(___copy_to_user)
1395 stmfd sp!, {r2, r4 - r7, lr}
1396 cmp r2, #4
1397 blt .Lc2u_not_enough
1398@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
1399 ldrgtb r3, [r1], #0
1400 USER( strgtbt r3, [r0], #1) @ May fault
1401 b .Lc2u_finished
1402-ENDPROC(__copy_to_user)
1403+ENDPROC(___copy_to_user)
1404
1405 .section .fixup,"ax"
1406 .align 0
1407 9001: ldmfd sp!, {r0, r4 - r7, pc}
1408 .previous
1409
1410-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1411+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1412 * Purpose : copy a block from user memory to kernel memory
1413 * Params : to - kernel memory
1414 * : from - user memory
1415@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
1416 sub r2, r2, ip
1417 b .Lcfu_dest_aligned
1418
1419-ENTRY(__copy_from_user)
1420+ENTRY(___copy_from_user)
1421 stmfd sp!, {r0, r2, r4 - r7, lr}
1422 cmp r2, #4
1423 blt .Lcfu_not_enough
1424@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
1425 USER( ldrgtbt r3, [r1], #1) @ May fault
1426 strgtb r3, [r0], #1
1427 b .Lcfu_finished
1428-ENDPROC(__copy_from_user)
1429+ENDPROC(___copy_from_user)
1430
1431 .section .fixup,"ax"
1432 .align 0
1433diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1434index 6b967ff..67d5b2b 100644
1435--- a/arch/arm/lib/uaccess_with_memcpy.c
1436+++ b/arch/arm/lib/uaccess_with_memcpy.c
1437@@ -97,7 +97,7 @@ out:
1438 }
1439
1440 unsigned long
1441-__copy_to_user(void __user *to, const void *from, unsigned long n)
1442+___copy_to_user(void __user *to, const void *from, unsigned long n)
1443 {
1444 /*
1445 * This test is stubbed out of the main function above to keep
1446diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
1447index 4028724..beec230 100644
1448--- a/arch/arm/mach-at91/pm.c
1449+++ b/arch/arm/mach-at91/pm.c
1450@@ -348,7 +348,7 @@ static void at91_pm_end(void)
1451 }
1452
1453
1454-static struct platform_suspend_ops at91_pm_ops ={
1455+static const struct platform_suspend_ops at91_pm_ops ={
1456 .valid = at91_pm_valid_state,
1457 .begin = at91_pm_begin,
1458 .enter = at91_pm_enter,
1459diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
1460index 5218943..0a34552 100644
1461--- a/arch/arm/mach-omap1/pm.c
1462+++ b/arch/arm/mach-omap1/pm.c
1463@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
1464
1465
1466
1467-static struct platform_suspend_ops omap_pm_ops ={
1468+static const struct platform_suspend_ops omap_pm_ops ={
1469 .prepare = omap_pm_prepare,
1470 .enter = omap_pm_enter,
1471 .finish = omap_pm_finish,
1472diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1473index bff5c4e..d4c649b 100644
1474--- a/arch/arm/mach-omap2/pm24xx.c
1475+++ b/arch/arm/mach-omap2/pm24xx.c
1476@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1477 enable_hlt();
1478 }
1479
1480-static struct platform_suspend_ops omap_pm_ops = {
1481+static const struct platform_suspend_ops omap_pm_ops = {
1482 .prepare = omap2_pm_prepare,
1483 .enter = omap2_pm_enter,
1484 .finish = omap2_pm_finish,
1485diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1486index 8946319..7d3e661 100644
1487--- a/arch/arm/mach-omap2/pm34xx.c
1488+++ b/arch/arm/mach-omap2/pm34xx.c
1489@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1490 return;
1491 }
1492
1493-static struct platform_suspend_ops omap_pm_ops = {
1494+static const struct platform_suspend_ops omap_pm_ops = {
1495 .begin = omap3_pm_begin,
1496 .end = omap3_pm_end,
1497 .prepare = omap3_pm_prepare,
1498diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1499index b3d8d53..6e68ebc 100644
1500--- a/arch/arm/mach-pnx4008/pm.c
1501+++ b/arch/arm/mach-pnx4008/pm.c
1502@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1503 (state == PM_SUSPEND_MEM);
1504 }
1505
1506-static struct platform_suspend_ops pnx4008_pm_ops = {
1507+static const struct platform_suspend_ops pnx4008_pm_ops = {
1508 .enter = pnx4008_pm_enter,
1509 .valid = pnx4008_pm_valid,
1510 };
1511diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1512index 7693355..9beb00a 100644
1513--- a/arch/arm/mach-pxa/pm.c
1514+++ b/arch/arm/mach-pxa/pm.c
1515@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1516 pxa_cpu_pm_fns->finish();
1517 }
1518
1519-static struct platform_suspend_ops pxa_pm_ops = {
1520+static const struct platform_suspend_ops pxa_pm_ops = {
1521 .valid = pxa_pm_valid,
1522 .enter = pxa_pm_enter,
1523 .prepare = pxa_pm_prepare,
1524diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1525index 629e05d..06be589 100644
1526--- a/arch/arm/mach-pxa/sharpsl_pm.c
1527+++ b/arch/arm/mach-pxa/sharpsl_pm.c
1528@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1529 }
1530
1531 #ifdef CONFIG_PM
1532-static struct platform_suspend_ops sharpsl_pm_ops = {
1533+static const struct platform_suspend_ops sharpsl_pm_ops = {
1534 .prepare = pxa_pm_prepare,
1535 .finish = pxa_pm_finish,
1536 .enter = corgi_pxa_pm_enter,
1537diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1538index c83fdc8..ab9fc44 100644
1539--- a/arch/arm/mach-sa1100/pm.c
1540+++ b/arch/arm/mach-sa1100/pm.c
1541@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1542 return virt_to_phys(sp);
1543 }
1544
1545-static struct platform_suspend_ops sa11x0_pm_ops = {
1546+static const struct platform_suspend_ops sa11x0_pm_ops = {
1547 .enter = sa11x0_pm_enter,
1548 .valid = suspend_valid_only_mem,
1549 };
1550diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1551index 3191cd6..c322981 100644
1552--- a/arch/arm/mm/fault.c
1553+++ b/arch/arm/mm/fault.c
1554@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1555 }
1556 #endif
1557
1558+#ifdef CONFIG_PAX_PAGEEXEC
1559+ if (fsr & FSR_LNX_PF) {
1560+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1561+ do_group_exit(SIGKILL);
1562+ }
1563+#endif
1564+
1565 tsk->thread.address = addr;
1566 tsk->thread.error_code = fsr;
1567 tsk->thread.trap_no = 14;
1568@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1569 }
1570 #endif /* CONFIG_MMU */
1571
1572+#ifdef CONFIG_PAX_PAGEEXEC
1573+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1574+{
1575+ long i;
1576+
1577+ printk(KERN_ERR "PAX: bytes at PC: ");
1578+ for (i = 0; i < 20; i++) {
1579+ unsigned char c;
1580+ if (get_user(c, (__force unsigned char __user *)pc+i))
1581+ printk(KERN_CONT "?? ");
1582+ else
1583+ printk(KERN_CONT "%02x ", c);
1584+ }
1585+ printk("\n");
1586+
1587+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1588+ for (i = -1; i < 20; i++) {
1589+ unsigned long c;
1590+ if (get_user(c, (__force unsigned long __user *)sp+i))
1591+ printk(KERN_CONT "???????? ");
1592+ else
1593+ printk(KERN_CONT "%08lx ", c);
1594+ }
1595+ printk("\n");
1596+}
1597+#endif
1598+
1599 /*
1600 * First Level Translation Fault Handler
1601 *
1602@@ -569,6 +603,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1603 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1604 struct siginfo info;
1605
1606+#ifdef CONFIG_PAX_REFCOUNT
1607+ if (fsr_fs(ifsr) == 2) {
1608+ unsigned int bkpt;
1609+
1610+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1611+ current->thread.error_code = ifsr;
1612+ current->thread.trap_no = 0;
1613+ pax_report_refcount_overflow(regs);
1614+ fixup_exception(regs);
1615+ return;
1616+ }
1617+ }
1618+#endif
1619+
1620 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1621 return;
1622
1623diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1624index f5abc51..7ec524c 100644
1625--- a/arch/arm/mm/mmap.c
1626+++ b/arch/arm/mm/mmap.c
1627@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1628 if (len > TASK_SIZE)
1629 return -ENOMEM;
1630
1631+#ifdef CONFIG_PAX_RANDMMAP
1632+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1633+#endif
1634+
1635 if (addr) {
1636 if (do_align)
1637 addr = COLOUR_ALIGN(addr, pgoff);
1638@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1639 addr = PAGE_ALIGN(addr);
1640
1641 vma = find_vma(mm, addr);
1642- if (TASK_SIZE - len >= addr &&
1643- (!vma || addr + len <= vma->vm_start))
1644+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1645 return addr;
1646 }
1647 if (len > mm->cached_hole_size) {
1648- start_addr = addr = mm->free_area_cache;
1649+ start_addr = addr = mm->free_area_cache;
1650 } else {
1651- start_addr = addr = TASK_UNMAPPED_BASE;
1652- mm->cached_hole_size = 0;
1653+ start_addr = addr = mm->mmap_base;
1654+ mm->cached_hole_size = 0;
1655 }
1656
1657 full_search:
1658@@ -94,14 +97,14 @@ full_search:
1659 * Start a new search - just in case we missed
1660 * some holes.
1661 */
1662- if (start_addr != TASK_UNMAPPED_BASE) {
1663- start_addr = addr = TASK_UNMAPPED_BASE;
1664+ if (start_addr != mm->mmap_base) {
1665+ start_addr = addr = mm->mmap_base;
1666 mm->cached_hole_size = 0;
1667 goto full_search;
1668 }
1669 return -ENOMEM;
1670 }
1671- if (!vma || addr + len <= vma->vm_start) {
1672+ if (check_heap_stack_gap(vma, addr, len)) {
1673 /*
1674 * Remember the place where we stopped the search:
1675 */
1676diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1677index 8d97db2..b66cfa5 100644
1678--- a/arch/arm/plat-s3c/pm.c
1679+++ b/arch/arm/plat-s3c/pm.c
1680@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1681 s3c_pm_check_cleanup();
1682 }
1683
1684-static struct platform_suspend_ops s3c_pm_ops = {
1685+static const struct platform_suspend_ops s3c_pm_ops = {
1686 .enter = s3c_pm_enter,
1687 .prepare = s3c_pm_prepare,
1688 .finish = s3c_pm_finish,
1689diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1690index d3cf35a..0ba6053 100644
1691--- a/arch/avr32/include/asm/cache.h
1692+++ b/arch/avr32/include/asm/cache.h
1693@@ -1,8 +1,10 @@
1694 #ifndef __ASM_AVR32_CACHE_H
1695 #define __ASM_AVR32_CACHE_H
1696
1697+#include <linux/const.h>
1698+
1699 #define L1_CACHE_SHIFT 5
1700-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1701+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1702
1703 /*
1704 * Memory returned by kmalloc() may be used for DMA, so we must make
1705diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1706index d5d1d41..856e2ed 100644
1707--- a/arch/avr32/include/asm/elf.h
1708+++ b/arch/avr32/include/asm/elf.h
1709@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1710 the loader. We need to make sure that it is out of the way of the program
1711 that it will "exec", and that there is sufficient room for the brk. */
1712
1713-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1714+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1715
1716+#ifdef CONFIG_PAX_ASLR
1717+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1718+
1719+#define PAX_DELTA_MMAP_LEN 15
1720+#define PAX_DELTA_STACK_LEN 15
1721+#endif
1722
1723 /* This yields a mask that user programs can use to figure out what
1724 instruction set this CPU supports. This could be done in user space,
1725diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1726index b7f5c68..556135c 100644
1727--- a/arch/avr32/include/asm/kmap_types.h
1728+++ b/arch/avr32/include/asm/kmap_types.h
1729@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1730 D(11) KM_IRQ1,
1731 D(12) KM_SOFTIRQ0,
1732 D(13) KM_SOFTIRQ1,
1733-D(14) KM_TYPE_NR
1734+D(14) KM_CLEARPAGE,
1735+D(15) KM_TYPE_NR
1736 };
1737
1738 #undef D
1739diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1740index f021edf..32d680e 100644
1741--- a/arch/avr32/mach-at32ap/pm.c
1742+++ b/arch/avr32/mach-at32ap/pm.c
1743@@ -176,7 +176,7 @@ out:
1744 return 0;
1745 }
1746
1747-static struct platform_suspend_ops avr32_pm_ops = {
1748+static const struct platform_suspend_ops avr32_pm_ops = {
1749 .valid = avr32_pm_valid_state,
1750 .enter = avr32_pm_enter,
1751 };
1752diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1753index b61d86d..e292c7f 100644
1754--- a/arch/avr32/mm/fault.c
1755+++ b/arch/avr32/mm/fault.c
1756@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1757
1758 int exception_trace = 1;
1759
1760+#ifdef CONFIG_PAX_PAGEEXEC
1761+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1762+{
1763+ unsigned long i;
1764+
1765+ printk(KERN_ERR "PAX: bytes at PC: ");
1766+ for (i = 0; i < 20; i++) {
1767+ unsigned char c;
1768+ if (get_user(c, (unsigned char *)pc+i))
1769+ printk(KERN_CONT "???????? ");
1770+ else
1771+ printk(KERN_CONT "%02x ", c);
1772+ }
1773+ printk("\n");
1774+}
1775+#endif
1776+
1777 /*
1778 * This routine handles page faults. It determines the address and the
1779 * problem, and then passes it off to one of the appropriate routines.
1780@@ -157,6 +174,16 @@ bad_area:
1781 up_read(&mm->mmap_sem);
1782
1783 if (user_mode(regs)) {
1784+
1785+#ifdef CONFIG_PAX_PAGEEXEC
1786+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1787+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1788+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1789+ do_group_exit(SIGKILL);
1790+ }
1791+ }
1792+#endif
1793+
1794 if (exception_trace && printk_ratelimit())
1795 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1796 "sp %08lx ecr %lu\n",
1797diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1798index 93f6c63..d144953 100644
1799--- a/arch/blackfin/include/asm/cache.h
1800+++ b/arch/blackfin/include/asm/cache.h
1801@@ -7,12 +7,14 @@
1802 #ifndef __ARCH_BLACKFIN_CACHE_H
1803 #define __ARCH_BLACKFIN_CACHE_H
1804
1805+#include <linux/const.h>
1806+
1807 /*
1808 * Bytes per L1 cache line
1809 * Blackfin loads 32 bytes for cache
1810 */
1811 #define L1_CACHE_SHIFT 5
1812-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1813+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1814 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1815
1816 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1817diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1818index cce79d0..c406c85 100644
1819--- a/arch/blackfin/kernel/kgdb.c
1820+++ b/arch/blackfin/kernel/kgdb.c
1821@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1822 return -1; /* this means that we do not want to exit from the handler */
1823 }
1824
1825-struct kgdb_arch arch_kgdb_ops = {
1826+const struct kgdb_arch arch_kgdb_ops = {
1827 .gdb_bpt_instr = {0xa1},
1828 #ifdef CONFIG_SMP
1829 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1830diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1831index 8837be4..b2fb413 100644
1832--- a/arch/blackfin/mach-common/pm.c
1833+++ b/arch/blackfin/mach-common/pm.c
1834@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1835 return 0;
1836 }
1837
1838-struct platform_suspend_ops bfin_pm_ops = {
1839+const struct platform_suspend_ops bfin_pm_ops = {
1840 .enter = bfin_pm_enter,
1841 .valid = bfin_pm_valid,
1842 };
1843diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1844index aea2718..3639a60 100644
1845--- a/arch/cris/include/arch-v10/arch/cache.h
1846+++ b/arch/cris/include/arch-v10/arch/cache.h
1847@@ -1,8 +1,9 @@
1848 #ifndef _ASM_ARCH_CACHE_H
1849 #define _ASM_ARCH_CACHE_H
1850
1851+#include <linux/const.h>
1852 /* Etrax 100LX have 32-byte cache-lines. */
1853-#define L1_CACHE_BYTES 32
1854 #define L1_CACHE_SHIFT 5
1855+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1856
1857 #endif /* _ASM_ARCH_CACHE_H */
1858diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1859index dfc7305..417f5b3 100644
1860--- a/arch/cris/include/arch-v32/arch/cache.h
1861+++ b/arch/cris/include/arch-v32/arch/cache.h
1862@@ -1,11 +1,12 @@
1863 #ifndef _ASM_CRIS_ARCH_CACHE_H
1864 #define _ASM_CRIS_ARCH_CACHE_H
1865
1866+#include <linux/const.h>
1867 #include <arch/hwregs/dma.h>
1868
1869 /* A cache-line is 32 bytes. */
1870-#define L1_CACHE_BYTES 32
1871 #define L1_CACHE_SHIFT 5
1872+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1873
1874 void flush_dma_list(dma_descr_data *descr);
1875 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
1876diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1877index 00a57af..c3ef0cd 100644
1878--- a/arch/frv/include/asm/atomic.h
1879+++ b/arch/frv/include/asm/atomic.h
1880@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1881 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1882 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1883
1884+#define atomic64_read_unchecked(v) atomic64_read(v)
1885+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1886+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1887+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1888+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1889+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1890+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1891+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1892+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1893+
1894 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1895 {
1896 int c, old;
1897diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
1898index 7dc0f0f..1e6a620 100644
1899--- a/arch/frv/include/asm/cache.h
1900+++ b/arch/frv/include/asm/cache.h
1901@@ -12,10 +12,11 @@
1902 #ifndef __ASM_CACHE_H
1903 #define __ASM_CACHE_H
1904
1905+#include <linux/const.h>
1906
1907 /* bytes per L1 cache line */
1908 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
1909-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1910+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1911
1912 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1913
1914diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1915index f8e16b2..c73ff79 100644
1916--- a/arch/frv/include/asm/kmap_types.h
1917+++ b/arch/frv/include/asm/kmap_types.h
1918@@ -23,6 +23,7 @@ enum km_type {
1919 KM_IRQ1,
1920 KM_SOFTIRQ0,
1921 KM_SOFTIRQ1,
1922+ KM_CLEARPAGE,
1923 KM_TYPE_NR
1924 };
1925
1926diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1927index 385fd30..6c3d97e 100644
1928--- a/arch/frv/mm/elf-fdpic.c
1929+++ b/arch/frv/mm/elf-fdpic.c
1930@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1931 if (addr) {
1932 addr = PAGE_ALIGN(addr);
1933 vma = find_vma(current->mm, addr);
1934- if (TASK_SIZE - len >= addr &&
1935- (!vma || addr + len <= vma->vm_start))
1936+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1937 goto success;
1938 }
1939
1940@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1941 for (; vma; vma = vma->vm_next) {
1942 if (addr > limit)
1943 break;
1944- if (addr + len <= vma->vm_start)
1945+ if (check_heap_stack_gap(vma, addr, len))
1946 goto success;
1947 addr = vma->vm_end;
1948 }
1949@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1950 for (; vma; vma = vma->vm_next) {
1951 if (addr > limit)
1952 break;
1953- if (addr + len <= vma->vm_start)
1954+ if (check_heap_stack_gap(vma, addr, len))
1955 goto success;
1956 addr = vma->vm_end;
1957 }
1958diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
1959index c635028..6d9445a 100644
1960--- a/arch/h8300/include/asm/cache.h
1961+++ b/arch/h8300/include/asm/cache.h
1962@@ -1,8 +1,10 @@
1963 #ifndef __ARCH_H8300_CACHE_H
1964 #define __ARCH_H8300_CACHE_H
1965
1966+#include <linux/const.h>
1967+
1968 /* bytes per L1 cache line */
1969-#define L1_CACHE_BYTES 4
1970+#define L1_CACHE_BYTES _AC(4,UL)
1971
1972 /* m68k-elf-gcc 2.95.2 doesn't like these */
1973
1974diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1975index e4a80d8..11a7ea1 100644
1976--- a/arch/ia64/hp/common/hwsw_iommu.c
1977+++ b/arch/ia64/hp/common/hwsw_iommu.c
1978@@ -17,7 +17,7 @@
1979 #include <linux/swiotlb.h>
1980 #include <asm/machvec.h>
1981
1982-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1983+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1984
1985 /* swiotlb declarations & definitions: */
1986 extern int swiotlb_late_init_with_default_size (size_t size);
1987@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1988 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1989 }
1990
1991-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1992+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1993 {
1994 if (use_swiotlb(dev))
1995 return &swiotlb_dma_ops;
1996diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1997index 01ae69b..35752fd 100644
1998--- a/arch/ia64/hp/common/sba_iommu.c
1999+++ b/arch/ia64/hp/common/sba_iommu.c
2000@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
2001 },
2002 };
2003
2004-extern struct dma_map_ops swiotlb_dma_ops;
2005+extern const struct dma_map_ops swiotlb_dma_ops;
2006
2007 static int __init
2008 sba_init(void)
2009@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
2010
2011 __setup("sbapagesize=",sba_page_override);
2012
2013-struct dma_map_ops sba_dma_ops = {
2014+const struct dma_map_ops sba_dma_ops = {
2015 .alloc_coherent = sba_alloc_coherent,
2016 .free_coherent = sba_free_coherent,
2017 .map_page = sba_map_page,
2018diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
2019index c69552b..c7122f4 100644
2020--- a/arch/ia64/ia32/binfmt_elf32.c
2021+++ b/arch/ia64/ia32/binfmt_elf32.c
2022@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
2023
2024 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
2025
2026+#ifdef CONFIG_PAX_ASLR
2027+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2028+
2029+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2030+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2031+#endif
2032+
2033 /* Ugly but avoids duplication */
2034 #include "../../../fs/binfmt_elf.c"
2035
2036diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
2037index 0f15349..26b3429 100644
2038--- a/arch/ia64/ia32/ia32priv.h
2039+++ b/arch/ia64/ia32/ia32priv.h
2040@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
2041 #define ELF_DATA ELFDATA2LSB
2042 #define ELF_ARCH EM_386
2043
2044-#define IA32_STACK_TOP IA32_PAGE_OFFSET
2045+#ifdef CONFIG_PAX_RANDUSTACK
2046+#define __IA32_DELTA_STACK (current->mm->delta_stack)
2047+#else
2048+#define __IA32_DELTA_STACK 0UL
2049+#endif
2050+
2051+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
2052+
2053 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
2054 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
2055
2056diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2057index 88405cb..de5ca5d 100644
2058--- a/arch/ia64/include/asm/atomic.h
2059+++ b/arch/ia64/include/asm/atomic.h
2060@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2061 #define atomic64_inc(v) atomic64_add(1, (v))
2062 #define atomic64_dec(v) atomic64_sub(1, (v))
2063
2064+#define atomic64_read_unchecked(v) atomic64_read(v)
2065+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2066+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2067+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2068+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2069+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2070+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2071+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2072+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2073+
2074 /* Atomic operations are already serializing */
2075 #define smp_mb__before_atomic_dec() barrier()
2076 #define smp_mb__after_atomic_dec() barrier()
2077diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2078index e7482bd..d1c9b8e 100644
2079--- a/arch/ia64/include/asm/cache.h
2080+++ b/arch/ia64/include/asm/cache.h
2081@@ -1,6 +1,7 @@
2082 #ifndef _ASM_IA64_CACHE_H
2083 #define _ASM_IA64_CACHE_H
2084
2085+#include <linux/const.h>
2086
2087 /*
2088 * Copyright (C) 1998-2000 Hewlett-Packard Co
2089@@ -9,7 +10,7 @@
2090
2091 /* Bytes per L1 (data) cache line. */
2092 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2093-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2094+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2095
2096 #ifdef CONFIG_SMP
2097 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2098diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
2099index 8d3c79c..71b3af6 100644
2100--- a/arch/ia64/include/asm/dma-mapping.h
2101+++ b/arch/ia64/include/asm/dma-mapping.h
2102@@ -12,7 +12,7 @@
2103
2104 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
2105
2106-extern struct dma_map_ops *dma_ops;
2107+extern const struct dma_map_ops *dma_ops;
2108 extern struct ia64_machine_vector ia64_mv;
2109 extern void set_iommu_machvec(void);
2110
2111@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
2112 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2113 dma_addr_t *daddr, gfp_t gfp)
2114 {
2115- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2116+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2117 void *caddr;
2118
2119 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
2120@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2121 static inline void dma_free_coherent(struct device *dev, size_t size,
2122 void *caddr, dma_addr_t daddr)
2123 {
2124- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2125+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2126 debug_dma_free_coherent(dev, size, caddr, daddr);
2127 ops->free_coherent(dev, size, caddr, daddr);
2128 }
2129@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2130
2131 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
2132 {
2133- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2134+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2135 return ops->mapping_error(dev, daddr);
2136 }
2137
2138 static inline int dma_supported(struct device *dev, u64 mask)
2139 {
2140- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2141+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2142 return ops->dma_supported(dev, mask);
2143 }
2144
2145diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2146index 86eddee..b116bb4 100644
2147--- a/arch/ia64/include/asm/elf.h
2148+++ b/arch/ia64/include/asm/elf.h
2149@@ -43,6 +43,13 @@
2150 */
2151 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2152
2153+#ifdef CONFIG_PAX_ASLR
2154+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2155+
2156+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2157+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2158+#endif
2159+
2160 #define PT_IA_64_UNWIND 0x70000001
2161
2162 /* IA-64 relocations: */
2163diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
2164index 367d299..9ad4279 100644
2165--- a/arch/ia64/include/asm/machvec.h
2166+++ b/arch/ia64/include/asm/machvec.h
2167@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
2168 /* DMA-mapping interface: */
2169 typedef void ia64_mv_dma_init (void);
2170 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
2171-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2172+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2173
2174 /*
2175 * WARNING: The legacy I/O space is _architected_. Platforms are
2176@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
2177 # endif /* CONFIG_IA64_GENERIC */
2178
2179 extern void swiotlb_dma_init(void);
2180-extern struct dma_map_ops *dma_get_ops(struct device *);
2181+extern const struct dma_map_ops *dma_get_ops(struct device *);
2182
2183 /*
2184 * Define default versions so we can extend machvec for new platforms without having
2185diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2186index 8840a69..cdb63d9 100644
2187--- a/arch/ia64/include/asm/pgtable.h
2188+++ b/arch/ia64/include/asm/pgtable.h
2189@@ -12,7 +12,7 @@
2190 * David Mosberger-Tang <davidm@hpl.hp.com>
2191 */
2192
2193-
2194+#include <linux/const.h>
2195 #include <asm/mman.h>
2196 #include <asm/page.h>
2197 #include <asm/processor.h>
2198@@ -143,6 +143,17 @@
2199 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2200 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2201 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2202+
2203+#ifdef CONFIG_PAX_PAGEEXEC
2204+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2205+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2206+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2207+#else
2208+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2209+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2210+# define PAGE_COPY_NOEXEC PAGE_COPY
2211+#endif
2212+
2213 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2214 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2215 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2216diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2217index 239ecdc..f94170e 100644
2218--- a/arch/ia64/include/asm/spinlock.h
2219+++ b/arch/ia64/include/asm/spinlock.h
2220@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
2221 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2222
2223 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2224- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2225+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2226 }
2227
2228 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
2229diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2230index 449c8c0..432a3d2 100644
2231--- a/arch/ia64/include/asm/uaccess.h
2232+++ b/arch/ia64/include/asm/uaccess.h
2233@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2234 const void *__cu_from = (from); \
2235 long __cu_len = (n); \
2236 \
2237- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2238+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2239 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2240 __cu_len; \
2241 })
2242@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2243 long __cu_len = (n); \
2244 \
2245 __chk_user_ptr(__cu_from); \
2246- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2247+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2249 __cu_len; \
2250 })
2251diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
2252index f2c1600..969398a 100644
2253--- a/arch/ia64/kernel/dma-mapping.c
2254+++ b/arch/ia64/kernel/dma-mapping.c
2255@@ -3,7 +3,7 @@
2256 /* Set this to 1 if there is a HW IOMMU in the system */
2257 int iommu_detected __read_mostly;
2258
2259-struct dma_map_ops *dma_ops;
2260+const struct dma_map_ops *dma_ops;
2261 EXPORT_SYMBOL(dma_ops);
2262
2263 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
2264@@ -16,7 +16,7 @@ static int __init dma_init(void)
2265 }
2266 fs_initcall(dma_init);
2267
2268-struct dma_map_ops *dma_get_ops(struct device *dev)
2269+const struct dma_map_ops *dma_get_ops(struct device *dev)
2270 {
2271 return dma_ops;
2272 }
2273diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2274index 1481b0a..e7d38ff 100644
2275--- a/arch/ia64/kernel/module.c
2276+++ b/arch/ia64/kernel/module.c
2277@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
2278 void
2279 module_free (struct module *mod, void *module_region)
2280 {
2281- if (mod && mod->arch.init_unw_table &&
2282- module_region == mod->module_init) {
2283+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2284 unw_remove_unwind_table(mod->arch.init_unw_table);
2285 mod->arch.init_unw_table = NULL;
2286 }
2287@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2288 }
2289
2290 static inline int
2291+in_init_rx (const struct module *mod, uint64_t addr)
2292+{
2293+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2294+}
2295+
2296+static inline int
2297+in_init_rw (const struct module *mod, uint64_t addr)
2298+{
2299+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2300+}
2301+
2302+static inline int
2303 in_init (const struct module *mod, uint64_t addr)
2304 {
2305- return addr - (uint64_t) mod->module_init < mod->init_size;
2306+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2307+}
2308+
2309+static inline int
2310+in_core_rx (const struct module *mod, uint64_t addr)
2311+{
2312+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2313+}
2314+
2315+static inline int
2316+in_core_rw (const struct module *mod, uint64_t addr)
2317+{
2318+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2319 }
2320
2321 static inline int
2322 in_core (const struct module *mod, uint64_t addr)
2323 {
2324- return addr - (uint64_t) mod->module_core < mod->core_size;
2325+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2326 }
2327
2328 static inline int
2329@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2330 break;
2331
2332 case RV_BDREL:
2333- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2334+ if (in_init_rx(mod, val))
2335+ val -= (uint64_t) mod->module_init_rx;
2336+ else if (in_init_rw(mod, val))
2337+ val -= (uint64_t) mod->module_init_rw;
2338+ else if (in_core_rx(mod, val))
2339+ val -= (uint64_t) mod->module_core_rx;
2340+ else if (in_core_rw(mod, val))
2341+ val -= (uint64_t) mod->module_core_rw;
2342 break;
2343
2344 case RV_LTV:
2345@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2346 * addresses have been selected...
2347 */
2348 uint64_t gp;
2349- if (mod->core_size > MAX_LTOFF)
2350+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2351 /*
2352 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2353 * at the end of the module.
2354 */
2355- gp = mod->core_size - MAX_LTOFF / 2;
2356+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2357 else
2358- gp = mod->core_size / 2;
2359- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2360+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2361+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2362 mod->arch.gp = gp;
2363 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2364 }
2365diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
2366index f6b1ff0..de773fb 100644
2367--- a/arch/ia64/kernel/pci-dma.c
2368+++ b/arch/ia64/kernel/pci-dma.c
2369@@ -43,7 +43,7 @@ struct device fallback_dev = {
2370 .dma_mask = &fallback_dev.coherent_dma_mask,
2371 };
2372
2373-extern struct dma_map_ops intel_dma_ops;
2374+extern const struct dma_map_ops intel_dma_ops;
2375
2376 static int __init pci_iommu_init(void)
2377 {
2378@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
2379 }
2380 EXPORT_SYMBOL(iommu_dma_supported);
2381
2382+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
2383+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
2384+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2385+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2386+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2387+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2388+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
2389+
2390+static const struct dma_map_ops intel_iommu_dma_ops = {
2391+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
2392+ .alloc_coherent = intel_alloc_coherent,
2393+ .free_coherent = intel_free_coherent,
2394+ .map_sg = intel_map_sg,
2395+ .unmap_sg = intel_unmap_sg,
2396+ .map_page = intel_map_page,
2397+ .unmap_page = intel_unmap_page,
2398+ .mapping_error = intel_mapping_error,
2399+
2400+ .sync_single_for_cpu = machvec_dma_sync_single,
2401+ .sync_sg_for_cpu = machvec_dma_sync_sg,
2402+ .sync_single_for_device = machvec_dma_sync_single,
2403+ .sync_sg_for_device = machvec_dma_sync_sg,
2404+ .dma_supported = iommu_dma_supported,
2405+};
2406+
2407 void __init pci_iommu_alloc(void)
2408 {
2409- dma_ops = &intel_dma_ops;
2410-
2411- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
2412- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
2413- dma_ops->sync_single_for_device = machvec_dma_sync_single;
2414- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
2415- dma_ops->dma_supported = iommu_dma_supported;
2416+ dma_ops = &intel_iommu_dma_ops;
2417
2418 /*
2419 * The order of these functions is important for
2420diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
2421index 285aae8..61dbab6 100644
2422--- a/arch/ia64/kernel/pci-swiotlb.c
2423+++ b/arch/ia64/kernel/pci-swiotlb.c
2424@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
2425 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
2426 }
2427
2428-struct dma_map_ops swiotlb_dma_ops = {
2429+const struct dma_map_ops swiotlb_dma_ops = {
2430 .alloc_coherent = ia64_swiotlb_alloc_coherent,
2431 .free_coherent = swiotlb_free_coherent,
2432 .map_page = swiotlb_map_page,
2433diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2434index 609d500..7dde2a8 100644
2435--- a/arch/ia64/kernel/sys_ia64.c
2436+++ b/arch/ia64/kernel/sys_ia64.c
2437@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2438 if (REGION_NUMBER(addr) == RGN_HPAGE)
2439 addr = 0;
2440 #endif
2441+
2442+#ifdef CONFIG_PAX_RANDMMAP
2443+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2444+ addr = mm->free_area_cache;
2445+ else
2446+#endif
2447+
2448 if (!addr)
2449 addr = mm->free_area_cache;
2450
2451@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2453 /* At this point: (!vma || addr < vma->vm_end). */
2454 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2455- if (start_addr != TASK_UNMAPPED_BASE) {
2456+ if (start_addr != mm->mmap_base) {
2457 /* Start a new search --- just in case we missed some holes. */
2458- addr = TASK_UNMAPPED_BASE;
2459+ addr = mm->mmap_base;
2460 goto full_search;
2461 }
2462 return -ENOMEM;
2463 }
2464- if (!vma || addr + len <= vma->vm_start) {
2465+ if (check_heap_stack_gap(vma, addr, len)) {
2466 /* Remember the address where we stopped this search: */
2467 mm->free_area_cache = addr + len;
2468 return addr;
2469diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
2470index 8f06035..b3a5818 100644
2471--- a/arch/ia64/kernel/topology.c
2472+++ b/arch/ia64/kernel/topology.c
2473@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
2474 return ret;
2475 }
2476
2477-static struct sysfs_ops cache_sysfs_ops = {
2478+static const struct sysfs_ops cache_sysfs_ops = {
2479 .show = cache_show
2480 };
2481
2482diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2483index 0a0c77b..8e55a81 100644
2484--- a/arch/ia64/kernel/vmlinux.lds.S
2485+++ b/arch/ia64/kernel/vmlinux.lds.S
2486@@ -190,7 +190,7 @@ SECTIONS
2487 /* Per-cpu data: */
2488 . = ALIGN(PERCPU_PAGE_SIZE);
2489 PERCPU_VADDR(PERCPU_ADDR, :percpu)
2490- __phys_per_cpu_start = __per_cpu_load;
2491+ __phys_per_cpu_start = per_cpu_load;
2492 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
2493 * into percpu page size
2494 */
2495diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2496index 19261a9..1611b7a 100644
2497--- a/arch/ia64/mm/fault.c
2498+++ b/arch/ia64/mm/fault.c
2499@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2500 return pte_present(pte);
2501 }
2502
2503+#ifdef CONFIG_PAX_PAGEEXEC
2504+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2505+{
2506+ unsigned long i;
2507+
2508+ printk(KERN_ERR "PAX: bytes at PC: ");
2509+ for (i = 0; i < 8; i++) {
2510+ unsigned int c;
2511+ if (get_user(c, (unsigned int *)pc+i))
2512+ printk(KERN_CONT "???????? ");
2513+ else
2514+ printk(KERN_CONT "%08x ", c);
2515+ }
2516+ printk("\n");
2517+}
2518+#endif
2519+
2520 void __kprobes
2521 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2522 {
2523@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2524 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2525 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2526
2527- if ((vma->vm_flags & mask) != mask)
2528+ if ((vma->vm_flags & mask) != mask) {
2529+
2530+#ifdef CONFIG_PAX_PAGEEXEC
2531+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2532+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2533+ goto bad_area;
2534+
2535+ up_read(&mm->mmap_sem);
2536+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2537+ do_group_exit(SIGKILL);
2538+ }
2539+#endif
2540+
2541 goto bad_area;
2542
2543+ }
2544+
2545 survive:
2546 /*
2547 * If for any reason at all we couldn't handle the fault, make
2548diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2549index b0f6157..a082bbc 100644
2550--- a/arch/ia64/mm/hugetlbpage.c
2551+++ b/arch/ia64/mm/hugetlbpage.c
2552@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2553 /* At this point: (!vmm || addr < vmm->vm_end). */
2554 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2555 return -ENOMEM;
2556- if (!vmm || (addr + len) <= vmm->vm_start)
2557+ if (check_heap_stack_gap(vmm, addr, len))
2558 return addr;
2559 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2560 }
2561diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2562index 1857766..05cc6a3 100644
2563--- a/arch/ia64/mm/init.c
2564+++ b/arch/ia64/mm/init.c
2565@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
2566 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2567 vma->vm_end = vma->vm_start + PAGE_SIZE;
2568 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2569+
2570+#ifdef CONFIG_PAX_PAGEEXEC
2571+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2572+ vma->vm_flags &= ~VM_EXEC;
2573+
2574+#ifdef CONFIG_PAX_MPROTECT
2575+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2576+ vma->vm_flags &= ~VM_MAYEXEC;
2577+#endif
2578+
2579+ }
2580+#endif
2581+
2582 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2583 down_write(&current->mm->mmap_sem);
2584 if (insert_vm_struct(current->mm, vma)) {
2585diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
2586index 98b6849..8046766 100644
2587--- a/arch/ia64/sn/pci/pci_dma.c
2588+++ b/arch/ia64/sn/pci/pci_dma.c
2589@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
2590 return ret;
2591 }
2592
2593-static struct dma_map_ops sn_dma_ops = {
2594+static const struct dma_map_ops sn_dma_ops = {
2595 .alloc_coherent = sn_dma_alloc_coherent,
2596 .free_coherent = sn_dma_free_coherent,
2597 .map_page = sn_dma_map_page,
2598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2599index 40b3ee9..8c2c112 100644
2600--- a/arch/m32r/include/asm/cache.h
2601+++ b/arch/m32r/include/asm/cache.h
2602@@ -1,8 +1,10 @@
2603 #ifndef _ASM_M32R_CACHE_H
2604 #define _ASM_M32R_CACHE_H
2605
2606+#include <linux/const.h>
2607+
2608 /* L1 cache line size */
2609 #define L1_CACHE_SHIFT 4
2610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2612
2613 #endif /* _ASM_M32R_CACHE_H */
2614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2615index 82abd15..d95ae5d 100644
2616--- a/arch/m32r/lib/usercopy.c
2617+++ b/arch/m32r/lib/usercopy.c
2618@@ -14,6 +14,9 @@
2619 unsigned long
2620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2621 {
2622+ if ((long)n < 0)
2623+ return n;
2624+
2625 prefetch(from);
2626 if (access_ok(VERIFY_WRITE, to, n))
2627 __copy_user(to,from,n);
2628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2629 unsigned long
2630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2631 {
2632+ if ((long)n < 0)
2633+ return n;
2634+
2635 prefetchw(to);
2636 if (access_ok(VERIFY_READ, from, n))
2637 __copy_user_zeroing(to,from,n);
2638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2639index ecafbe1..432c3e4 100644
2640--- a/arch/m68k/include/asm/cache.h
2641+++ b/arch/m68k/include/asm/cache.h
2642@@ -4,9 +4,11 @@
2643 #ifndef __ARCH_M68K_CACHE_H
2644 #define __ARCH_M68K_CACHE_H
2645
2646+#include <linux/const.h>
2647+
2648 /* bytes per L1 cache line */
2649 #define L1_CACHE_SHIFT 4
2650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2652
2653 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
2654
2655diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2656index c209c47..2ba96e2 100644
2657--- a/arch/microblaze/include/asm/cache.h
2658+++ b/arch/microblaze/include/asm/cache.h
2659@@ -13,11 +13,12 @@
2660 #ifndef _ASM_MICROBLAZE_CACHE_H
2661 #define _ASM_MICROBLAZE_CACHE_H
2662
2663+#include <linux/const.h>
2664 #include <asm/registers.h>
2665
2666 #define L1_CACHE_SHIFT 2
2667 /* word-granular cache in microblaze */
2668-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2669+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2670
2671 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2672
2673diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2674index fd7620f..63d73a6 100644
2675--- a/arch/mips/Kconfig
2676+++ b/arch/mips/Kconfig
2677@@ -5,6 +5,7 @@ config MIPS
2678 select HAVE_IDE
2679 select HAVE_OPROFILE
2680 select HAVE_ARCH_KGDB
2681+ select GENERIC_ATOMIC64 if !64BIT
2682 # Horrible source of confusion. Die, die, die ...
2683 select EMBEDDED
2684 select RTC_LIB if !LEMOTE_FULOONG2E
2685diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2686index 77f5021..2b1db8a 100644
2687--- a/arch/mips/Makefile
2688+++ b/arch/mips/Makefile
2689@@ -51,6 +51,8 @@ endif
2690 cflags-y := -ffunction-sections
2691 cflags-y += $(call cc-option, -mno-check-zero-division)
2692
2693+cflags-y += -Wno-sign-compare -Wno-extra
2694+
2695 ifdef CONFIG_32BIT
2696 ld-emul = $(32bit-emul)
2697 vmlinux-32 = vmlinux
2698diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2699index 632f986..fd0378d 100644
2700--- a/arch/mips/alchemy/devboards/pm.c
2701+++ b/arch/mips/alchemy/devboards/pm.c
2702@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2703
2704 }
2705
2706-static struct platform_suspend_ops db1x_pm_ops = {
2707+static const struct platform_suspend_ops db1x_pm_ops = {
2708 .valid = suspend_valid_only_mem,
2709 .begin = db1x_pm_begin,
2710 .enter = db1x_pm_enter,
2711diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2712index 09e7128..111035b 100644
2713--- a/arch/mips/include/asm/atomic.h
2714+++ b/arch/mips/include/asm/atomic.h
2715@@ -21,6 +21,10 @@
2716 #include <asm/war.h>
2717 #include <asm/system.h>
2718
2719+#ifdef CONFIG_GENERIC_ATOMIC64
2720+#include <asm-generic/atomic64.h>
2721+#endif
2722+
2723 #define ATOMIC_INIT(i) { (i) }
2724
2725 /*
2726@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2727 */
2728 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2729
2730+#define atomic64_read_unchecked(v) atomic64_read(v)
2731+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2732+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2733+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2734+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2735+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2736+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2737+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2738+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2739+
2740 #endif /* CONFIG_64BIT */
2741
2742 /*
2743diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2744index 37f175c..c7a3065 100644
2745--- a/arch/mips/include/asm/cache.h
2746+++ b/arch/mips/include/asm/cache.h
2747@@ -9,10 +9,11 @@
2748 #ifndef _ASM_CACHE_H
2749 #define _ASM_CACHE_H
2750
2751+#include <linux/const.h>
2752 #include <kmalloc.h>
2753
2754 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2755-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2756+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2757
2758 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2759 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2760diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2761index 7990694..4e93acf 100644
2762--- a/arch/mips/include/asm/elf.h
2763+++ b/arch/mips/include/asm/elf.h
2764@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2765 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2766 #endif
2767
2768+#ifdef CONFIG_PAX_ASLR
2769+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2770+
2771+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2772+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2773+#endif
2774+
2775 #endif /* _ASM_ELF_H */
2776diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2777index f266295..627cfff 100644
2778--- a/arch/mips/include/asm/page.h
2779+++ b/arch/mips/include/asm/page.h
2780@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2781 #ifdef CONFIG_CPU_MIPS32
2782 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2783 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2784- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2785+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2786 #else
2787 typedef struct { unsigned long long pte; } pte_t;
2788 #define pte_val(x) ((x).pte)
2789diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2790index e48c0bf..f3acf65 100644
2791--- a/arch/mips/include/asm/reboot.h
2792+++ b/arch/mips/include/asm/reboot.h
2793@@ -9,7 +9,7 @@
2794 #ifndef _ASM_REBOOT_H
2795 #define _ASM_REBOOT_H
2796
2797-extern void (*_machine_restart)(char *command);
2798-extern void (*_machine_halt)(void);
2799+extern void (*__noreturn _machine_restart)(char *command);
2800+extern void (*__noreturn _machine_halt)(void);
2801
2802 #endif /* _ASM_REBOOT_H */
2803diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2804index 83b5509..9fa24a23 100644
2805--- a/arch/mips/include/asm/system.h
2806+++ b/arch/mips/include/asm/system.h
2807@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2808 */
2809 #define __ARCH_WANT_UNLOCKED_CTXSW
2810
2811-extern unsigned long arch_align_stack(unsigned long sp);
2812+#define arch_align_stack(x) ((x) & ~0xfUL)
2813
2814 #endif /* _ASM_SYSTEM_H */
2815diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2816index 9fdd8bc..fcf9d68 100644
2817--- a/arch/mips/kernel/binfmt_elfn32.c
2818+++ b/arch/mips/kernel/binfmt_elfn32.c
2819@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2820 #undef ELF_ET_DYN_BASE
2821 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2822
2823+#ifdef CONFIG_PAX_ASLR
2824+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2825+
2826+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2827+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2828+#endif
2829+
2830 #include <asm/processor.h>
2831 #include <linux/module.h>
2832 #include <linux/elfcore.h>
2833diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2834index ff44823..cf0b48a 100644
2835--- a/arch/mips/kernel/binfmt_elfo32.c
2836+++ b/arch/mips/kernel/binfmt_elfo32.c
2837@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2838 #undef ELF_ET_DYN_BASE
2839 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2840
2841+#ifdef CONFIG_PAX_ASLR
2842+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2843+
2844+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2845+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2846+#endif
2847+
2848 #include <asm/processor.h>
2849
2850 /*
2851diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2852index 50c9bb8..efdd5f8 100644
2853--- a/arch/mips/kernel/kgdb.c
2854+++ b/arch/mips/kernel/kgdb.c
2855@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2856 return -1;
2857 }
2858
2859+/* cannot be const */
2860 struct kgdb_arch arch_kgdb_ops;
2861
2862 /*
2863diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2864index f3d73e1..bb3f57a 100644
2865--- a/arch/mips/kernel/process.c
2866+++ b/arch/mips/kernel/process.c
2867@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2868 out:
2869 return pc;
2870 }
2871-
2872-/*
2873- * Don't forget that the stack pointer must be aligned on a 8 bytes
2874- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2875- */
2876-unsigned long arch_align_stack(unsigned long sp)
2877-{
2878- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2879- sp -= get_random_int() & ~PAGE_MASK;
2880-
2881- return sp & ALMASK;
2882-}
2883diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2884index 060563a..7fbf310 100644
2885--- a/arch/mips/kernel/reset.c
2886+++ b/arch/mips/kernel/reset.c
2887@@ -19,8 +19,8 @@
2888 * So handle all using function pointers to machine specific
2889 * functions.
2890 */
2891-void (*_machine_restart)(char *command);
2892-void (*_machine_halt)(void);
2893+void (*__noreturn _machine_restart)(char *command);
2894+void (*__noreturn _machine_halt)(void);
2895 void (*pm_power_off)(void);
2896
2897 EXPORT_SYMBOL(pm_power_off);
2898@@ -29,16 +29,19 @@ void machine_restart(char *command)
2899 {
2900 if (_machine_restart)
2901 _machine_restart(command);
2902+ BUG();
2903 }
2904
2905 void machine_halt(void)
2906 {
2907 if (_machine_halt)
2908 _machine_halt();
2909+ BUG();
2910 }
2911
2912 void machine_power_off(void)
2913 {
2914 if (pm_power_off)
2915 pm_power_off();
2916+ BUG();
2917 }
2918diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2919index 3f7f466..3abe0b5 100644
2920--- a/arch/mips/kernel/syscall.c
2921+++ b/arch/mips/kernel/syscall.c
2922@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2923 do_color_align = 0;
2924 if (filp || (flags & MAP_SHARED))
2925 do_color_align = 1;
2926+
2927+#ifdef CONFIG_PAX_RANDMMAP
2928+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2929+#endif
2930+
2931 if (addr) {
2932 if (do_color_align)
2933 addr = COLOUR_ALIGN(addr, pgoff);
2934 else
2935 addr = PAGE_ALIGN(addr);
2936 vmm = find_vma(current->mm, addr);
2937- if (task_size - len >= addr &&
2938- (!vmm || addr + len <= vmm->vm_start))
2939+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2940 return addr;
2941 }
2942- addr = TASK_UNMAPPED_BASE;
2943+ addr = current->mm->mmap_base;
2944 if (do_color_align)
2945 addr = COLOUR_ALIGN(addr, pgoff);
2946 else
2947@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2948 /* At this point: (!vmm || addr < vmm->vm_end). */
2949 if (task_size - len < addr)
2950 return -ENOMEM;
2951- if (!vmm || addr + len <= vmm->vm_start)
2952+ if (check_heap_stack_gap(vmm, addr, len))
2953 return addr;
2954 addr = vmm->vm_end;
2955 if (do_color_align)
2956diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2957index e97a7a2..f18f5b0 100644
2958--- a/arch/mips/mm/fault.c
2959+++ b/arch/mips/mm/fault.c
2960@@ -26,6 +26,23 @@
2961 #include <asm/ptrace.h>
2962 #include <asm/highmem.h> /* For VMALLOC_END */
2963
2964+#ifdef CONFIG_PAX_PAGEEXEC
2965+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2966+{
2967+ unsigned long i;
2968+
2969+ printk(KERN_ERR "PAX: bytes at PC: ");
2970+ for (i = 0; i < 5; i++) {
2971+ unsigned int c;
2972+ if (get_user(c, (unsigned int *)pc+i))
2973+ printk(KERN_CONT "???????? ");
2974+ else
2975+ printk(KERN_CONT "%08x ", c);
2976+ }
2977+ printk("\n");
2978+}
2979+#endif
2980+
2981 /*
2982 * This routine handles page faults. It determines the address,
2983 * and the problem, and then passes it off to one of the appropriate
2984diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2985index bdc1f9a..e8de5c5 100644
2986--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2987+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2988@@ -11,12 +11,14 @@
2989 #ifndef _ASM_PROC_CACHE_H
2990 #define _ASM_PROC_CACHE_H
2991
2992+#include <linux/const.h>
2993+
2994 /* L1 cache */
2995
2996 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2997 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2998-#define L1_CACHE_BYTES 16 /* bytes per entry */
2999 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3000+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3001 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3002
3003 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3004diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3005index 8bc9e96..26554f8 100644
3006--- a/arch/parisc/include/asm/atomic.h
3007+++ b/arch/parisc/include/asm/atomic.h
3008@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3009
3010 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3011
3012+#define atomic64_read_unchecked(v) atomic64_read(v)
3013+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3014+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3015+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3016+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3017+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3018+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3019+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3020+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3021+
3022 #else /* CONFIG_64BIT */
3023
3024 #include <asm-generic/atomic64.h>
3025diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3026index 32c2cca..a7b3a64 100644
3027--- a/arch/parisc/include/asm/cache.h
3028+++ b/arch/parisc/include/asm/cache.h
3029@@ -5,6 +5,7 @@
3030 #ifndef __ARCH_PARISC_CACHE_H
3031 #define __ARCH_PARISC_CACHE_H
3032
3033+#include <linux/const.h>
3034
3035 /*
3036 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3037@@ -15,13 +16,13 @@
3038 * just ruin performance.
3039 */
3040 #ifdef CONFIG_PA20
3041-#define L1_CACHE_BYTES 64
3042 #define L1_CACHE_SHIFT 6
3043 #else
3044-#define L1_CACHE_BYTES 32
3045 #define L1_CACHE_SHIFT 5
3046 #endif
3047
3048+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3049+
3050 #ifndef __ASSEMBLY__
3051
3052 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
3053diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3054index 9c802eb..0592e41 100644
3055--- a/arch/parisc/include/asm/elf.h
3056+++ b/arch/parisc/include/asm/elf.h
3057@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
3058
3059 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3060
3061+#ifdef CONFIG_PAX_ASLR
3062+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3063+
3064+#define PAX_DELTA_MMAP_LEN 16
3065+#define PAX_DELTA_STACK_LEN 16
3066+#endif
3067+
3068 /* This yields a mask that user programs can use to figure out what
3069 instruction set this CPU supports. This could be done in user space,
3070 but it's not easy, and we've already done it here. */
3071diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3072index a27d2e2..18fd845 100644
3073--- a/arch/parisc/include/asm/pgtable.h
3074+++ b/arch/parisc/include/asm/pgtable.h
3075@@ -207,6 +207,17 @@
3076 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3077 #define PAGE_COPY PAGE_EXECREAD
3078 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3079+
3080+#ifdef CONFIG_PAX_PAGEEXEC
3081+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3082+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3083+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3084+#else
3085+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3086+# define PAGE_COPY_NOEXEC PAGE_COPY
3087+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3088+#endif
3089+
3090 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3091 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
3092 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
3093diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3094index 2120746..8d70a5e 100644
3095--- a/arch/parisc/kernel/module.c
3096+++ b/arch/parisc/kernel/module.c
3097@@ -95,16 +95,38 @@
3098
3099 /* three functions to determine where in the module core
3100 * or init pieces the location is */
3101+static inline int in_init_rx(struct module *me, void *loc)
3102+{
3103+ return (loc >= me->module_init_rx &&
3104+ loc < (me->module_init_rx + me->init_size_rx));
3105+}
3106+
3107+static inline int in_init_rw(struct module *me, void *loc)
3108+{
3109+ return (loc >= me->module_init_rw &&
3110+ loc < (me->module_init_rw + me->init_size_rw));
3111+}
3112+
3113 static inline int in_init(struct module *me, void *loc)
3114 {
3115- return (loc >= me->module_init &&
3116- loc <= (me->module_init + me->init_size));
3117+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3118+}
3119+
3120+static inline int in_core_rx(struct module *me, void *loc)
3121+{
3122+ return (loc >= me->module_core_rx &&
3123+ loc < (me->module_core_rx + me->core_size_rx));
3124+}
3125+
3126+static inline int in_core_rw(struct module *me, void *loc)
3127+{
3128+ return (loc >= me->module_core_rw &&
3129+ loc < (me->module_core_rw + me->core_size_rw));
3130 }
3131
3132 static inline int in_core(struct module *me, void *loc)
3133 {
3134- return (loc >= me->module_core &&
3135- loc <= (me->module_core + me->core_size));
3136+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3137 }
3138
3139 static inline int in_local(struct module *me, void *loc)
3140@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3141 }
3142
3143 /* align things a bit */
3144- me->core_size = ALIGN(me->core_size, 16);
3145- me->arch.got_offset = me->core_size;
3146- me->core_size += gots * sizeof(struct got_entry);
3147+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3148+ me->arch.got_offset = me->core_size_rw;
3149+ me->core_size_rw += gots * sizeof(struct got_entry);
3150
3151- me->core_size = ALIGN(me->core_size, 16);
3152- me->arch.fdesc_offset = me->core_size;
3153- me->core_size += fdescs * sizeof(Elf_Fdesc);
3154+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3155+ me->arch.fdesc_offset = me->core_size_rw;
3156+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3157
3158 me->arch.got_max = gots;
3159 me->arch.fdesc_max = fdescs;
3160@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3161
3162 BUG_ON(value == 0);
3163
3164- got = me->module_core + me->arch.got_offset;
3165+ got = me->module_core_rw + me->arch.got_offset;
3166 for (i = 0; got[i].addr; i++)
3167 if (got[i].addr == value)
3168 goto out;
3169@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3170 #ifdef CONFIG_64BIT
3171 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3172 {
3173- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3174+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3175
3176 if (!value) {
3177 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3178@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3179
3180 /* Create new one */
3181 fdesc->addr = value;
3182- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3183+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3184 return (Elf_Addr)fdesc;
3185 }
3186 #endif /* CONFIG_64BIT */
3187@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
3188
3189 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3190 end = table + sechdrs[me->arch.unwind_section].sh_size;
3191- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3192+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3193
3194 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3195 me->arch.unwind_section, table, end, gp);
3196diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3197index 9147391..f3d949a 100644
3198--- a/arch/parisc/kernel/sys_parisc.c
3199+++ b/arch/parisc/kernel/sys_parisc.c
3200@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3201 /* At this point: (!vma || addr < vma->vm_end). */
3202 if (TASK_SIZE - len < addr)
3203 return -ENOMEM;
3204- if (!vma || addr + len <= vma->vm_start)
3205+ if (check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 addr = vma->vm_end;
3208 }
3209@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3210 /* At this point: (!vma || addr < vma->vm_end). */
3211 if (TASK_SIZE - len < addr)
3212 return -ENOMEM;
3213- if (!vma || addr + len <= vma->vm_start)
3214+ if (check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3217 if (addr < vma->vm_end) /* handle wraparound */
3218@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3219 if (flags & MAP_FIXED)
3220 return addr;
3221 if (!addr)
3222- addr = TASK_UNMAPPED_BASE;
3223+ addr = current->mm->mmap_base;
3224
3225 if (filp) {
3226 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3227diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3228index 8b58bf0..7afff03 100644
3229--- a/arch/parisc/kernel/traps.c
3230+++ b/arch/parisc/kernel/traps.c
3231@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3232
3233 down_read(&current->mm->mmap_sem);
3234 vma = find_vma(current->mm,regs->iaoq[0]);
3235- if (vma && (regs->iaoq[0] >= vma->vm_start)
3236- && (vma->vm_flags & VM_EXEC)) {
3237-
3238+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3239 fault_address = regs->iaoq[0];
3240 fault_space = regs->iasq[0];
3241
3242diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3243index c6afbfc..c5839f6 100644
3244--- a/arch/parisc/mm/fault.c
3245+++ b/arch/parisc/mm/fault.c
3246@@ -15,6 +15,7 @@
3247 #include <linux/sched.h>
3248 #include <linux/interrupt.h>
3249 #include <linux/module.h>
3250+#include <linux/unistd.h>
3251
3252 #include <asm/uaccess.h>
3253 #include <asm/traps.h>
3254@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3255 static unsigned long
3256 parisc_acctyp(unsigned long code, unsigned int inst)
3257 {
3258- if (code == 6 || code == 16)
3259+ if (code == 6 || code == 7 || code == 16)
3260 return VM_EXEC;
3261
3262 switch (inst & 0xf0000000) {
3263@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3264 }
3265 #endif
3266
3267+#ifdef CONFIG_PAX_PAGEEXEC
3268+/*
3269+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3270+ *
3271+ * returns 1 when task should be killed
3272+ * 2 when rt_sigreturn trampoline was detected
3273+ * 3 when unpatched PLT trampoline was detected
3274+ */
3275+static int pax_handle_fetch_fault(struct pt_regs *regs)
3276+{
3277+
3278+#ifdef CONFIG_PAX_EMUPLT
3279+ int err;
3280+
3281+ do { /* PaX: unpatched PLT emulation */
3282+ unsigned int bl, depwi;
3283+
3284+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3285+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3286+
3287+ if (err)
3288+ break;
3289+
3290+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3291+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3292+
3293+ err = get_user(ldw, (unsigned int *)addr);
3294+ err |= get_user(bv, (unsigned int *)(addr+4));
3295+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3296+
3297+ if (err)
3298+ break;
3299+
3300+ if (ldw == 0x0E801096U &&
3301+ bv == 0xEAC0C000U &&
3302+ ldw2 == 0x0E881095U)
3303+ {
3304+ unsigned int resolver, map;
3305+
3306+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3307+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3308+ if (err)
3309+ break;
3310+
3311+ regs->gr[20] = instruction_pointer(regs)+8;
3312+ regs->gr[21] = map;
3313+ regs->gr[22] = resolver;
3314+ regs->iaoq[0] = resolver | 3UL;
3315+ regs->iaoq[1] = regs->iaoq[0] + 4;
3316+ return 3;
3317+ }
3318+ }
3319+ } while (0);
3320+#endif
3321+
3322+#ifdef CONFIG_PAX_EMUTRAMP
3323+
3324+#ifndef CONFIG_PAX_EMUSIGRT
3325+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3326+ return 1;
3327+#endif
3328+
3329+ do { /* PaX: rt_sigreturn emulation */
3330+ unsigned int ldi1, ldi2, bel, nop;
3331+
3332+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3333+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3334+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3335+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3336+
3337+ if (err)
3338+ break;
3339+
3340+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3341+ ldi2 == 0x3414015AU &&
3342+ bel == 0xE4008200U &&
3343+ nop == 0x08000240U)
3344+ {
3345+ regs->gr[25] = (ldi1 & 2) >> 1;
3346+ regs->gr[20] = __NR_rt_sigreturn;
3347+ regs->gr[31] = regs->iaoq[1] + 16;
3348+ regs->sr[0] = regs->iasq[1];
3349+ regs->iaoq[0] = 0x100UL;
3350+ regs->iaoq[1] = regs->iaoq[0] + 4;
3351+ regs->iasq[0] = regs->sr[2];
3352+ regs->iasq[1] = regs->sr[2];
3353+ return 2;
3354+ }
3355+ } while (0);
3356+#endif
3357+
3358+ return 1;
3359+}
3360+
3361+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3362+{
3363+ unsigned long i;
3364+
3365+ printk(KERN_ERR "PAX: bytes at PC: ");
3366+ for (i = 0; i < 5; i++) {
3367+ unsigned int c;
3368+ if (get_user(c, (unsigned int *)pc+i))
3369+ printk(KERN_CONT "???????? ");
3370+ else
3371+ printk(KERN_CONT "%08x ", c);
3372+ }
3373+ printk("\n");
3374+}
3375+#endif
3376+
3377 int fixup_exception(struct pt_regs *regs)
3378 {
3379 const struct exception_table_entry *fix;
3380@@ -192,8 +303,33 @@ good_area:
3381
3382 acc_type = parisc_acctyp(code,regs->iir);
3383
3384- if ((vma->vm_flags & acc_type) != acc_type)
3385+ if ((vma->vm_flags & acc_type) != acc_type) {
3386+
3387+#ifdef CONFIG_PAX_PAGEEXEC
3388+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3389+ (address & ~3UL) == instruction_pointer(regs))
3390+ {
3391+ up_read(&mm->mmap_sem);
3392+ switch (pax_handle_fetch_fault(regs)) {
3393+
3394+#ifdef CONFIG_PAX_EMUPLT
3395+ case 3:
3396+ return;
3397+#endif
3398+
3399+#ifdef CONFIG_PAX_EMUTRAMP
3400+ case 2:
3401+ return;
3402+#endif
3403+
3404+ }
3405+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3406+ do_group_exit(SIGKILL);
3407+ }
3408+#endif
3409+
3410 goto bad_area;
3411+ }
3412
3413 /*
3414 * If for any reason at all we couldn't handle the fault, make
3415diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3416index c107b74..409dc0f 100644
3417--- a/arch/powerpc/Makefile
3418+++ b/arch/powerpc/Makefile
3419@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
3420 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
3421 CPP = $(CC) -E $(KBUILD_CFLAGS)
3422
3423+cflags-y += -Wno-sign-compare -Wno-extra
3424+
3425 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
3426
3427 ifeq ($(CONFIG_PPC64),y)
3428diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3429index 81de6eb..d5d0e24 100644
3430--- a/arch/powerpc/include/asm/cache.h
3431+++ b/arch/powerpc/include/asm/cache.h
3432@@ -3,6 +3,7 @@
3433
3434 #ifdef __KERNEL__
3435
3436+#include <linux/const.h>
3437
3438 /* bytes per L1 cache line */
3439 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3440@@ -18,7 +19,7 @@
3441 #define L1_CACHE_SHIFT 7
3442 #endif
3443
3444-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3446
3447 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3448
3449diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
3450index 6d94d27..50d4cad 100644
3451--- a/arch/powerpc/include/asm/device.h
3452+++ b/arch/powerpc/include/asm/device.h
3453@@ -14,7 +14,7 @@ struct dev_archdata {
3454 struct device_node *of_node;
3455
3456 /* DMA operations on that device */
3457- struct dma_map_ops *dma_ops;
3458+ const struct dma_map_ops *dma_ops;
3459
3460 /*
3461 * When an iommu is in use, dma_data is used as a ptr to the base of the
3462diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
3463index e281dae..2b8a784 100644
3464--- a/arch/powerpc/include/asm/dma-mapping.h
3465+++ b/arch/powerpc/include/asm/dma-mapping.h
3466@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
3467 #ifdef CONFIG_PPC64
3468 extern struct dma_map_ops dma_iommu_ops;
3469 #endif
3470-extern struct dma_map_ops dma_direct_ops;
3471+extern const struct dma_map_ops dma_direct_ops;
3472
3473-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3474+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3475 {
3476 /* We don't handle the NULL dev case for ISA for now. We could
3477 * do it via an out of line call but it is not needed for now. The
3478@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3479 return dev->archdata.dma_ops;
3480 }
3481
3482-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
3483+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
3484 {
3485 dev->archdata.dma_ops = ops;
3486 }
3487@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
3488
3489 static inline int dma_supported(struct device *dev, u64 mask)
3490 {
3491- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3492+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3493
3494 if (unlikely(dma_ops == NULL))
3495 return 0;
3496@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
3497
3498 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3499 {
3500- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3501+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3502
3503 if (unlikely(dma_ops == NULL))
3504 return -EIO;
3505@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3506 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3507 dma_addr_t *dma_handle, gfp_t flag)
3508 {
3509- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3510+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3511 void *cpu_addr;
3512
3513 BUG_ON(!dma_ops);
3514@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3515 static inline void dma_free_coherent(struct device *dev, size_t size,
3516 void *cpu_addr, dma_addr_t dma_handle)
3517 {
3518- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3519+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3520
3521 BUG_ON(!dma_ops);
3522
3523@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
3524
3525 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3526 {
3527- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3528+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3529
3530 if (dma_ops->mapping_error)
3531 return dma_ops->mapping_error(dev, dma_addr);
3532diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3533index 5698502..5db093c 100644
3534--- a/arch/powerpc/include/asm/elf.h
3535+++ b/arch/powerpc/include/asm/elf.h
3536@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3537 the loader. We need to make sure that it is out of the way of the program
3538 that it will "exec", and that there is sufficient room for the brk. */
3539
3540-extern unsigned long randomize_et_dyn(unsigned long base);
3541-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3542+#define ELF_ET_DYN_BASE (0x20000000)
3543+
3544+#ifdef CONFIG_PAX_ASLR
3545+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3546+
3547+#ifdef __powerpc64__
3548+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3549+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3550+#else
3551+#define PAX_DELTA_MMAP_LEN 15
3552+#define PAX_DELTA_STACK_LEN 15
3553+#endif
3554+#endif
3555
3556 /*
3557 * Our registers are always unsigned longs, whether we're a 32 bit
3558@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3559 (0x7ff >> (PAGE_SHIFT - 12)) : \
3560 (0x3ffff >> (PAGE_SHIFT - 12)))
3561
3562-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3563-#define arch_randomize_brk arch_randomize_brk
3564-
3565 #endif /* __KERNEL__ */
3566
3567 /*
3568diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
3569index edfc980..1766f59 100644
3570--- a/arch/powerpc/include/asm/iommu.h
3571+++ b/arch/powerpc/include/asm/iommu.h
3572@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
3573 extern void iommu_init_early_dart(void);
3574 extern void iommu_init_early_pasemi(void);
3575
3576+/* dma-iommu.c */
3577+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
3578+
3579 #ifdef CONFIG_PCI
3580 extern void pci_iommu_init(void);
3581 extern void pci_direct_iommu_init(void);
3582diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3583index 9163695..5a00112 100644
3584--- a/arch/powerpc/include/asm/kmap_types.h
3585+++ b/arch/powerpc/include/asm/kmap_types.h
3586@@ -26,6 +26,7 @@ enum km_type {
3587 KM_SOFTIRQ1,
3588 KM_PPC_SYNC_PAGE,
3589 KM_PPC_SYNC_ICACHE,
3590+ KM_CLEARPAGE,
3591 KM_TYPE_NR
3592 };
3593
3594diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3595index ff24254..fe45b21 100644
3596--- a/arch/powerpc/include/asm/page.h
3597+++ b/arch/powerpc/include/asm/page.h
3598@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
3599 * and needs to be executable. This means the whole heap ends
3600 * up being executable.
3601 */
3602-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3603- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3604+#define VM_DATA_DEFAULT_FLAGS32 \
3605+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3606+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3607
3608 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3609 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3610@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
3611 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3612 #endif
3613
3614+#define ktla_ktva(addr) (addr)
3615+#define ktva_ktla(addr) (addr)
3616+
3617 #ifndef __ASSEMBLY__
3618
3619 #undef STRICT_MM_TYPECHECKS
3620diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3621index 3f17b83..1f9e766 100644
3622--- a/arch/powerpc/include/asm/page_64.h
3623+++ b/arch/powerpc/include/asm/page_64.h
3624@@ -180,15 +180,18 @@ do { \
3625 * stack by default, so in the absense of a PT_GNU_STACK program header
3626 * we turn execute permission off.
3627 */
3628-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3629- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3630+#define VM_STACK_DEFAULT_FLAGS32 \
3631+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3632+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3633
3634 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3635 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3636
3637+#ifndef CONFIG_PAX_PAGEEXEC
3638 #define VM_STACK_DEFAULT_FLAGS \
3639 (test_thread_flag(TIF_32BIT) ? \
3640 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3641+#endif
3642
3643 #include <asm-generic/getorder.h>
3644
3645diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
3646index b5ea626..40308222 100644
3647--- a/arch/powerpc/include/asm/pci.h
3648+++ b/arch/powerpc/include/asm/pci.h
3649@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3650 }
3651
3652 #ifdef CONFIG_PCI
3653-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
3654-extern struct dma_map_ops *get_pci_dma_ops(void);
3655+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
3656+extern const struct dma_map_ops *get_pci_dma_ops(void);
3657 #else /* CONFIG_PCI */
3658 #define set_pci_dma_ops(d)
3659 #define get_pci_dma_ops() NULL
3660diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3661index 2a5da06..d65bea2 100644
3662--- a/arch/powerpc/include/asm/pgtable.h
3663+++ b/arch/powerpc/include/asm/pgtable.h
3664@@ -2,6 +2,7 @@
3665 #define _ASM_POWERPC_PGTABLE_H
3666 #ifdef __KERNEL__
3667
3668+#include <linux/const.h>
3669 #ifndef __ASSEMBLY__
3670 #include <asm/processor.h> /* For TASK_SIZE */
3671 #include <asm/mmu.h>
3672diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3673index 4aad413..85d86bf 100644
3674--- a/arch/powerpc/include/asm/pte-hash32.h
3675+++ b/arch/powerpc/include/asm/pte-hash32.h
3676@@ -21,6 +21,7 @@
3677 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3678 #define _PAGE_USER 0x004 /* usermode access allowed */
3679 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3680+#define _PAGE_EXEC _PAGE_GUARDED
3681 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3682 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3683 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3684diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
3685index 8c34149..78f425a 100644
3686--- a/arch/powerpc/include/asm/ptrace.h
3687+++ b/arch/powerpc/include/asm/ptrace.h
3688@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
3689 } while(0)
3690
3691 struct task_struct;
3692-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
3693+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
3694 extern int ptrace_put_reg(struct task_struct *task, int regno,
3695 unsigned long data);
3696
3697diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3698index 32a7c30..be3a8bb 100644
3699--- a/arch/powerpc/include/asm/reg.h
3700+++ b/arch/powerpc/include/asm/reg.h
3701@@ -191,6 +191,7 @@
3702 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3703 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3704 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3705+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3706 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3707 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3708 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3709diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
3710index 8979d4c..d2fd0d3 100644
3711--- a/arch/powerpc/include/asm/swiotlb.h
3712+++ b/arch/powerpc/include/asm/swiotlb.h
3713@@ -13,7 +13,7 @@
3714
3715 #include <linux/swiotlb.h>
3716
3717-extern struct dma_map_ops swiotlb_dma_ops;
3718+extern const struct dma_map_ops swiotlb_dma_ops;
3719
3720 static inline void dma_mark_clean(void *addr, size_t size) {}
3721
3722diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3723index 094a12a..877a60a 100644
3724--- a/arch/powerpc/include/asm/system.h
3725+++ b/arch/powerpc/include/asm/system.h
3726@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3727 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3728 #endif
3729
3730-extern unsigned long arch_align_stack(unsigned long sp);
3731+#define arch_align_stack(x) ((x) & ~0xfUL)
3732
3733 /* Used in very early kernel initialization. */
3734 extern unsigned long reloc_offset(void);
3735diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3736index bd0fb84..a42a14b 100644
3737--- a/arch/powerpc/include/asm/uaccess.h
3738+++ b/arch/powerpc/include/asm/uaccess.h
3739@@ -13,6 +13,8 @@
3740 #define VERIFY_READ 0
3741 #define VERIFY_WRITE 1
3742
3743+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3744+
3745 /*
3746 * The fs value determines whether argument validity checking should be
3747 * performed or not. If get_fs() == USER_DS, checking is performed, with
3748@@ -327,52 +329,6 @@ do { \
3749 extern unsigned long __copy_tofrom_user(void __user *to,
3750 const void __user *from, unsigned long size);
3751
3752-#ifndef __powerpc64__
3753-
3754-static inline unsigned long copy_from_user(void *to,
3755- const void __user *from, unsigned long n)
3756-{
3757- unsigned long over;
3758-
3759- if (access_ok(VERIFY_READ, from, n))
3760- return __copy_tofrom_user((__force void __user *)to, from, n);
3761- if ((unsigned long)from < TASK_SIZE) {
3762- over = (unsigned long)from + n - TASK_SIZE;
3763- return __copy_tofrom_user((__force void __user *)to, from,
3764- n - over) + over;
3765- }
3766- return n;
3767-}
3768-
3769-static inline unsigned long copy_to_user(void __user *to,
3770- const void *from, unsigned long n)
3771-{
3772- unsigned long over;
3773-
3774- if (access_ok(VERIFY_WRITE, to, n))
3775- return __copy_tofrom_user(to, (__force void __user *)from, n);
3776- if ((unsigned long)to < TASK_SIZE) {
3777- over = (unsigned long)to + n - TASK_SIZE;
3778- return __copy_tofrom_user(to, (__force void __user *)from,
3779- n - over) + over;
3780- }
3781- return n;
3782-}
3783-
3784-#else /* __powerpc64__ */
3785-
3786-#define __copy_in_user(to, from, size) \
3787- __copy_tofrom_user((to), (from), (size))
3788-
3789-extern unsigned long copy_from_user(void *to, const void __user *from,
3790- unsigned long n);
3791-extern unsigned long copy_to_user(void __user *to, const void *from,
3792- unsigned long n);
3793-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3794- unsigned long n);
3795-
3796-#endif /* __powerpc64__ */
3797-
3798 static inline unsigned long __copy_from_user_inatomic(void *to,
3799 const void __user *from, unsigned long n)
3800 {
3801@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3802 if (ret == 0)
3803 return 0;
3804 }
3805+
3806+ if (!__builtin_constant_p(n))
3807+ check_object_size(to, n, false);
3808+
3809 return __copy_tofrom_user((__force void __user *)to, from, n);
3810 }
3811
3812@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3813 if (ret == 0)
3814 return 0;
3815 }
3816+
3817+ if (!__builtin_constant_p(n))
3818+ check_object_size(from, n, true);
3819+
3820 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3821 }
3822
3823@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3824 return __copy_to_user_inatomic(to, from, size);
3825 }
3826
3827+#ifndef __powerpc64__
3828+
3829+static inline unsigned long __must_check copy_from_user(void *to,
3830+ const void __user *from, unsigned long n)
3831+{
3832+ unsigned long over;
3833+
3834+ if ((long)n < 0)
3835+ return n;
3836+
3837+ if (access_ok(VERIFY_READ, from, n)) {
3838+ if (!__builtin_constant_p(n))
3839+ check_object_size(to, n, false);
3840+ return __copy_tofrom_user((__force void __user *)to, from, n);
3841+ }
3842+ if ((unsigned long)from < TASK_SIZE) {
3843+ over = (unsigned long)from + n - TASK_SIZE;
3844+ if (!__builtin_constant_p(n - over))
3845+ check_object_size(to, n - over, false);
3846+ return __copy_tofrom_user((__force void __user *)to, from,
3847+ n - over) + over;
3848+ }
3849+ return n;
3850+}
3851+
3852+static inline unsigned long __must_check copy_to_user(void __user *to,
3853+ const void *from, unsigned long n)
3854+{
3855+ unsigned long over;
3856+
3857+ if ((long)n < 0)
3858+ return n;
3859+
3860+ if (access_ok(VERIFY_WRITE, to, n)) {
3861+ if (!__builtin_constant_p(n))
3862+ check_object_size(from, n, true);
3863+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3864+ }
3865+ if ((unsigned long)to < TASK_SIZE) {
3866+ over = (unsigned long)to + n - TASK_SIZE;
3867+ if (!__builtin_constant_p(n))
3868+ check_object_size(from, n - over, true);
3869+ return __copy_tofrom_user(to, (__force void __user *)from,
3870+ n - over) + over;
3871+ }
3872+ return n;
3873+}
3874+
3875+#else /* __powerpc64__ */
3876+
3877+#define __copy_in_user(to, from, size) \
3878+ __copy_tofrom_user((to), (from), (size))
3879+
3880+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3881+{
3882+ if ((long)n < 0 || n > INT_MAX)
3883+ return n;
3884+
3885+ if (!__builtin_constant_p(n))
3886+ check_object_size(to, n, false);
3887+
3888+ if (likely(access_ok(VERIFY_READ, from, n)))
3889+ n = __copy_from_user(to, from, n);
3890+ else
3891+ memset(to, 0, n);
3892+ return n;
3893+}
3894+
3895+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3896+{
3897+ if ((long)n < 0 || n > INT_MAX)
3898+ return n;
3899+
3900+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3901+ if (!__builtin_constant_p(n))
3902+ check_object_size(from, n, true);
3903+ n = __copy_to_user(to, from, n);
3904+ }
3905+ return n;
3906+}
3907+
3908+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3909+ unsigned long n);
3910+
3911+#endif /* __powerpc64__ */
3912+
3913 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3914
3915 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3916diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3917index bb37b1d..01fe9ce 100644
3918--- a/arch/powerpc/kernel/cacheinfo.c
3919+++ b/arch/powerpc/kernel/cacheinfo.c
3920@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3921 &cache_assoc_attr,
3922 };
3923
3924-static struct sysfs_ops cache_index_ops = {
3925+static const struct sysfs_ops cache_index_ops = {
3926 .show = cache_index_show,
3927 };
3928
3929diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3930index 37771a5..648530c 100644
3931--- a/arch/powerpc/kernel/dma-iommu.c
3932+++ b/arch/powerpc/kernel/dma-iommu.c
3933@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3934 }
3935
3936 /* We support DMA to/from any memory page via the iommu */
3937-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3938+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3939 {
3940 struct iommu_table *tbl = get_iommu_table_base(dev);
3941
3942diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3943index e96cbbd..bdd6d41 100644
3944--- a/arch/powerpc/kernel/dma-swiotlb.c
3945+++ b/arch/powerpc/kernel/dma-swiotlb.c
3946@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3947 * map_page, and unmap_page on highmem, use normal dma_ops
3948 * for everything else.
3949 */
3950-struct dma_map_ops swiotlb_dma_ops = {
3951+const struct dma_map_ops swiotlb_dma_ops = {
3952 .alloc_coherent = dma_direct_alloc_coherent,
3953 .free_coherent = dma_direct_free_coherent,
3954 .map_sg = swiotlb_map_sg_attrs,
3955diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3956index 6215062..ebea59c 100644
3957--- a/arch/powerpc/kernel/dma.c
3958+++ b/arch/powerpc/kernel/dma.c
3959@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3960 }
3961 #endif
3962
3963-struct dma_map_ops dma_direct_ops = {
3964+const struct dma_map_ops dma_direct_ops = {
3965 .alloc_coherent = dma_direct_alloc_coherent,
3966 .free_coherent = dma_direct_free_coherent,
3967 .map_sg = dma_direct_map_sg,
3968diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3969index 24dcc0e..a300455 100644
3970--- a/arch/powerpc/kernel/exceptions-64e.S
3971+++ b/arch/powerpc/kernel/exceptions-64e.S
3972@@ -455,6 +455,7 @@ storage_fault_common:
3973 std r14,_DAR(r1)
3974 std r15,_DSISR(r1)
3975 addi r3,r1,STACK_FRAME_OVERHEAD
3976+ bl .save_nvgprs
3977 mr r4,r14
3978 mr r5,r15
3979 ld r14,PACA_EXGEN+EX_R14(r13)
3980@@ -464,8 +465,7 @@ storage_fault_common:
3981 cmpdi r3,0
3982 bne- 1f
3983 b .ret_from_except_lite
3984-1: bl .save_nvgprs
3985- mr r5,r3
3986+1: mr r5,r3
3987 addi r3,r1,STACK_FRAME_OVERHEAD
3988 ld r4,_DAR(r1)
3989 bl .bad_page_fault
3990diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3991index 1808876..9fd206a 100644
3992--- a/arch/powerpc/kernel/exceptions-64s.S
3993+++ b/arch/powerpc/kernel/exceptions-64s.S
3994@@ -818,10 +818,10 @@ handle_page_fault:
3995 11: ld r4,_DAR(r1)
3996 ld r5,_DSISR(r1)
3997 addi r3,r1,STACK_FRAME_OVERHEAD
3998+ bl .save_nvgprs
3999 bl .do_page_fault
4000 cmpdi r3,0
4001 beq+ 13f
4002- bl .save_nvgprs
4003 mr r5,r3
4004 addi r3,r1,STACK_FRAME_OVERHEAD
4005 lwz r4,_DAR(r1)
4006diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
4007index a4c8b38..1b09ad9 100644
4008--- a/arch/powerpc/kernel/ibmebus.c
4009+++ b/arch/powerpc/kernel/ibmebus.c
4010@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
4011 return 1;
4012 }
4013
4014-static struct dma_map_ops ibmebus_dma_ops = {
4015+static const struct dma_map_ops ibmebus_dma_ops = {
4016 .alloc_coherent = ibmebus_alloc_coherent,
4017 .free_coherent = ibmebus_free_coherent,
4018 .map_sg = ibmebus_map_sg,
4019diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4020index 8564a41..67f3471 100644
4021--- a/arch/powerpc/kernel/irq.c
4022+++ b/arch/powerpc/kernel/irq.c
4023@@ -490,9 +490,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4024 host->ops = ops;
4025 host->of_node = of_node_get(of_node);
4026
4027- if (host->ops->match == NULL)
4028- host->ops->match = default_irq_host_match;
4029-
4030 spin_lock_irqsave(&irq_big_lock, flags);
4031
4032 /* If it's a legacy controller, check for duplicates and
4033@@ -567,7 +564,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4034 */
4035 spin_lock_irqsave(&irq_big_lock, flags);
4036 list_for_each_entry(h, &irq_hosts, link)
4037- if (h->ops->match(h, node)) {
4038+ if (h->ops->match) {
4039+ if (h->ops->match(h, node)) {
4040+ found = h;
4041+ break;
4042+ }
4043+ } else if (default_irq_host_match(h, node)) {
4044 found = h;
4045 break;
4046 }
4047diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
4048index 641c74b..8339ad7 100644
4049--- a/arch/powerpc/kernel/kgdb.c
4050+++ b/arch/powerpc/kernel/kgdb.c
4051@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
4052 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
4053 return 0;
4054
4055- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4056+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4057 regs->nip += 4;
4058
4059 return 1;
4060@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
4061 /*
4062 * Global data
4063 */
4064-struct kgdb_arch arch_kgdb_ops = {
4065+const struct kgdb_arch arch_kgdb_ops = {
4066 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
4067 };
4068
4069diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
4070index 477c663..4f50234 100644
4071--- a/arch/powerpc/kernel/module.c
4072+++ b/arch/powerpc/kernel/module.c
4073@@ -31,11 +31,24 @@
4074
4075 LIST_HEAD(module_bug_list);
4076
4077+#ifdef CONFIG_PAX_KERNEXEC
4078 void *module_alloc(unsigned long size)
4079 {
4080 if (size == 0)
4081 return NULL;
4082
4083+ return vmalloc(size);
4084+}
4085+
4086+void *module_alloc_exec(unsigned long size)
4087+#else
4088+void *module_alloc(unsigned long size)
4089+#endif
4090+
4091+{
4092+ if (size == 0)
4093+ return NULL;
4094+
4095 return vmalloc_exec(size);
4096 }
4097
4098@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
4099 vfree(module_region);
4100 }
4101
4102+#ifdef CONFIG_PAX_KERNEXEC
4103+void module_free_exec(struct module *mod, void *module_region)
4104+{
4105+ module_free(mod, module_region);
4106+}
4107+#endif
4108+
4109 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
4110 const Elf_Shdr *sechdrs,
4111 const char *name)
4112diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4113index f832773..0507238 100644
4114--- a/arch/powerpc/kernel/module_32.c
4115+++ b/arch/powerpc/kernel/module_32.c
4116@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4117 me->arch.core_plt_section = i;
4118 }
4119 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4120- printk("Module doesn't contain .plt or .init.plt sections.\n");
4121+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4122 return -ENOEXEC;
4123 }
4124
4125@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
4126
4127 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4128 /* Init, or core PLT? */
4129- if (location >= mod->module_core
4130- && location < mod->module_core + mod->core_size)
4131+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4132+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4133 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4134- else
4135+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4136+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4137 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4138+ else {
4139+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4140+ return ~0UL;
4141+ }
4142
4143 /* Find this entry, or if that fails, the next avail. entry */
4144 while (entry->jump[0]) {
4145diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
4146index cadbed6..b9bbb00 100644
4147--- a/arch/powerpc/kernel/pci-common.c
4148+++ b/arch/powerpc/kernel/pci-common.c
4149@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
4150 unsigned int ppc_pci_flags = 0;
4151
4152
4153-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4154+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4155
4156-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
4157+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
4158 {
4159 pci_dma_ops = dma_ops;
4160 }
4161
4162-struct dma_map_ops *get_pci_dma_ops(void)
4163+const struct dma_map_ops *get_pci_dma_ops(void)
4164 {
4165 return pci_dma_ops;
4166 }
4167diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4168index 7b816da..8d5c277 100644
4169--- a/arch/powerpc/kernel/process.c
4170+++ b/arch/powerpc/kernel/process.c
4171@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
4172 * Lookup NIP late so we have the best change of getting the
4173 * above info out without failing
4174 */
4175- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4176- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4177+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4178+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4179 #endif
4180 show_stack(current, (unsigned long *) regs->gpr[1]);
4181 if (!user_mode(regs))
4182@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4183 newsp = stack[0];
4184 ip = stack[STACK_FRAME_LR_SAVE];
4185 if (!firstframe || ip != lr) {
4186- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4187+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4188 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4189 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4190- printk(" (%pS)",
4191+ printk(" (%pA)",
4192 (void *)current->ret_stack[curr_frame].ret);
4193 curr_frame--;
4194 }
4195@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4196 struct pt_regs *regs = (struct pt_regs *)
4197 (sp + STACK_FRAME_OVERHEAD);
4198 lr = regs->link;
4199- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4200+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4201 regs->trap, (void *)regs->nip, (void *)lr);
4202 firstframe = 1;
4203 }
4204@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
4205 }
4206
4207 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4208-
4209-unsigned long arch_align_stack(unsigned long sp)
4210-{
4211- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4212- sp -= get_random_int() & ~PAGE_MASK;
4213- return sp & ~0xf;
4214-}
4215-
4216-static inline unsigned long brk_rnd(void)
4217-{
4218- unsigned long rnd = 0;
4219-
4220- /* 8MB for 32bit, 1GB for 64bit */
4221- if (is_32bit_task())
4222- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4223- else
4224- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4225-
4226- return rnd << PAGE_SHIFT;
4227-}
4228-
4229-unsigned long arch_randomize_brk(struct mm_struct *mm)
4230-{
4231- unsigned long base = mm->brk;
4232- unsigned long ret;
4233-
4234-#ifdef CONFIG_PPC_STD_MMU_64
4235- /*
4236- * If we are using 1TB segments and we are allowed to randomise
4237- * the heap, we can put it above 1TB so it is backed by a 1TB
4238- * segment. Otherwise the heap will be in the bottom 1TB
4239- * which always uses 256MB segments and this may result in a
4240- * performance penalty.
4241- */
4242- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4243- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4244-#endif
4245-
4246- ret = PAGE_ALIGN(base + brk_rnd());
4247-
4248- if (ret < mm->brk)
4249- return mm->brk;
4250-
4251- return ret;
4252-}
4253-
4254-unsigned long randomize_et_dyn(unsigned long base)
4255-{
4256- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4257-
4258- if (ret < base)
4259- return base;
4260-
4261- return ret;
4262-}
4263diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4264index ef14988..856c4bc 100644
4265--- a/arch/powerpc/kernel/ptrace.c
4266+++ b/arch/powerpc/kernel/ptrace.c
4267@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
4268 /*
4269 * Get contents of register REGNO in task TASK.
4270 */
4271-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
4272+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
4273 {
4274 if (task->thread.regs == NULL)
4275 return -EIO;
4276@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
4277
4278 CHECK_FULL_REGS(child->thread.regs);
4279 if (index < PT_FPR0) {
4280- tmp = ptrace_get_reg(child, (int) index);
4281+ tmp = ptrace_get_reg(child, index);
4282 } else {
4283 flush_fp_to_thread(child);
4284 tmp = ((unsigned long *)child->thread.fpr)
4285diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4286index d670429..2bc59b2 100644
4287--- a/arch/powerpc/kernel/signal_32.c
4288+++ b/arch/powerpc/kernel/signal_32.c
4289@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4290 /* Save user registers on the stack */
4291 frame = &rt_sf->uc.uc_mcontext;
4292 addr = frame;
4293- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4294+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4295 if (save_user_regs(regs, frame, 0, 1))
4296 goto badframe;
4297 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4298diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4299index 2fe6fc6..ada0d96 100644
4300--- a/arch/powerpc/kernel/signal_64.c
4301+++ b/arch/powerpc/kernel/signal_64.c
4302@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4303 current->thread.fpscr.val = 0;
4304
4305 /* Set up to return from userspace. */
4306- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4307+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4308 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4309 } else {
4310 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4311diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
4312index b97c2d6..dd01a6a 100644
4313--- a/arch/powerpc/kernel/sys_ppc32.c
4314+++ b/arch/powerpc/kernel/sys_ppc32.c
4315@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
4316 if (oldlenp) {
4317 if (!error) {
4318 if (get_user(oldlen, oldlenp) ||
4319- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
4320+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
4321+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
4322 error = -EFAULT;
4323 }
4324- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
4325 }
4326 return error;
4327 }
4328diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4329index 6f0ae1a..e4b6a56 100644
4330--- a/arch/powerpc/kernel/traps.c
4331+++ b/arch/powerpc/kernel/traps.c
4332@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
4333 static inline void pmac_backlight_unblank(void) { }
4334 #endif
4335
4336+extern void gr_handle_kernel_exploit(void);
4337+
4338 int die(const char *str, struct pt_regs *regs, long err)
4339 {
4340 static struct {
4341@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
4342 if (panic_on_oops)
4343 panic("Fatal exception");
4344
4345+ gr_handle_kernel_exploit();
4346+
4347 oops_exit();
4348 do_exit(err);
4349
4350diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4351index 137dc22..fe57a79 100644
4352--- a/arch/powerpc/kernel/vdso.c
4353+++ b/arch/powerpc/kernel/vdso.c
4354@@ -36,6 +36,7 @@
4355 #include <asm/firmware.h>
4356 #include <asm/vdso.h>
4357 #include <asm/vdso_datapage.h>
4358+#include <asm/mman.h>
4359
4360 #include "setup.h"
4361
4362@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4363 vdso_base = VDSO32_MBASE;
4364 #endif
4365
4366- current->mm->context.vdso_base = 0;
4367+ current->mm->context.vdso_base = ~0UL;
4368
4369 /* vDSO has a problem and was disabled, just don't "enable" it for the
4370 * process
4371@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4372 vdso_base = get_unmapped_area(NULL, vdso_base,
4373 (vdso_pages << PAGE_SHIFT) +
4374 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4375- 0, 0);
4376+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4377 if (IS_ERR_VALUE(vdso_base)) {
4378 rc = vdso_base;
4379 goto fail_mmapsem;
4380diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
4381index 77f6421..829564a 100644
4382--- a/arch/powerpc/kernel/vio.c
4383+++ b/arch/powerpc/kernel/vio.c
4384@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
4385 vio_cmo_dealloc(viodev, alloc_size);
4386 }
4387
4388-struct dma_map_ops vio_dma_mapping_ops = {
4389+static const struct dma_map_ops vio_dma_mapping_ops = {
4390 .alloc_coherent = vio_dma_iommu_alloc_coherent,
4391 .free_coherent = vio_dma_iommu_free_coherent,
4392 .map_sg = vio_dma_iommu_map_sg,
4393 .unmap_sg = vio_dma_iommu_unmap_sg,
4394+ .dma_supported = dma_iommu_dma_supported,
4395 .map_page = vio_dma_iommu_map_page,
4396 .unmap_page = vio_dma_iommu_unmap_page,
4397
4398@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
4399
4400 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
4401 {
4402- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
4403 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
4404 }
4405
4406diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4407index 5eea6f3..5d10396 100644
4408--- a/arch/powerpc/lib/usercopy_64.c
4409+++ b/arch/powerpc/lib/usercopy_64.c
4410@@ -9,22 +9,6 @@
4411 #include <linux/module.h>
4412 #include <asm/uaccess.h>
4413
4414-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4415-{
4416- if (likely(access_ok(VERIFY_READ, from, n)))
4417- n = __copy_from_user(to, from, n);
4418- else
4419- memset(to, 0, n);
4420- return n;
4421-}
4422-
4423-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4424-{
4425- if (likely(access_ok(VERIFY_WRITE, to, n)))
4426- n = __copy_to_user(to, from, n);
4427- return n;
4428-}
4429-
4430 unsigned long copy_in_user(void __user *to, const void __user *from,
4431 unsigned long n)
4432 {
4433@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4434 return n;
4435 }
4436
4437-EXPORT_SYMBOL(copy_from_user);
4438-EXPORT_SYMBOL(copy_to_user);
4439 EXPORT_SYMBOL(copy_in_user);
4440
4441diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4442index e7dae82..877ce0d 100644
4443--- a/arch/powerpc/mm/fault.c
4444+++ b/arch/powerpc/mm/fault.c
4445@@ -30,6 +30,10 @@
4446 #include <linux/kprobes.h>
4447 #include <linux/kdebug.h>
4448 #include <linux/perf_event.h>
4449+#include <linux/slab.h>
4450+#include <linux/pagemap.h>
4451+#include <linux/compiler.h>
4452+#include <linux/unistd.h>
4453
4454 #include <asm/firmware.h>
4455 #include <asm/page.h>
4456@@ -40,6 +44,7 @@
4457 #include <asm/uaccess.h>
4458 #include <asm/tlbflush.h>
4459 #include <asm/siginfo.h>
4460+#include <asm/ptrace.h>
4461
4462
4463 #ifdef CONFIG_KPROBES
4464@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4465 }
4466 #endif
4467
4468+#ifdef CONFIG_PAX_PAGEEXEC
4469+/*
4470+ * PaX: decide what to do with offenders (regs->nip = fault address)
4471+ *
4472+ * returns 1 when task should be killed
4473+ */
4474+static int pax_handle_fetch_fault(struct pt_regs *regs)
4475+{
4476+ return 1;
4477+}
4478+
4479+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4480+{
4481+ unsigned long i;
4482+
4483+ printk(KERN_ERR "PAX: bytes at PC: ");
4484+ for (i = 0; i < 5; i++) {
4485+ unsigned int c;
4486+ if (get_user(c, (unsigned int __user *)pc+i))
4487+ printk(KERN_CONT "???????? ");
4488+ else
4489+ printk(KERN_CONT "%08x ", c);
4490+ }
4491+ printk("\n");
4492+}
4493+#endif
4494+
4495 /*
4496 * Check whether the instruction at regs->nip is a store using
4497 * an update addressing form which will update r1.
4498@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4499 * indicate errors in DSISR but can validly be set in SRR1.
4500 */
4501 if (trap == 0x400)
4502- error_code &= 0x48200000;
4503+ error_code &= 0x58200000;
4504 else
4505 is_write = error_code & DSISR_ISSTORE;
4506 #else
4507@@ -250,7 +282,7 @@ good_area:
4508 * "undefined". Of those that can be set, this is the only
4509 * one which seems bad.
4510 */
4511- if (error_code & 0x10000000)
4512+ if (error_code & DSISR_GUARDED)
4513 /* Guarded storage error. */
4514 goto bad_area;
4515 #endif /* CONFIG_8xx */
4516@@ -265,7 +297,7 @@ good_area:
4517 * processors use the same I/D cache coherency mechanism
4518 * as embedded.
4519 */
4520- if (error_code & DSISR_PROTFAULT)
4521+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4522 goto bad_area;
4523 #endif /* CONFIG_PPC_STD_MMU */
4524
4525@@ -335,6 +367,23 @@ bad_area:
4526 bad_area_nosemaphore:
4527 /* User mode accesses cause a SIGSEGV */
4528 if (user_mode(regs)) {
4529+
4530+#ifdef CONFIG_PAX_PAGEEXEC
4531+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4532+#ifdef CONFIG_PPC_STD_MMU
4533+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4534+#else
4535+ if (is_exec && regs->nip == address) {
4536+#endif
4537+ switch (pax_handle_fetch_fault(regs)) {
4538+ }
4539+
4540+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4541+ do_group_exit(SIGKILL);
4542+ }
4543+ }
4544+#endif
4545+
4546 _exception(SIGSEGV, regs, code, address);
4547 return 0;
4548 }
4549diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
4550index 5973631..ad617af 100644
4551--- a/arch/powerpc/mm/mem.c
4552+++ b/arch/powerpc/mm/mem.c
4553@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
4554 {
4555 unsigned long lmb_next_region_start_pfn,
4556 lmb_region_max_pfn;
4557- int i;
4558+ unsigned int i;
4559
4560 for (i = 0; i < lmb.memory.cnt - 1; i++) {
4561 lmb_region_max_pfn =
4562diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4563index 0d957a4..26d968f 100644
4564--- a/arch/powerpc/mm/mmap_64.c
4565+++ b/arch/powerpc/mm/mmap_64.c
4566@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4567 */
4568 if (mmap_is_legacy()) {
4569 mm->mmap_base = TASK_UNMAPPED_BASE;
4570+
4571+#ifdef CONFIG_PAX_RANDMMAP
4572+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4573+ mm->mmap_base += mm->delta_mmap;
4574+#endif
4575+
4576 mm->get_unmapped_area = arch_get_unmapped_area;
4577 mm->unmap_area = arch_unmap_area;
4578 } else {
4579 mm->mmap_base = mmap_base();
4580+
4581+#ifdef CONFIG_PAX_RANDMMAP
4582+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4583+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4584+#endif
4585+
4586 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4587 mm->unmap_area = arch_unmap_area_topdown;
4588 }
4589diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4590index ba51948..23009d9 100644
4591--- a/arch/powerpc/mm/slice.c
4592+++ b/arch/powerpc/mm/slice.c
4593@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4594 if ((mm->task_size - len) < addr)
4595 return 0;
4596 vma = find_vma(mm, addr);
4597- return (!vma || (addr + len) <= vma->vm_start);
4598+ return check_heap_stack_gap(vma, addr, len);
4599 }
4600
4601 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4602@@ -256,7 +256,7 @@ full_search:
4603 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4604 continue;
4605 }
4606- if (!vma || addr + len <= vma->vm_start) {
4607+ if (check_heap_stack_gap(vma, addr, len)) {
4608 /*
4609 * Remember the place where we stopped the search:
4610 */
4611@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4612 }
4613 }
4614
4615- addr = mm->mmap_base;
4616- while (addr > len) {
4617+ if (mm->mmap_base < len)
4618+ addr = -ENOMEM;
4619+ else
4620+ addr = mm->mmap_base - len;
4621+
4622+ while (!IS_ERR_VALUE(addr)) {
4623 /* Go down by chunk size */
4624- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4625+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4626
4627 /* Check for hit with different page size */
4628 mask = slice_range_to_mask(addr, len);
4629@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4630 * return with success:
4631 */
4632 vma = find_vma(mm, addr);
4633- if (!vma || (addr + len) <= vma->vm_start) {
4634+ if (check_heap_stack_gap(vma, addr, len)) {
4635 /* remember the address as a hint for next time */
4636 if (use_cache)
4637 mm->free_area_cache = addr;
4638@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4639 mm->cached_hole_size = vma->vm_start - addr;
4640
4641 /* try just below the current vma->vm_start */
4642- addr = vma->vm_start;
4643+ addr = skip_heap_stack_gap(vma, len);
4644 }
4645
4646 /*
4647@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4648 if (fixed && addr > (mm->task_size - len))
4649 return -EINVAL;
4650
4651+#ifdef CONFIG_PAX_RANDMMAP
4652+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4653+ addr = 0;
4654+#endif
4655+
4656 /* If hint, make sure it matches our alignment restrictions */
4657 if (!fixed && addr) {
4658 addr = _ALIGN_UP(addr, 1ul << pshift);
4659diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
4660index b5c753d..8f01abe 100644
4661--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
4662+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
4663@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
4664 lite5200_pm_target_state = PM_SUSPEND_ON;
4665 }
4666
4667-static struct platform_suspend_ops lite5200_pm_ops = {
4668+static const struct platform_suspend_ops lite5200_pm_ops = {
4669 .valid = lite5200_pm_valid,
4670 .begin = lite5200_pm_begin,
4671 .prepare = lite5200_pm_prepare,
4672diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4673index a55b0b6..478c18e 100644
4674--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4675+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4676@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
4677 iounmap(mbar);
4678 }
4679
4680-static struct platform_suspend_ops mpc52xx_pm_ops = {
4681+static const struct platform_suspend_ops mpc52xx_pm_ops = {
4682 .valid = mpc52xx_pm_valid,
4683 .prepare = mpc52xx_pm_prepare,
4684 .enter = mpc52xx_pm_enter,
4685diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
4686index 08e65fc..643d3ac 100644
4687--- a/arch/powerpc/platforms/83xx/suspend.c
4688+++ b/arch/powerpc/platforms/83xx/suspend.c
4689@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
4690 return ret;
4691 }
4692
4693-static struct platform_suspend_ops mpc83xx_suspend_ops = {
4694+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
4695 .valid = mpc83xx_suspend_valid,
4696 .begin = mpc83xx_suspend_begin,
4697 .enter = mpc83xx_suspend_enter,
4698diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
4699index ca5bfdf..1602e09 100644
4700--- a/arch/powerpc/platforms/cell/iommu.c
4701+++ b/arch/powerpc/platforms/cell/iommu.c
4702@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
4703
4704 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
4705
4706-struct dma_map_ops dma_iommu_fixed_ops = {
4707+const struct dma_map_ops dma_iommu_fixed_ops = {
4708 .alloc_coherent = dma_fixed_alloc_coherent,
4709 .free_coherent = dma_fixed_free_coherent,
4710 .map_sg = dma_fixed_map_sg,
4711diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
4712index e34b305..20e48ec 100644
4713--- a/arch/powerpc/platforms/ps3/system-bus.c
4714+++ b/arch/powerpc/platforms/ps3/system-bus.c
4715@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
4716 return mask >= DMA_BIT_MASK(32);
4717 }
4718
4719-static struct dma_map_ops ps3_sb_dma_ops = {
4720+static const struct dma_map_ops ps3_sb_dma_ops = {
4721 .alloc_coherent = ps3_alloc_coherent,
4722 .free_coherent = ps3_free_coherent,
4723 .map_sg = ps3_sb_map_sg,
4724@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
4725 .unmap_page = ps3_unmap_page,
4726 };
4727
4728-static struct dma_map_ops ps3_ioc0_dma_ops = {
4729+static const struct dma_map_ops ps3_ioc0_dma_ops = {
4730 .alloc_coherent = ps3_alloc_coherent,
4731 .free_coherent = ps3_free_coherent,
4732 .map_sg = ps3_ioc0_map_sg,
4733diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
4734index f0e6f28..60d53ed 100644
4735--- a/arch/powerpc/platforms/pseries/Kconfig
4736+++ b/arch/powerpc/platforms/pseries/Kconfig
4737@@ -2,6 +2,8 @@ config PPC_PSERIES
4738 depends on PPC64 && PPC_BOOK3S
4739 bool "IBM pSeries & new (POWER5-based) iSeries"
4740 select MPIC
4741+ select PCI_MSI
4742+ select XICS
4743 select PPC_I8259
4744 select PPC_RTAS
4745 select RTAS_ERROR_LOGGING
4746diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
4747index aca7fff..76c2b6b 100644
4748--- a/arch/s390/Kconfig
4749+++ b/arch/s390/Kconfig
4750@@ -197,28 +197,26 @@ config AUDIT_ARCH
4751
4752 config S390_SWITCH_AMODE
4753 bool "Switch kernel/user addressing modes"
4754+ default y
4755 help
4756 This option allows to switch the addressing modes of kernel and user
4757- space. The kernel parameter switch_amode=on will enable this feature,
4758- default is disabled. Enabling this (via kernel parameter) on machines
4759- earlier than IBM System z9-109 EC/BC will reduce system performance.
4760+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
4761+ will reduce system performance.
4762
4763 Note that this option will also be selected by selecting the execute
4764- protection option below. Enabling the execute protection via the
4765- noexec kernel parameter will also switch the addressing modes,
4766- independent of the switch_amode kernel parameter.
4767+ protection option below. Enabling the execute protection will also
4768+ switch the addressing modes, independent of this option.
4769
4770
4771 config S390_EXEC_PROTECT
4772 bool "Data execute protection"
4773+ default y
4774 select S390_SWITCH_AMODE
4775 help
4776 This option allows to enable a buffer overflow protection for user
4777 space programs and it also selects the addressing mode option above.
4778- The kernel parameter noexec=on will enable this feature and also
4779- switch the addressing modes, default is disabled. Enabling this (via
4780- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4781- will reduce system performance.
4782+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
4783+ reduce system performance.
4784
4785 comment "Code generation options"
4786
4787diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4788index ae7c8f9..3f01a0c 100644
4789--- a/arch/s390/include/asm/atomic.h
4790+++ b/arch/s390/include/asm/atomic.h
4791@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4792 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4793 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4794
4795+#define atomic64_read_unchecked(v) atomic64_read(v)
4796+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4797+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4798+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4799+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4800+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4801+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4802+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4803+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4804+
4805 #define smp_mb__before_atomic_dec() smp_mb()
4806 #define smp_mb__after_atomic_dec() smp_mb()
4807 #define smp_mb__before_atomic_inc() smp_mb()
4808diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4809index 9b86681..c5140db 100644
4810--- a/arch/s390/include/asm/cache.h
4811+++ b/arch/s390/include/asm/cache.h
4812@@ -11,8 +11,10 @@
4813 #ifndef __ARCH_S390_CACHE_H
4814 #define __ARCH_S390_CACHE_H
4815
4816-#define L1_CACHE_BYTES 256
4817+#include <linux/const.h>
4818+
4819 #define L1_CACHE_SHIFT 8
4820+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4821
4822 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4823
4824diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4825index e885442..e3a2817 100644
4826--- a/arch/s390/include/asm/elf.h
4827+++ b/arch/s390/include/asm/elf.h
4828@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4829 that it will "exec", and that there is sufficient room for the brk. */
4830 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4831
4832+#ifdef CONFIG_PAX_ASLR
4833+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4834+
4835+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4836+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4837+#endif
4838+
4839 /* This yields a mask that user programs can use to figure out what
4840 instruction set this CPU supports. */
4841
4842diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4843index e37478e..9ce0e9f 100644
4844--- a/arch/s390/include/asm/setup.h
4845+++ b/arch/s390/include/asm/setup.h
4846@@ -50,13 +50,13 @@ extern unsigned long memory_end;
4847 void detect_memory_layout(struct mem_chunk chunk[]);
4848
4849 #ifdef CONFIG_S390_SWITCH_AMODE
4850-extern unsigned int switch_amode;
4851+#define switch_amode (1)
4852 #else
4853 #define switch_amode (0)
4854 #endif
4855
4856 #ifdef CONFIG_S390_EXEC_PROTECT
4857-extern unsigned int s390_noexec;
4858+#define s390_noexec (1)
4859 #else
4860 #define s390_noexec (0)
4861 #endif
4862diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4863index 8377e91..e28e6f1 100644
4864--- a/arch/s390/include/asm/uaccess.h
4865+++ b/arch/s390/include/asm/uaccess.h
4866@@ -232,6 +232,10 @@ static inline unsigned long __must_check
4867 copy_to_user(void __user *to, const void *from, unsigned long n)
4868 {
4869 might_fault();
4870+
4871+ if ((long)n < 0)
4872+ return n;
4873+
4874 if (access_ok(VERIFY_WRITE, to, n))
4875 n = __copy_to_user(to, from, n);
4876 return n;
4877@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4878 static inline unsigned long __must_check
4879 __copy_from_user(void *to, const void __user *from, unsigned long n)
4880 {
4881+ if ((long)n < 0)
4882+ return n;
4883+
4884 if (__builtin_constant_p(n) && (n <= 256))
4885 return uaccess.copy_from_user_small(n, from, to);
4886 else
4887@@ -283,6 +290,10 @@ static inline unsigned long __must_check
4888 copy_from_user(void *to, const void __user *from, unsigned long n)
4889 {
4890 might_fault();
4891+
4892+ if ((long)n < 0)
4893+ return n;
4894+
4895 if (access_ok(VERIFY_READ, from, n))
4896 n = __copy_from_user(to, from, n);
4897 else
4898diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4899index 639380a..72e3c02 100644
4900--- a/arch/s390/kernel/module.c
4901+++ b/arch/s390/kernel/module.c
4902@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4903
4904 /* Increase core size by size of got & plt and set start
4905 offsets for got and plt. */
4906- me->core_size = ALIGN(me->core_size, 4);
4907- me->arch.got_offset = me->core_size;
4908- me->core_size += me->arch.got_size;
4909- me->arch.plt_offset = me->core_size;
4910- me->core_size += me->arch.plt_size;
4911+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4912+ me->arch.got_offset = me->core_size_rw;
4913+ me->core_size_rw += me->arch.got_size;
4914+ me->arch.plt_offset = me->core_size_rx;
4915+ me->core_size_rx += me->arch.plt_size;
4916 return 0;
4917 }
4918
4919@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4920 if (info->got_initialized == 0) {
4921 Elf_Addr *gotent;
4922
4923- gotent = me->module_core + me->arch.got_offset +
4924+ gotent = me->module_core_rw + me->arch.got_offset +
4925 info->got_offset;
4926 *gotent = val;
4927 info->got_initialized = 1;
4928@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4929 else if (r_type == R_390_GOTENT ||
4930 r_type == R_390_GOTPLTENT)
4931 *(unsigned int *) loc =
4932- (val + (Elf_Addr) me->module_core - loc) >> 1;
4933+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4934 else if (r_type == R_390_GOT64 ||
4935 r_type == R_390_GOTPLT64)
4936 *(unsigned long *) loc = val;
4937@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4938 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4939 if (info->plt_initialized == 0) {
4940 unsigned int *ip;
4941- ip = me->module_core + me->arch.plt_offset +
4942+ ip = me->module_core_rx + me->arch.plt_offset +
4943 info->plt_offset;
4944 #ifndef CONFIG_64BIT
4945 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4946@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4947 val - loc + 0xffffUL < 0x1ffffeUL) ||
4948 (r_type == R_390_PLT32DBL &&
4949 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4950- val = (Elf_Addr) me->module_core +
4951+ val = (Elf_Addr) me->module_core_rx +
4952 me->arch.plt_offset +
4953 info->plt_offset;
4954 val += rela->r_addend - loc;
4955@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4956 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4957 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4958 val = val + rela->r_addend -
4959- ((Elf_Addr) me->module_core + me->arch.got_offset);
4960+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4961 if (r_type == R_390_GOTOFF16)
4962 *(unsigned short *) loc = val;
4963 else if (r_type == R_390_GOTOFF32)
4964@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4965 break;
4966 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4967 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4968- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4969+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4970 rela->r_addend - loc;
4971 if (r_type == R_390_GOTPC)
4972 *(unsigned int *) loc = val;
4973diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4974index 358e545..051e4f4 100644
4975--- a/arch/s390/kernel/setup.c
4976+++ b/arch/s390/kernel/setup.c
4977@@ -307,9 +307,6 @@ static int __init early_parse_mem(char *p)
4978 early_param("mem", early_parse_mem);
4979
4980 #ifdef CONFIG_S390_SWITCH_AMODE
4981-unsigned int switch_amode = 0;
4982-EXPORT_SYMBOL_GPL(switch_amode);
4983-
4984 static int set_amode_and_uaccess(unsigned long user_amode,
4985 unsigned long user32_amode)
4986 {
4987@@ -335,17 +332,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4988 return 0;
4989 }
4990 }
4991-
4992-/*
4993- * Switch kernel/user addressing modes?
4994- */
4995-static int __init early_parse_switch_amode(char *p)
4996-{
4997- switch_amode = 1;
4998- return 0;
4999-}
5000-early_param("switch_amode", early_parse_switch_amode);
5001-
5002 #else /* CONFIG_S390_SWITCH_AMODE */
5003 static inline int set_amode_and_uaccess(unsigned long user_amode,
5004 unsigned long user32_amode)
5005@@ -354,24 +340,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
5006 }
5007 #endif /* CONFIG_S390_SWITCH_AMODE */
5008
5009-#ifdef CONFIG_S390_EXEC_PROTECT
5010-unsigned int s390_noexec = 0;
5011-EXPORT_SYMBOL_GPL(s390_noexec);
5012-
5013-/*
5014- * Enable execute protection?
5015- */
5016-static int __init early_parse_noexec(char *p)
5017-{
5018- if (!strncmp(p, "off", 3))
5019- return 0;
5020- switch_amode = 1;
5021- s390_noexec = 1;
5022- return 0;
5023-}
5024-early_param("noexec", early_parse_noexec);
5025-#endif /* CONFIG_S390_EXEC_PROTECT */
5026-
5027 static void setup_addressing_mode(void)
5028 {
5029 if (s390_noexec) {
5030diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5031index 0ab74ae..c8b68f9 100644
5032--- a/arch/s390/mm/mmap.c
5033+++ b/arch/s390/mm/mmap.c
5034@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5035 */
5036 if (mmap_is_legacy()) {
5037 mm->mmap_base = TASK_UNMAPPED_BASE;
5038+
5039+#ifdef CONFIG_PAX_RANDMMAP
5040+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5041+ mm->mmap_base += mm->delta_mmap;
5042+#endif
5043+
5044 mm->get_unmapped_area = arch_get_unmapped_area;
5045 mm->unmap_area = arch_unmap_area;
5046 } else {
5047 mm->mmap_base = mmap_base();
5048+
5049+#ifdef CONFIG_PAX_RANDMMAP
5050+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5051+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5052+#endif
5053+
5054 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5055 mm->unmap_area = arch_unmap_area_topdown;
5056 }
5057@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5058 */
5059 if (mmap_is_legacy()) {
5060 mm->mmap_base = TASK_UNMAPPED_BASE;
5061+
5062+#ifdef CONFIG_PAX_RANDMMAP
5063+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5064+ mm->mmap_base += mm->delta_mmap;
5065+#endif
5066+
5067 mm->get_unmapped_area = s390_get_unmapped_area;
5068 mm->unmap_area = arch_unmap_area;
5069 } else {
5070 mm->mmap_base = mmap_base();
5071+
5072+#ifdef CONFIG_PAX_RANDMMAP
5073+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5074+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5075+#endif
5076+
5077 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5078 mm->unmap_area = arch_unmap_area_topdown;
5079 }
5080diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5081index ae3d59f..f65f075 100644
5082--- a/arch/score/include/asm/cache.h
5083+++ b/arch/score/include/asm/cache.h
5084@@ -1,7 +1,9 @@
5085 #ifndef _ASM_SCORE_CACHE_H
5086 #define _ASM_SCORE_CACHE_H
5087
5088+#include <linux/const.h>
5089+
5090 #define L1_CACHE_SHIFT 4
5091-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5092+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5093
5094 #endif /* _ASM_SCORE_CACHE_H */
5095diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
5096index 589d5c7..669e274 100644
5097--- a/arch/score/include/asm/system.h
5098+++ b/arch/score/include/asm/system.h
5099@@ -17,7 +17,7 @@ do { \
5100 #define finish_arch_switch(prev) do {} while (0)
5101
5102 typedef void (*vi_handler_t)(void);
5103-extern unsigned long arch_align_stack(unsigned long sp);
5104+#define arch_align_stack(x) (x)
5105
5106 #define mb() barrier()
5107 #define rmb() barrier()
5108diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5109index 25d0803..d6c8e36 100644
5110--- a/arch/score/kernel/process.c
5111+++ b/arch/score/kernel/process.c
5112@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5113
5114 return task_pt_regs(task)->cp0_epc;
5115 }
5116-
5117-unsigned long arch_align_stack(unsigned long sp)
5118-{
5119- return sp;
5120-}
5121diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
5122index d936c1a..304a252 100644
5123--- a/arch/sh/boards/mach-hp6xx/pm.c
5124+++ b/arch/sh/boards/mach-hp6xx/pm.c
5125@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
5126 return 0;
5127 }
5128
5129-static struct platform_suspend_ops hp6x0_pm_ops = {
5130+static const struct platform_suspend_ops hp6x0_pm_ops = {
5131 .enter = hp6x0_pm_enter,
5132 .valid = suspend_valid_only_mem,
5133 };
5134diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5135index 02df18e..ae3a793 100644
5136--- a/arch/sh/include/asm/cache.h
5137+++ b/arch/sh/include/asm/cache.h
5138@@ -9,10 +9,11 @@
5139 #define __ASM_SH_CACHE_H
5140 #ifdef __KERNEL__
5141
5142+#include <linux/const.h>
5143 #include <linux/init.h>
5144 #include <cpu/cache.h>
5145
5146-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5147+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5148
5149 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
5150
5151diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
5152index 8a8a993..7b3079b 100644
5153--- a/arch/sh/kernel/cpu/sh4/sq.c
5154+++ b/arch/sh/kernel/cpu/sh4/sq.c
5155@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
5156 NULL,
5157 };
5158
5159-static struct sysfs_ops sq_sysfs_ops = {
5160+static const struct sysfs_ops sq_sysfs_ops = {
5161 .show = sq_sysfs_show,
5162 .store = sq_sysfs_store,
5163 };
5164diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
5165index ee3c2aa..c49cee6 100644
5166--- a/arch/sh/kernel/cpu/shmobile/pm.c
5167+++ b/arch/sh/kernel/cpu/shmobile/pm.c
5168@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
5169 return 0;
5170 }
5171
5172-static struct platform_suspend_ops sh_pm_ops = {
5173+static const struct platform_suspend_ops sh_pm_ops = {
5174 .enter = sh_pm_enter,
5175 .valid = suspend_valid_only_mem,
5176 };
5177diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
5178index 3e532d0..9faa306 100644
5179--- a/arch/sh/kernel/kgdb.c
5180+++ b/arch/sh/kernel/kgdb.c
5181@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
5182 {
5183 }
5184
5185-struct kgdb_arch arch_kgdb_ops = {
5186+const struct kgdb_arch arch_kgdb_ops = {
5187 /* Breakpoint instruction: trapa #0x3c */
5188 #ifdef CONFIG_CPU_LITTLE_ENDIAN
5189 .gdb_bpt_instr = { 0x3c, 0xc3 },
5190diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5191index afeb710..d1d1289 100644
5192--- a/arch/sh/mm/mmap.c
5193+++ b/arch/sh/mm/mmap.c
5194@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5195 addr = PAGE_ALIGN(addr);
5196
5197 vma = find_vma(mm, addr);
5198- if (TASK_SIZE - len >= addr &&
5199- (!vma || addr + len <= vma->vm_start))
5200+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5201 return addr;
5202 }
5203
5204@@ -106,7 +105,7 @@ full_search:
5205 }
5206 return -ENOMEM;
5207 }
5208- if (likely(!vma || addr + len <= vma->vm_start)) {
5209+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5210 /*
5211 * Remember the place where we stopped the search:
5212 */
5213@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5214 addr = PAGE_ALIGN(addr);
5215
5216 vma = find_vma(mm, addr);
5217- if (TASK_SIZE - len >= addr &&
5218- (!vma || addr + len <= vma->vm_start))
5219+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5220 return addr;
5221 }
5222
5223@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5224 /* make sure it can fit in the remaining address space */
5225 if (likely(addr > len)) {
5226 vma = find_vma(mm, addr-len);
5227- if (!vma || addr <= vma->vm_start) {
5228+ if (check_heap_stack_gap(vma, addr - len, len)) {
5229 /* remember the address as a hint for next time */
5230 return (mm->free_area_cache = addr-len);
5231 }
5232@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5233 if (unlikely(mm->mmap_base < len))
5234 goto bottomup;
5235
5236- addr = mm->mmap_base-len;
5237- if (do_colour_align)
5238- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5239+ addr = mm->mmap_base - len;
5240
5241 do {
5242+ if (do_colour_align)
5243+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5244 /*
5245 * Lookup failure means no vma is above this address,
5246 * else if new region fits below vma->vm_start,
5247 * return with success:
5248 */
5249 vma = find_vma(mm, addr);
5250- if (likely(!vma || addr+len <= vma->vm_start)) {
5251+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5252 /* remember the address as a hint for next time */
5253 return (mm->free_area_cache = addr);
5254 }
5255@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5256 mm->cached_hole_size = vma->vm_start - addr;
5257
5258 /* try just below the current vma->vm_start */
5259- addr = vma->vm_start-len;
5260- if (do_colour_align)
5261- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5262- } while (likely(len < vma->vm_start));
5263+ addr = skip_heap_stack_gap(vma, len);
5264+ } while (!IS_ERR_VALUE(addr));
5265
5266 bottomup:
5267 /*
5268diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
5269index 05ef538..dc9c857 100644
5270--- a/arch/sparc/Kconfig
5271+++ b/arch/sparc/Kconfig
5272@@ -32,6 +32,7 @@ config SPARC
5273
5274 config SPARC32
5275 def_bool !64BIT
5276+ select GENERIC_ATOMIC64
5277
5278 config SPARC64
5279 def_bool 64BIT
5280diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5281index 113225b..7fd04e7 100644
5282--- a/arch/sparc/Makefile
5283+++ b/arch/sparc/Makefile
5284@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5285 # Export what is needed by arch/sparc/boot/Makefile
5286 export VMLINUX_INIT VMLINUX_MAIN
5287 VMLINUX_INIT := $(head-y) $(init-y)
5288-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5289+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5290 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5291 VMLINUX_MAIN += $(drivers-y) $(net-y)
5292
5293diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
5294index f0d343c..cf36e68 100644
5295--- a/arch/sparc/include/asm/atomic_32.h
5296+++ b/arch/sparc/include/asm/atomic_32.h
5297@@ -13,6 +13,8 @@
5298
5299 #include <linux/types.h>
5300
5301+#include <asm-generic/atomic64.h>
5302+
5303 #ifdef __KERNEL__
5304
5305 #include <asm/system.h>
5306diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5307index f5cc06f..f858d47 100644
5308--- a/arch/sparc/include/asm/atomic_64.h
5309+++ b/arch/sparc/include/asm/atomic_64.h
5310@@ -14,18 +14,40 @@
5311 #define ATOMIC64_INIT(i) { (i) }
5312
5313 #define atomic_read(v) ((v)->counter)
5314+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5315+{
5316+ return v->counter;
5317+}
5318 #define atomic64_read(v) ((v)->counter)
5319+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5320+{
5321+ return v->counter;
5322+}
5323
5324 #define atomic_set(v, i) (((v)->counter) = i)
5325+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5326+{
5327+ v->counter = i;
5328+}
5329 #define atomic64_set(v, i) (((v)->counter) = i)
5330+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5331+{
5332+ v->counter = i;
5333+}
5334
5335 extern void atomic_add(int, atomic_t *);
5336+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5337 extern void atomic64_add(long, atomic64_t *);
5338+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5339 extern void atomic_sub(int, atomic_t *);
5340+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5341 extern void atomic64_sub(long, atomic64_t *);
5342+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5343
5344 extern int atomic_add_ret(int, atomic_t *);
5345+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5346 extern long atomic64_add_ret(long, atomic64_t *);
5347+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5348 extern int atomic_sub_ret(int, atomic_t *);
5349 extern long atomic64_sub_ret(long, atomic64_t *);
5350
5351@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5352 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5353
5354 #define atomic_inc_return(v) atomic_add_ret(1, v)
5355+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5356+{
5357+ return atomic_add_ret_unchecked(1, v);
5358+}
5359 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5360+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5361+{
5362+ return atomic64_add_ret_unchecked(1, v);
5363+}
5364
5365 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5366 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5367
5368 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5369+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5370+{
5371+ return atomic_add_ret_unchecked(i, v);
5372+}
5373 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5374+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5375+{
5376+ return atomic64_add_ret_unchecked(i, v);
5377+}
5378
5379 /*
5380 * atomic_inc_and_test - increment and test
5381@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5382 * other cases.
5383 */
5384 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5385+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5386+{
5387+ return atomic_inc_return_unchecked(v) == 0;
5388+}
5389 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5390
5391 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5392@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5393 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5394
5395 #define atomic_inc(v) atomic_add(1, v)
5396+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5397+{
5398+ atomic_add_unchecked(1, v);
5399+}
5400 #define atomic64_inc(v) atomic64_add(1, v)
5401+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5402+{
5403+ atomic64_add_unchecked(1, v);
5404+}
5405
5406 #define atomic_dec(v) atomic_sub(1, v)
5407+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5408+{
5409+ atomic_sub_unchecked(1, v);
5410+}
5411 #define atomic64_dec(v) atomic64_sub(1, v)
5412+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5413+{
5414+ atomic64_sub_unchecked(1, v);
5415+}
5416
5417 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5418 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5419
5420 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5421+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5422+{
5423+ return cmpxchg(&v->counter, old, new);
5424+}
5425 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5426+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5427+{
5428+ return xchg(&v->counter, new);
5429+}
5430
5431 static inline int atomic_add_unless(atomic_t *v, int a, int u)
5432 {
5433- int c, old;
5434+ int c, old, new;
5435 c = atomic_read(v);
5436 for (;;) {
5437- if (unlikely(c == (u)))
5438+ if (unlikely(c == u))
5439 break;
5440- old = atomic_cmpxchg((v), c, c + (a));
5441+
5442+ asm volatile("addcc %2, %0, %0\n"
5443+
5444+#ifdef CONFIG_PAX_REFCOUNT
5445+ "tvs %%icc, 6\n"
5446+#endif
5447+
5448+ : "=r" (new)
5449+ : "0" (c), "ir" (a)
5450+ : "cc");
5451+
5452+ old = atomic_cmpxchg(v, c, new);
5453 if (likely(old == c))
5454 break;
5455 c = old;
5456 }
5457- return c != (u);
5458+ return c != u;
5459 }
5460
5461 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
5462@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
5463 #define atomic64_cmpxchg(v, o, n) \
5464 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5465 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5466+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5467+{
5468+ return xchg(&v->counter, new);
5469+}
5470
5471 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5472 {
5473- long c, old;
5474+ long c, old, new;
5475 c = atomic64_read(v);
5476 for (;;) {
5477- if (unlikely(c == (u)))
5478+ if (unlikely(c == u))
5479 break;
5480- old = atomic64_cmpxchg((v), c, c + (a));
5481+
5482+ asm volatile("addcc %2, %0, %0\n"
5483+
5484+#ifdef CONFIG_PAX_REFCOUNT
5485+ "tvs %%xcc, 6\n"
5486+#endif
5487+
5488+ : "=r" (new)
5489+ : "0" (c), "ir" (a)
5490+ : "cc");
5491+
5492+ old = atomic64_cmpxchg(v, c, new);
5493 if (likely(old == c))
5494 break;
5495 c = old;
5496 }
5497- return c != (u);
5498+ return c != u;
5499 }
5500
5501 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5502diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5503index 41f85ae..73b80b5 100644
5504--- a/arch/sparc/include/asm/cache.h
5505+++ b/arch/sparc/include/asm/cache.h
5506@@ -7,8 +7,10 @@
5507 #ifndef _SPARC_CACHE_H
5508 #define _SPARC_CACHE_H
5509
5510+#include <linux/const.h>
5511+
5512 #define L1_CACHE_SHIFT 5
5513-#define L1_CACHE_BYTES 32
5514+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5515 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
5516
5517 #ifdef CONFIG_SPARC32
5518diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
5519index 5a8c308..38def92 100644
5520--- a/arch/sparc/include/asm/dma-mapping.h
5521+++ b/arch/sparc/include/asm/dma-mapping.h
5522@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
5523 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
5524 #define dma_is_consistent(d, h) (1)
5525
5526-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
5527+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
5528 extern struct bus_type pci_bus_type;
5529
5530-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5531+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
5532 {
5533 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
5534 if (dev->bus == &pci_bus_type)
5535@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5536 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5537 dma_addr_t *dma_handle, gfp_t flag)
5538 {
5539- struct dma_map_ops *ops = get_dma_ops(dev);
5540+ const struct dma_map_ops *ops = get_dma_ops(dev);
5541 void *cpu_addr;
5542
5543 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
5544@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5545 static inline void dma_free_coherent(struct device *dev, size_t size,
5546 void *cpu_addr, dma_addr_t dma_handle)
5547 {
5548- struct dma_map_ops *ops = get_dma_ops(dev);
5549+ const struct dma_map_ops *ops = get_dma_ops(dev);
5550
5551 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
5552 ops->free_coherent(dev, size, cpu_addr, dma_handle);
5553diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5554index 381a1b5..b97e3ff 100644
5555--- a/arch/sparc/include/asm/elf_32.h
5556+++ b/arch/sparc/include/asm/elf_32.h
5557@@ -116,6 +116,13 @@ typedef struct {
5558
5559 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5560
5561+#ifdef CONFIG_PAX_ASLR
5562+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5563+
5564+#define PAX_DELTA_MMAP_LEN 16
5565+#define PAX_DELTA_STACK_LEN 16
5566+#endif
5567+
5568 /* This yields a mask that user programs can use to figure out what
5569 instruction set this cpu supports. This can NOT be done in userspace
5570 on Sparc. */
5571diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5572index 9968085..c2106ef 100644
5573--- a/arch/sparc/include/asm/elf_64.h
5574+++ b/arch/sparc/include/asm/elf_64.h
5575@@ -163,6 +163,12 @@ typedef struct {
5576 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5577 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5578
5579+#ifdef CONFIG_PAX_ASLR
5580+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5581+
5582+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5583+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5584+#endif
5585
5586 /* This yields a mask that user programs can use to figure out what
5587 instruction set this cpu supports. */
5588diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
5589index 156707b..aefa786 100644
5590--- a/arch/sparc/include/asm/page_32.h
5591+++ b/arch/sparc/include/asm/page_32.h
5592@@ -8,6 +8,8 @@
5593 #ifndef _SPARC_PAGE_H
5594 #define _SPARC_PAGE_H
5595
5596+#include <linux/const.h>
5597+
5598 #define PAGE_SHIFT 12
5599
5600 #ifndef __ASSEMBLY__
5601diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5602index e0cabe7..efd60f1 100644
5603--- a/arch/sparc/include/asm/pgtable_32.h
5604+++ b/arch/sparc/include/asm/pgtable_32.h
5605@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5606 BTFIXUPDEF_INT(page_none)
5607 BTFIXUPDEF_INT(page_copy)
5608 BTFIXUPDEF_INT(page_readonly)
5609+
5610+#ifdef CONFIG_PAX_PAGEEXEC
5611+BTFIXUPDEF_INT(page_shared_noexec)
5612+BTFIXUPDEF_INT(page_copy_noexec)
5613+BTFIXUPDEF_INT(page_readonly_noexec)
5614+#endif
5615+
5616 BTFIXUPDEF_INT(page_kernel)
5617
5618 #define PMD_SHIFT SUN4C_PMD_SHIFT
5619@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
5620 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5621 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5622
5623+#ifdef CONFIG_PAX_PAGEEXEC
5624+extern pgprot_t PAGE_SHARED_NOEXEC;
5625+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5626+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5627+#else
5628+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5629+# define PAGE_COPY_NOEXEC PAGE_COPY
5630+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5631+#endif
5632+
5633 extern unsigned long page_kernel;
5634
5635 #ifdef MODULE
5636diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5637index 1407c07..7e10231 100644
5638--- a/arch/sparc/include/asm/pgtsrmmu.h
5639+++ b/arch/sparc/include/asm/pgtsrmmu.h
5640@@ -115,6 +115,13 @@
5641 SRMMU_EXEC | SRMMU_REF)
5642 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5643 SRMMU_EXEC | SRMMU_REF)
5644+
5645+#ifdef CONFIG_PAX_PAGEEXEC
5646+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5647+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5648+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5649+#endif
5650+
5651 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5652 SRMMU_DIRTY | SRMMU_REF)
5653
5654diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5655index 43e5147..47622a1 100644
5656--- a/arch/sparc/include/asm/spinlock_64.h
5657+++ b/arch/sparc/include/asm/spinlock_64.h
5658@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
5659
5660 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5661
5662-static void inline arch_read_lock(raw_rwlock_t *lock)
5663+static inline void arch_read_lock(raw_rwlock_t *lock)
5664 {
5665 unsigned long tmp1, tmp2;
5666
5667 __asm__ __volatile__ (
5668 "1: ldsw [%2], %0\n"
5669 " brlz,pn %0, 2f\n"
5670-"4: add %0, 1, %1\n"
5671+"4: addcc %0, 1, %1\n"
5672+
5673+#ifdef CONFIG_PAX_REFCOUNT
5674+" tvs %%icc, 6\n"
5675+#endif
5676+
5677 " cas [%2], %0, %1\n"
5678 " cmp %0, %1\n"
5679 " bne,pn %%icc, 1b\n"
5680@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
5681 " .previous"
5682 : "=&r" (tmp1), "=&r" (tmp2)
5683 : "r" (lock)
5684- : "memory");
5685+ : "memory", "cc");
5686 }
5687
5688-static int inline arch_read_trylock(raw_rwlock_t *lock)
5689+static inline int arch_read_trylock(raw_rwlock_t *lock)
5690 {
5691 int tmp1, tmp2;
5692
5693@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5694 "1: ldsw [%2], %0\n"
5695 " brlz,a,pn %0, 2f\n"
5696 " mov 0, %0\n"
5697-" add %0, 1, %1\n"
5698+" addcc %0, 1, %1\n"
5699+
5700+#ifdef CONFIG_PAX_REFCOUNT
5701+" tvs %%icc, 6\n"
5702+#endif
5703+
5704 " cas [%2], %0, %1\n"
5705 " cmp %0, %1\n"
5706 " bne,pn %%icc, 1b\n"
5707@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5708 return tmp1;
5709 }
5710
5711-static void inline arch_read_unlock(raw_rwlock_t *lock)
5712+static inline void arch_read_unlock(raw_rwlock_t *lock)
5713 {
5714 unsigned long tmp1, tmp2;
5715
5716 __asm__ __volatile__(
5717 "1: lduw [%2], %0\n"
5718-" sub %0, 1, %1\n"
5719+" subcc %0, 1, %1\n"
5720+
5721+#ifdef CONFIG_PAX_REFCOUNT
5722+" tvs %%icc, 6\n"
5723+#endif
5724+
5725 " cas [%2], %0, %1\n"
5726 " cmp %0, %1\n"
5727 " bne,pn %%xcc, 1b\n"
5728@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
5729 : "memory");
5730 }
5731
5732-static void inline arch_write_lock(raw_rwlock_t *lock)
5733+static inline void arch_write_lock(raw_rwlock_t *lock)
5734 {
5735 unsigned long mask, tmp1, tmp2;
5736
5737@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
5738 : "memory");
5739 }
5740
5741-static void inline arch_write_unlock(raw_rwlock_t *lock)
5742+static inline void arch_write_unlock(raw_rwlock_t *lock)
5743 {
5744 __asm__ __volatile__(
5745 " stw %%g0, [%0]"
5746@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
5747 : "memory");
5748 }
5749
5750-static int inline arch_write_trylock(raw_rwlock_t *lock)
5751+static inline int arch_write_trylock(raw_rwlock_t *lock)
5752 {
5753 unsigned long mask, tmp1, tmp2, result;
5754
5755diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5756index 844d73a..f787fb9 100644
5757--- a/arch/sparc/include/asm/thread_info_32.h
5758+++ b/arch/sparc/include/asm/thread_info_32.h
5759@@ -50,6 +50,8 @@ struct thread_info {
5760 unsigned long w_saved;
5761
5762 struct restart_block restart_block;
5763+
5764+ unsigned long lowest_stack;
5765 };
5766
5767 /*
5768diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5769index f78ad9a..9f55fc7 100644
5770--- a/arch/sparc/include/asm/thread_info_64.h
5771+++ b/arch/sparc/include/asm/thread_info_64.h
5772@@ -68,6 +68,8 @@ struct thread_info {
5773 struct pt_regs *kern_una_regs;
5774 unsigned int kern_una_insn;
5775
5776+ unsigned long lowest_stack;
5777+
5778 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5779 };
5780
5781diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5782index e88fbe5..96b0ce5 100644
5783--- a/arch/sparc/include/asm/uaccess.h
5784+++ b/arch/sparc/include/asm/uaccess.h
5785@@ -1,5 +1,13 @@
5786 #ifndef ___ASM_SPARC_UACCESS_H
5787 #define ___ASM_SPARC_UACCESS_H
5788+
5789+#ifdef __KERNEL__
5790+#ifndef __ASSEMBLY__
5791+#include <linux/types.h>
5792+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5793+#endif
5794+#endif
5795+
5796 #if defined(__sparc__) && defined(__arch64__)
5797 #include <asm/uaccess_64.h>
5798 #else
5799diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5800index 8303ac4..07f333d 100644
5801--- a/arch/sparc/include/asm/uaccess_32.h
5802+++ b/arch/sparc/include/asm/uaccess_32.h
5803@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5804
5805 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5806 {
5807- if (n && __access_ok((unsigned long) to, n))
5808+ if ((long)n < 0)
5809+ return n;
5810+
5811+ if (n && __access_ok((unsigned long) to, n)) {
5812+ if (!__builtin_constant_p(n))
5813+ check_object_size(from, n, true);
5814 return __copy_user(to, (__force void __user *) from, n);
5815- else
5816+ } else
5817 return n;
5818 }
5819
5820 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5821 {
5822+ if ((long)n < 0)
5823+ return n;
5824+
5825+ if (!__builtin_constant_p(n))
5826+ check_object_size(from, n, true);
5827+
5828 return __copy_user(to, (__force void __user *) from, n);
5829 }
5830
5831 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5832 {
5833- if (n && __access_ok((unsigned long) from, n))
5834+ if ((long)n < 0)
5835+ return n;
5836+
5837+ if (n && __access_ok((unsigned long) from, n)) {
5838+ if (!__builtin_constant_p(n))
5839+ check_object_size(to, n, false);
5840 return __copy_user((__force void __user *) to, from, n);
5841- else
5842+ } else
5843 return n;
5844 }
5845
5846 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5847 {
5848+ if ((long)n < 0)
5849+ return n;
5850+
5851 return __copy_user((__force void __user *) to, from, n);
5852 }
5853
5854diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5855index 9ea271e..7b8a271 100644
5856--- a/arch/sparc/include/asm/uaccess_64.h
5857+++ b/arch/sparc/include/asm/uaccess_64.h
5858@@ -9,6 +9,7 @@
5859 #include <linux/compiler.h>
5860 #include <linux/string.h>
5861 #include <linux/thread_info.h>
5862+#include <linux/kernel.h>
5863 #include <asm/asi.h>
5864 #include <asm/system.h>
5865 #include <asm/spitfire.h>
5866@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5867 static inline unsigned long __must_check
5868 copy_from_user(void *to, const void __user *from, unsigned long size)
5869 {
5870- unsigned long ret = ___copy_from_user(to, from, size);
5871+ unsigned long ret;
5872
5873+ if ((long)size < 0 || size > INT_MAX)
5874+ return size;
5875+
5876+ if (!__builtin_constant_p(size))
5877+ check_object_size(to, size, false);
5878+
5879+ ret = ___copy_from_user(to, from, size);
5880 if (unlikely(ret))
5881 ret = copy_from_user_fixup(to, from, size);
5882 return ret;
5883@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5884 static inline unsigned long __must_check
5885 copy_to_user(void __user *to, const void *from, unsigned long size)
5886 {
5887- unsigned long ret = ___copy_to_user(to, from, size);
5888+ unsigned long ret;
5889
5890+ if ((long)size < 0 || size > INT_MAX)
5891+ return size;
5892+
5893+ if (!__builtin_constant_p(size))
5894+ check_object_size(from, size, true);
5895+
5896+ ret = ___copy_to_user(to, from, size);
5897 if (unlikely(ret))
5898 ret = copy_to_user_fixup(to, from, size);
5899 return ret;
5900diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5901index 2782681..77ded84 100644
5902--- a/arch/sparc/kernel/Makefile
5903+++ b/arch/sparc/kernel/Makefile
5904@@ -3,7 +3,7 @@
5905 #
5906
5907 asflags-y := -ansi
5908-ccflags-y := -Werror
5909+#ccflags-y := -Werror
5910
5911 extra-y := head_$(BITS).o
5912 extra-y += init_task.o
5913diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5914index 7690cc2..ece64c9 100644
5915--- a/arch/sparc/kernel/iommu.c
5916+++ b/arch/sparc/kernel/iommu.c
5917@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5918 spin_unlock_irqrestore(&iommu->lock, flags);
5919 }
5920
5921-static struct dma_map_ops sun4u_dma_ops = {
5922+static const struct dma_map_ops sun4u_dma_ops = {
5923 .alloc_coherent = dma_4u_alloc_coherent,
5924 .free_coherent = dma_4u_free_coherent,
5925 .map_page = dma_4u_map_page,
5926@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5927 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5928 };
5929
5930-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5931+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5932 EXPORT_SYMBOL(dma_ops);
5933
5934 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5935diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5936index 9f61fd8..bd048db 100644
5937--- a/arch/sparc/kernel/ioport.c
5938+++ b/arch/sparc/kernel/ioport.c
5939@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5940 BUG();
5941 }
5942
5943-struct dma_map_ops sbus_dma_ops = {
5944+const struct dma_map_ops sbus_dma_ops = {
5945 .alloc_coherent = sbus_alloc_coherent,
5946 .free_coherent = sbus_free_coherent,
5947 .map_page = sbus_map_page,
5948@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5949 .sync_sg_for_device = sbus_sync_sg_for_device,
5950 };
5951
5952-struct dma_map_ops *dma_ops = &sbus_dma_ops;
5953+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5954 EXPORT_SYMBOL(dma_ops);
5955
5956 static int __init sparc_register_ioport(void)
5957@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5958 }
5959 }
5960
5961-struct dma_map_ops pci32_dma_ops = {
5962+const struct dma_map_ops pci32_dma_ops = {
5963 .alloc_coherent = pci32_alloc_coherent,
5964 .free_coherent = pci32_free_coherent,
5965 .map_page = pci32_map_page,
5966diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5967index 04df4ed..55c4b6e 100644
5968--- a/arch/sparc/kernel/kgdb_32.c
5969+++ b/arch/sparc/kernel/kgdb_32.c
5970@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5971 {
5972 }
5973
5974-struct kgdb_arch arch_kgdb_ops = {
5975+const struct kgdb_arch arch_kgdb_ops = {
5976 /* Breakpoint instruction: ta 0x7d */
5977 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5978 };
5979diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5980index f5a0fd4..d886f71 100644
5981--- a/arch/sparc/kernel/kgdb_64.c
5982+++ b/arch/sparc/kernel/kgdb_64.c
5983@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5984 {
5985 }
5986
5987-struct kgdb_arch arch_kgdb_ops = {
5988+const struct kgdb_arch arch_kgdb_ops = {
5989 /* Breakpoint instruction: ta 0x72 */
5990 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5991 };
5992diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5993index 23c33ff..d137fbd 100644
5994--- a/arch/sparc/kernel/pci_sun4v.c
5995+++ b/arch/sparc/kernel/pci_sun4v.c
5996@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5997 spin_unlock_irqrestore(&iommu->lock, flags);
5998 }
5999
6000-static struct dma_map_ops sun4v_dma_ops = {
6001+static const struct dma_map_ops sun4v_dma_ops = {
6002 .alloc_coherent = dma_4v_alloc_coherent,
6003 .free_coherent = dma_4v_free_coherent,
6004 .map_page = dma_4v_map_page,
6005diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6006index c49865b..b41a81b 100644
6007--- a/arch/sparc/kernel/process_32.c
6008+++ b/arch/sparc/kernel/process_32.c
6009@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
6010 rw->ins[4], rw->ins[5],
6011 rw->ins[6],
6012 rw->ins[7]);
6013- printk("%pS\n", (void *) rw->ins[7]);
6014+ printk("%pA\n", (void *) rw->ins[7]);
6015 rw = (struct reg_window32 *) rw->ins[6];
6016 }
6017 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
6018@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
6019
6020 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6021 r->psr, r->pc, r->npc, r->y, print_tainted());
6022- printk("PC: <%pS>\n", (void *) r->pc);
6023+ printk("PC: <%pA>\n", (void *) r->pc);
6024 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6025 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6026 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6027 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6028 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6029 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6030- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6031+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6032
6033 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6034 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6035@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6036 rw = (struct reg_window32 *) fp;
6037 pc = rw->ins[7];
6038 printk("[%08lx : ", pc);
6039- printk("%pS ] ", (void *) pc);
6040+ printk("%pA ] ", (void *) pc);
6041 fp = rw->ins[6];
6042 } while (++count < 16);
6043 printk("\n");
6044diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6045index cb70476..3d0c191 100644
6046--- a/arch/sparc/kernel/process_64.c
6047+++ b/arch/sparc/kernel/process_64.c
6048@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
6049 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6050 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6051 if (regs->tstate & TSTATE_PRIV)
6052- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6053+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6054 }
6055
6056 void show_regs(struct pt_regs *regs)
6057 {
6058 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6059 regs->tpc, regs->tnpc, regs->y, print_tainted());
6060- printk("TPC: <%pS>\n", (void *) regs->tpc);
6061+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6062 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6063 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6064 regs->u_regs[3]);
6065@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
6066 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6067 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6068 regs->u_regs[15]);
6069- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6070+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6071 show_regwindow(regs);
6072 }
6073
6074@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
6075 ((tp && tp->task) ? tp->task->pid : -1));
6076
6077 if (gp->tstate & TSTATE_PRIV) {
6078- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6079+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6080 (void *) gp->tpc,
6081 (void *) gp->o7,
6082 (void *) gp->i7,
6083diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
6084index 6edc4e5..06a69b4 100644
6085--- a/arch/sparc/kernel/sigutil_64.c
6086+++ b/arch/sparc/kernel/sigutil_64.c
6087@@ -2,6 +2,7 @@
6088 #include <linux/types.h>
6089 #include <linux/thread_info.h>
6090 #include <linux/uaccess.h>
6091+#include <linux/errno.h>
6092
6093 #include <asm/sigcontext.h>
6094 #include <asm/fpumacro.h>
6095diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6096index 3a82e65..ce0a53a 100644
6097--- a/arch/sparc/kernel/sys_sparc_32.c
6098+++ b/arch/sparc/kernel/sys_sparc_32.c
6099@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6100 if (ARCH_SUN4C && len > 0x20000000)
6101 return -ENOMEM;
6102 if (!addr)
6103- addr = TASK_UNMAPPED_BASE;
6104+ addr = current->mm->mmap_base;
6105
6106 if (flags & MAP_SHARED)
6107 addr = COLOUR_ALIGN(addr);
6108@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6109 }
6110 if (TASK_SIZE - PAGE_SIZE - len < addr)
6111 return -ENOMEM;
6112- if (!vmm || addr + len <= vmm->vm_start)
6113+ if (check_heap_stack_gap(vmm, addr, len))
6114 return addr;
6115 addr = vmm->vm_end;
6116 if (flags & MAP_SHARED)
6117diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6118index cfa0e19..98972ac 100644
6119--- a/arch/sparc/kernel/sys_sparc_64.c
6120+++ b/arch/sparc/kernel/sys_sparc_64.c
6121@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6122 /* We do not accept a shared mapping if it would violate
6123 * cache aliasing constraints.
6124 */
6125- if ((flags & MAP_SHARED) &&
6126+ if ((filp || (flags & MAP_SHARED)) &&
6127 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6128 return -EINVAL;
6129 return addr;
6130@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6131 if (filp || (flags & MAP_SHARED))
6132 do_color_align = 1;
6133
6134+#ifdef CONFIG_PAX_RANDMMAP
6135+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6136+#endif
6137+
6138 if (addr) {
6139 if (do_color_align)
6140 addr = COLOUR_ALIGN(addr, pgoff);
6141@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6142 addr = PAGE_ALIGN(addr);
6143
6144 vma = find_vma(mm, addr);
6145- if (task_size - len >= addr &&
6146- (!vma || addr + len <= vma->vm_start))
6147+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6148 return addr;
6149 }
6150
6151 if (len > mm->cached_hole_size) {
6152- start_addr = addr = mm->free_area_cache;
6153+ start_addr = addr = mm->free_area_cache;
6154 } else {
6155- start_addr = addr = TASK_UNMAPPED_BASE;
6156+ start_addr = addr = mm->mmap_base;
6157 mm->cached_hole_size = 0;
6158 }
6159
6160@@ -175,14 +178,14 @@ full_search:
6161 vma = find_vma(mm, VA_EXCLUDE_END);
6162 }
6163 if (unlikely(task_size < addr)) {
6164- if (start_addr != TASK_UNMAPPED_BASE) {
6165- start_addr = addr = TASK_UNMAPPED_BASE;
6166+ if (start_addr != mm->mmap_base) {
6167+ start_addr = addr = mm->mmap_base;
6168 mm->cached_hole_size = 0;
6169 goto full_search;
6170 }
6171 return -ENOMEM;
6172 }
6173- if (likely(!vma || addr + len <= vma->vm_start)) {
6174+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6175 /*
6176 * Remember the place where we stopped the search:
6177 */
6178@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6179 /* We do not accept a shared mapping if it would violate
6180 * cache aliasing constraints.
6181 */
6182- if ((flags & MAP_SHARED) &&
6183+ if ((filp || (flags & MAP_SHARED)) &&
6184 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6185 return -EINVAL;
6186 return addr;
6187@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6188 addr = PAGE_ALIGN(addr);
6189
6190 vma = find_vma(mm, addr);
6191- if (task_size - len >= addr &&
6192- (!vma || addr + len <= vma->vm_start))
6193+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6194 return addr;
6195 }
6196
6197@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6198 /* make sure it can fit in the remaining address space */
6199 if (likely(addr > len)) {
6200 vma = find_vma(mm, addr-len);
6201- if (!vma || addr <= vma->vm_start) {
6202+ if (check_heap_stack_gap(vma, addr - len, len)) {
6203 /* remember the address as a hint for next time */
6204 return (mm->free_area_cache = addr-len);
6205 }
6206@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6207 if (unlikely(mm->mmap_base < len))
6208 goto bottomup;
6209
6210- addr = mm->mmap_base-len;
6211- if (do_color_align)
6212- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6213+ addr = mm->mmap_base - len;
6214
6215 do {
6216+ if (do_color_align)
6217+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6218 /*
6219 * Lookup failure means no vma is above this address,
6220 * else if new region fits below vma->vm_start,
6221 * return with success:
6222 */
6223 vma = find_vma(mm, addr);
6224- if (likely(!vma || addr+len <= vma->vm_start)) {
6225+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6226 /* remember the address as a hint for next time */
6227 return (mm->free_area_cache = addr);
6228 }
6229@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6230 mm->cached_hole_size = vma->vm_start - addr;
6231
6232 /* try just below the current vma->vm_start */
6233- addr = vma->vm_start-len;
6234- if (do_color_align)
6235- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6236- } while (likely(len < vma->vm_start));
6237+ addr = skip_heap_stack_gap(vma, len);
6238+ } while (!IS_ERR_VALUE(addr));
6239
6240 bottomup:
6241 /*
6242@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6243 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
6244 sysctl_legacy_va_layout) {
6245 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6246+
6247+#ifdef CONFIG_PAX_RANDMMAP
6248+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6249+ mm->mmap_base += mm->delta_mmap;
6250+#endif
6251+
6252 mm->get_unmapped_area = arch_get_unmapped_area;
6253 mm->unmap_area = arch_unmap_area;
6254 } else {
6255@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6256 gap = (task_size / 6 * 5);
6257
6258 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6259+
6260+#ifdef CONFIG_PAX_RANDMMAP
6261+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6262+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6263+#endif
6264+
6265 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6266 mm->unmap_area = arch_unmap_area_topdown;
6267 }
6268diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6269index c0490c7..84959d1 100644
6270--- a/arch/sparc/kernel/traps_32.c
6271+++ b/arch/sparc/kernel/traps_32.c
6272@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6273 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6274 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6275
6276+extern void gr_handle_kernel_exploit(void);
6277+
6278 void die_if_kernel(char *str, struct pt_regs *regs)
6279 {
6280 static int die_counter;
6281@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6282 count++ < 30 &&
6283 (((unsigned long) rw) >= PAGE_OFFSET) &&
6284 !(((unsigned long) rw) & 0x7)) {
6285- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6286+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6287 (void *) rw->ins[7]);
6288 rw = (struct reg_window32 *)rw->ins[6];
6289 }
6290 }
6291 printk("Instruction DUMP:");
6292 instruction_dump ((unsigned long *) regs->pc);
6293- if(regs->psr & PSR_PS)
6294+ if(regs->psr & PSR_PS) {
6295+ gr_handle_kernel_exploit();
6296 do_exit(SIGKILL);
6297+ }
6298 do_exit(SIGSEGV);
6299 }
6300
6301diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6302index 10f7bb9..cdb6793 100644
6303--- a/arch/sparc/kernel/traps_64.c
6304+++ b/arch/sparc/kernel/traps_64.c
6305@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6306 i + 1,
6307 p->trapstack[i].tstate, p->trapstack[i].tpc,
6308 p->trapstack[i].tnpc, p->trapstack[i].tt);
6309- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6310+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6311 }
6312 }
6313
6314@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6315
6316 lvl -= 0x100;
6317 if (regs->tstate & TSTATE_PRIV) {
6318+
6319+#ifdef CONFIG_PAX_REFCOUNT
6320+ if (lvl == 6)
6321+ pax_report_refcount_overflow(regs);
6322+#endif
6323+
6324 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6325 die_if_kernel(buffer, regs);
6326 }
6327@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6328 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6329 {
6330 char buffer[32];
6331-
6332+
6333 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6334 0, lvl, SIGTRAP) == NOTIFY_STOP)
6335 return;
6336
6337+#ifdef CONFIG_PAX_REFCOUNT
6338+ if (lvl == 6)
6339+ pax_report_refcount_overflow(regs);
6340+#endif
6341+
6342 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6343
6344 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6345@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6346 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6347 printk("%s" "ERROR(%d): ",
6348 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6349- printk("TPC<%pS>\n", (void *) regs->tpc);
6350+ printk("TPC<%pA>\n", (void *) regs->tpc);
6351 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6352 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6353 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6354@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6355 smp_processor_id(),
6356 (type & 0x1) ? 'I' : 'D',
6357 regs->tpc);
6358- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6359+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6360 panic("Irrecoverable Cheetah+ parity error.");
6361 }
6362
6363@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6364 smp_processor_id(),
6365 (type & 0x1) ? 'I' : 'D',
6366 regs->tpc);
6367- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6368+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6369 }
6370
6371 struct sun4v_error_entry {
6372@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6373
6374 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6375 regs->tpc, tl);
6376- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6377+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6378 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6379- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6380+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6381 (void *) regs->u_regs[UREG_I7]);
6382 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6383 "pte[%lx] error[%lx]\n",
6384@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6385
6386 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6387 regs->tpc, tl);
6388- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6389+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6390 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6391- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6392+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6393 (void *) regs->u_regs[UREG_I7]);
6394 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6395 "pte[%lx] error[%lx]\n",
6396@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6397 fp = (unsigned long)sf->fp + STACK_BIAS;
6398 }
6399
6400- printk(" [%016lx] %pS\n", pc, (void *) pc);
6401+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6402 } while (++count < 16);
6403 }
6404
6405@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6406 return (struct reg_window *) (fp + STACK_BIAS);
6407 }
6408
6409+extern void gr_handle_kernel_exploit(void);
6410+
6411 void die_if_kernel(char *str, struct pt_regs *regs)
6412 {
6413 static int die_counter;
6414@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6415 while (rw &&
6416 count++ < 30&&
6417 is_kernel_stack(current, rw)) {
6418- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6419+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6420 (void *) rw->ins[7]);
6421
6422 rw = kernel_stack_up(rw);
6423@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6424 }
6425 user_instruction_dump ((unsigned int __user *) regs->tpc);
6426 }
6427- if (regs->tstate & TSTATE_PRIV)
6428+ if (regs->tstate & TSTATE_PRIV) {
6429+ gr_handle_kernel_exploit();
6430 do_exit(SIGKILL);
6431+ }
6432+
6433 do_exit(SIGSEGV);
6434 }
6435 EXPORT_SYMBOL(die_if_kernel);
6436diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
6437index be183fe..1c8d332 100644
6438--- a/arch/sparc/kernel/una_asm_64.S
6439+++ b/arch/sparc/kernel/una_asm_64.S
6440@@ -127,7 +127,7 @@ do_int_load:
6441 wr %o5, 0x0, %asi
6442 retl
6443 mov 0, %o0
6444- .size __do_int_load, .-__do_int_load
6445+ .size do_int_load, .-do_int_load
6446
6447 .section __ex_table,"a"
6448 .word 4b, __retl_efault
6449diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6450index 3792099..2af17d8 100644
6451--- a/arch/sparc/kernel/unaligned_64.c
6452+++ b/arch/sparc/kernel/unaligned_64.c
6453@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
6454 if (count < 5) {
6455 last_time = jiffies;
6456 count++;
6457- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6458+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6459 regs->tpc, (void *) regs->tpc);
6460 }
6461 }
6462diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6463index e75faf0..24f12f9 100644
6464--- a/arch/sparc/lib/Makefile
6465+++ b/arch/sparc/lib/Makefile
6466@@ -2,7 +2,7 @@
6467 #
6468
6469 asflags-y := -ansi -DST_DIV0=0x02
6470-ccflags-y := -Werror
6471+#ccflags-y := -Werror
6472
6473 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6474 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6475diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6476index 0268210..f0291ca 100644
6477--- a/arch/sparc/lib/atomic_64.S
6478+++ b/arch/sparc/lib/atomic_64.S
6479@@ -18,7 +18,12 @@
6480 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: lduw [%o1], %g1
6483- add %g1, %o0, %g7
6484+ addcc %g1, %o0, %g7
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ tvs %icc, 6
6488+#endif
6489+
6490 cas [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %icc, 2f
6493@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6494 2: BACKOFF_SPIN(%o2, %o3, 1b)
6495 .size atomic_add, .-atomic_add
6496
6497+ .globl atomic_add_unchecked
6498+ .type atomic_add_unchecked,#function
6499+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6500+ BACKOFF_SETUP(%o2)
6501+1: lduw [%o1], %g1
6502+ add %g1, %o0, %g7
6503+ cas [%o1], %g1, %g7
6504+ cmp %g1, %g7
6505+ bne,pn %icc, 2f
6506+ nop
6507+ retl
6508+ nop
6509+2: BACKOFF_SPIN(%o2, %o3, 1b)
6510+ .size atomic_add_unchecked, .-atomic_add_unchecked
6511+
6512 .globl atomic_sub
6513 .type atomic_sub,#function
6514 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6515 BACKOFF_SETUP(%o2)
6516 1: lduw [%o1], %g1
6517- sub %g1, %o0, %g7
6518+ subcc %g1, %o0, %g7
6519+
6520+#ifdef CONFIG_PAX_REFCOUNT
6521+ tvs %icc, 6
6522+#endif
6523+
6524 cas [%o1], %g1, %g7
6525 cmp %g1, %g7
6526 bne,pn %icc, 2f
6527@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6528 2: BACKOFF_SPIN(%o2, %o3, 1b)
6529 .size atomic_sub, .-atomic_sub
6530
6531+ .globl atomic_sub_unchecked
6532+ .type atomic_sub_unchecked,#function
6533+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6534+ BACKOFF_SETUP(%o2)
6535+1: lduw [%o1], %g1
6536+ sub %g1, %o0, %g7
6537+ cas [%o1], %g1, %g7
6538+ cmp %g1, %g7
6539+ bne,pn %icc, 2f
6540+ nop
6541+ retl
6542+ nop
6543+2: BACKOFF_SPIN(%o2, %o3, 1b)
6544+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6545+
6546 .globl atomic_add_ret
6547 .type atomic_add_ret,#function
6548 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6549 BACKOFF_SETUP(%o2)
6550 1: lduw [%o1], %g1
6551- add %g1, %o0, %g7
6552+ addcc %g1, %o0, %g7
6553+
6554+#ifdef CONFIG_PAX_REFCOUNT
6555+ tvs %icc, 6
6556+#endif
6557+
6558 cas [%o1], %g1, %g7
6559 cmp %g1, %g7
6560 bne,pn %icc, 2f
6561@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6562 2: BACKOFF_SPIN(%o2, %o3, 1b)
6563 .size atomic_add_ret, .-atomic_add_ret
6564
6565+ .globl atomic_add_ret_unchecked
6566+ .type atomic_add_ret_unchecked,#function
6567+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6568+ BACKOFF_SETUP(%o2)
6569+1: lduw [%o1], %g1
6570+ addcc %g1, %o0, %g7
6571+ cas [%o1], %g1, %g7
6572+ cmp %g1, %g7
6573+ bne,pn %icc, 2f
6574+ add %g7, %o0, %g7
6575+ sra %g7, 0, %o0
6576+ retl
6577+ nop
6578+2: BACKOFF_SPIN(%o2, %o3, 1b)
6579+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6580+
6581 .globl atomic_sub_ret
6582 .type atomic_sub_ret,#function
6583 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6584 BACKOFF_SETUP(%o2)
6585 1: lduw [%o1], %g1
6586- sub %g1, %o0, %g7
6587+ subcc %g1, %o0, %g7
6588+
6589+#ifdef CONFIG_PAX_REFCOUNT
6590+ tvs %icc, 6
6591+#endif
6592+
6593 cas [%o1], %g1, %g7
6594 cmp %g1, %g7
6595 bne,pn %icc, 2f
6596@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6597 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6598 BACKOFF_SETUP(%o2)
6599 1: ldx [%o1], %g1
6600- add %g1, %o0, %g7
6601+ addcc %g1, %o0, %g7
6602+
6603+#ifdef CONFIG_PAX_REFCOUNT
6604+ tvs %xcc, 6
6605+#endif
6606+
6607 casx [%o1], %g1, %g7
6608 cmp %g1, %g7
6609 bne,pn %xcc, 2f
6610@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6611 2: BACKOFF_SPIN(%o2, %o3, 1b)
6612 .size atomic64_add, .-atomic64_add
6613
6614+ .globl atomic64_add_unchecked
6615+ .type atomic64_add_unchecked,#function
6616+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6617+ BACKOFF_SETUP(%o2)
6618+1: ldx [%o1], %g1
6619+ addcc %g1, %o0, %g7
6620+ casx [%o1], %g1, %g7
6621+ cmp %g1, %g7
6622+ bne,pn %xcc, 2f
6623+ nop
6624+ retl
6625+ nop
6626+2: BACKOFF_SPIN(%o2, %o3, 1b)
6627+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6628+
6629 .globl atomic64_sub
6630 .type atomic64_sub,#function
6631 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6632 BACKOFF_SETUP(%o2)
6633 1: ldx [%o1], %g1
6634- sub %g1, %o0, %g7
6635+ subcc %g1, %o0, %g7
6636+
6637+#ifdef CONFIG_PAX_REFCOUNT
6638+ tvs %xcc, 6
6639+#endif
6640+
6641 casx [%o1], %g1, %g7
6642 cmp %g1, %g7
6643 bne,pn %xcc, 2f
6644@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6645 2: BACKOFF_SPIN(%o2, %o3, 1b)
6646 .size atomic64_sub, .-atomic64_sub
6647
6648+ .globl atomic64_sub_unchecked
6649+ .type atomic64_sub_unchecked,#function
6650+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6651+ BACKOFF_SETUP(%o2)
6652+1: ldx [%o1], %g1
6653+ subcc %g1, %o0, %g7
6654+ casx [%o1], %g1, %g7
6655+ cmp %g1, %g7
6656+ bne,pn %xcc, 2f
6657+ nop
6658+ retl
6659+ nop
6660+2: BACKOFF_SPIN(%o2, %o3, 1b)
6661+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6662+
6663 .globl atomic64_add_ret
6664 .type atomic64_add_ret,#function
6665 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6666 BACKOFF_SETUP(%o2)
6667 1: ldx [%o1], %g1
6668- add %g1, %o0, %g7
6669+ addcc %g1, %o0, %g7
6670+
6671+#ifdef CONFIG_PAX_REFCOUNT
6672+ tvs %xcc, 6
6673+#endif
6674+
6675 casx [%o1], %g1, %g7
6676 cmp %g1, %g7
6677 bne,pn %xcc, 2f
6678@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6679 2: BACKOFF_SPIN(%o2, %o3, 1b)
6680 .size atomic64_add_ret, .-atomic64_add_ret
6681
6682+ .globl atomic64_add_ret_unchecked
6683+ .type atomic64_add_ret_unchecked,#function
6684+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6685+ BACKOFF_SETUP(%o2)
6686+1: ldx [%o1], %g1
6687+ addcc %g1, %o0, %g7
6688+ casx [%o1], %g1, %g7
6689+ cmp %g1, %g7
6690+ bne,pn %xcc, 2f
6691+ add %g7, %o0, %g7
6692+ mov %g7, %o0
6693+ retl
6694+ nop
6695+2: BACKOFF_SPIN(%o2, %o3, 1b)
6696+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6697+
6698 .globl atomic64_sub_ret
6699 .type atomic64_sub_ret,#function
6700 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6701 BACKOFF_SETUP(%o2)
6702 1: ldx [%o1], %g1
6703- sub %g1, %o0, %g7
6704+ subcc %g1, %o0, %g7
6705+
6706+#ifdef CONFIG_PAX_REFCOUNT
6707+ tvs %xcc, 6
6708+#endif
6709+
6710 casx [%o1], %g1, %g7
6711 cmp %g1, %g7
6712 bne,pn %xcc, 2f
6713diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6714index 704b126..2e79d76 100644
6715--- a/arch/sparc/lib/ksyms.c
6716+++ b/arch/sparc/lib/ksyms.c
6717@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
6718
6719 /* Atomic counter implementation. */
6720 EXPORT_SYMBOL(atomic_add);
6721+EXPORT_SYMBOL(atomic_add_unchecked);
6722 EXPORT_SYMBOL(atomic_add_ret);
6723+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6724 EXPORT_SYMBOL(atomic_sub);
6725+EXPORT_SYMBOL(atomic_sub_unchecked);
6726 EXPORT_SYMBOL(atomic_sub_ret);
6727 EXPORT_SYMBOL(atomic64_add);
6728+EXPORT_SYMBOL(atomic64_add_unchecked);
6729 EXPORT_SYMBOL(atomic64_add_ret);
6730+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6731 EXPORT_SYMBOL(atomic64_sub);
6732+EXPORT_SYMBOL(atomic64_sub_unchecked);
6733 EXPORT_SYMBOL(atomic64_sub_ret);
6734
6735 /* Atomic bit operations. */
6736diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
6737index 91a7d29..ce75c29 100644
6738--- a/arch/sparc/lib/rwsem_64.S
6739+++ b/arch/sparc/lib/rwsem_64.S
6740@@ -11,7 +11,12 @@
6741 .globl __down_read
6742 __down_read:
6743 1: lduw [%o0], %g1
6744- add %g1, 1, %g7
6745+ addcc %g1, 1, %g7
6746+
6747+#ifdef CONFIG_PAX_REFCOUNT
6748+ tvs %icc, 6
6749+#endif
6750+
6751 cas [%o0], %g1, %g7
6752 cmp %g1, %g7
6753 bne,pn %icc, 1b
6754@@ -33,7 +38,12 @@ __down_read:
6755 .globl __down_read_trylock
6756 __down_read_trylock:
6757 1: lduw [%o0], %g1
6758- add %g1, 1, %g7
6759+ addcc %g1, 1, %g7
6760+
6761+#ifdef CONFIG_PAX_REFCOUNT
6762+ tvs %icc, 6
6763+#endif
6764+
6765 cmp %g7, 0
6766 bl,pn %icc, 2f
6767 mov 0, %o1
6768@@ -51,7 +61,12 @@ __down_write:
6769 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6770 1:
6771 lduw [%o0], %g3
6772- add %g3, %g1, %g7
6773+ addcc %g3, %g1, %g7
6774+
6775+#ifdef CONFIG_PAX_REFCOUNT
6776+ tvs %icc, 6
6777+#endif
6778+
6779 cas [%o0], %g3, %g7
6780 cmp %g3, %g7
6781 bne,pn %icc, 1b
6782@@ -77,7 +92,12 @@ __down_write_trylock:
6783 cmp %g3, 0
6784 bne,pn %icc, 2f
6785 mov 0, %o1
6786- add %g3, %g1, %g7
6787+ addcc %g3, %g1, %g7
6788+
6789+#ifdef CONFIG_PAX_REFCOUNT
6790+ tvs %icc, 6
6791+#endif
6792+
6793 cas [%o0], %g3, %g7
6794 cmp %g3, %g7
6795 bne,pn %icc, 1b
6796@@ -90,7 +110,12 @@ __down_write_trylock:
6797 __up_read:
6798 1:
6799 lduw [%o0], %g1
6800- sub %g1, 1, %g7
6801+ subcc %g1, 1, %g7
6802+
6803+#ifdef CONFIG_PAX_REFCOUNT
6804+ tvs %icc, 6
6805+#endif
6806+
6807 cas [%o0], %g1, %g7
6808 cmp %g1, %g7
6809 bne,pn %icc, 1b
6810@@ -118,7 +143,12 @@ __up_write:
6811 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6812 1:
6813 lduw [%o0], %g3
6814- sub %g3, %g1, %g7
6815+ subcc %g3, %g1, %g7
6816+
6817+#ifdef CONFIG_PAX_REFCOUNT
6818+ tvs %icc, 6
6819+#endif
6820+
6821 cas [%o0], %g3, %g7
6822 cmp %g3, %g7
6823 bne,pn %icc, 1b
6824@@ -143,7 +173,12 @@ __downgrade_write:
6825 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
6826 1:
6827 lduw [%o0], %g3
6828- sub %g3, %g1, %g7
6829+ subcc %g3, %g1, %g7
6830+
6831+#ifdef CONFIG_PAX_REFCOUNT
6832+ tvs %icc, 6
6833+#endif
6834+
6835 cas [%o0], %g3, %g7
6836 cmp %g3, %g7
6837 bne,pn %icc, 1b
6838diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6839index 79836a7..62f47a2 100644
6840--- a/arch/sparc/mm/Makefile
6841+++ b/arch/sparc/mm/Makefile
6842@@ -2,7 +2,7 @@
6843 #
6844
6845 asflags-y := -ansi
6846-ccflags-y := -Werror
6847+#ccflags-y := -Werror
6848
6849 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6850 obj-y += fault_$(BITS).o
6851diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6852index b99f81c..3453e93 100644
6853--- a/arch/sparc/mm/fault_32.c
6854+++ b/arch/sparc/mm/fault_32.c
6855@@ -21,6 +21,9 @@
6856 #include <linux/interrupt.h>
6857 #include <linux/module.h>
6858 #include <linux/kdebug.h>
6859+#include <linux/slab.h>
6860+#include <linux/pagemap.h>
6861+#include <linux/compiler.h>
6862
6863 #include <asm/system.h>
6864 #include <asm/page.h>
6865@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6866 return safe_compute_effective_address(regs, insn);
6867 }
6868
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+#ifdef CONFIG_PAX_DLRESOLVE
6871+static void pax_emuplt_close(struct vm_area_struct *vma)
6872+{
6873+ vma->vm_mm->call_dl_resolve = 0UL;
6874+}
6875+
6876+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6877+{
6878+ unsigned int *kaddr;
6879+
6880+ vmf->page = alloc_page(GFP_HIGHUSER);
6881+ if (!vmf->page)
6882+ return VM_FAULT_OOM;
6883+
6884+ kaddr = kmap(vmf->page);
6885+ memset(kaddr, 0, PAGE_SIZE);
6886+ kaddr[0] = 0x9DE3BFA8U; /* save */
6887+ flush_dcache_page(vmf->page);
6888+ kunmap(vmf->page);
6889+ return VM_FAULT_MAJOR;
6890+}
6891+
6892+static const struct vm_operations_struct pax_vm_ops = {
6893+ .close = pax_emuplt_close,
6894+ .fault = pax_emuplt_fault
6895+};
6896+
6897+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6898+{
6899+ int ret;
6900+
6901+ vma->vm_mm = current->mm;
6902+ vma->vm_start = addr;
6903+ vma->vm_end = addr + PAGE_SIZE;
6904+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6905+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6906+ vma->vm_ops = &pax_vm_ops;
6907+
6908+ ret = insert_vm_struct(current->mm, vma);
6909+ if (ret)
6910+ return ret;
6911+
6912+ ++current->mm->total_vm;
6913+ return 0;
6914+}
6915+#endif
6916+
6917+/*
6918+ * PaX: decide what to do with offenders (regs->pc = fault address)
6919+ *
6920+ * returns 1 when task should be killed
6921+ * 2 when patched PLT trampoline was detected
6922+ * 3 when unpatched PLT trampoline was detected
6923+ */
6924+static int pax_handle_fetch_fault(struct pt_regs *regs)
6925+{
6926+
6927+#ifdef CONFIG_PAX_EMUPLT
6928+ int err;
6929+
6930+ do { /* PaX: patched PLT emulation #1 */
6931+ unsigned int sethi1, sethi2, jmpl;
6932+
6933+ err = get_user(sethi1, (unsigned int *)regs->pc);
6934+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6935+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6936+
6937+ if (err)
6938+ break;
6939+
6940+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6941+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6942+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6943+ {
6944+ unsigned int addr;
6945+
6946+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6947+ addr = regs->u_regs[UREG_G1];
6948+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6949+ regs->pc = addr;
6950+ regs->npc = addr+4;
6951+ return 2;
6952+ }
6953+ } while (0);
6954+
6955+ { /* PaX: patched PLT emulation #2 */
6956+ unsigned int ba;
6957+
6958+ err = get_user(ba, (unsigned int *)regs->pc);
6959+
6960+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961+ unsigned int addr;
6962+
6963+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6964+ regs->pc = addr;
6965+ regs->npc = addr+4;
6966+ return 2;
6967+ }
6968+ }
6969+
6970+ do { /* PaX: patched PLT emulation #3 */
6971+ unsigned int sethi, jmpl, nop;
6972+
6973+ err = get_user(sethi, (unsigned int *)regs->pc);
6974+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6975+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6976+
6977+ if (err)
6978+ break;
6979+
6980+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6982+ nop == 0x01000000U)
6983+ {
6984+ unsigned int addr;
6985+
6986+ addr = (sethi & 0x003FFFFFU) << 10;
6987+ regs->u_regs[UREG_G1] = addr;
6988+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6989+ regs->pc = addr;
6990+ regs->npc = addr+4;
6991+ return 2;
6992+ }
6993+ } while (0);
6994+
6995+ do { /* PaX: unpatched PLT emulation step 1 */
6996+ unsigned int sethi, ba, nop;
6997+
6998+ err = get_user(sethi, (unsigned int *)regs->pc);
6999+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
7000+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
7001+
7002+ if (err)
7003+ break;
7004+
7005+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7006+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7007+ nop == 0x01000000U)
7008+ {
7009+ unsigned int addr, save, call;
7010+
7011+ if ((ba & 0xFFC00000U) == 0x30800000U)
7012+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7013+ else
7014+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7015+
7016+ err = get_user(save, (unsigned int *)addr);
7017+ err |= get_user(call, (unsigned int *)(addr+4));
7018+ err |= get_user(nop, (unsigned int *)(addr+8));
7019+ if (err)
7020+ break;
7021+
7022+#ifdef CONFIG_PAX_DLRESOLVE
7023+ if (save == 0x9DE3BFA8U &&
7024+ (call & 0xC0000000U) == 0x40000000U &&
7025+ nop == 0x01000000U)
7026+ {
7027+ struct vm_area_struct *vma;
7028+ unsigned long call_dl_resolve;
7029+
7030+ down_read(&current->mm->mmap_sem);
7031+ call_dl_resolve = current->mm->call_dl_resolve;
7032+ up_read(&current->mm->mmap_sem);
7033+ if (likely(call_dl_resolve))
7034+ goto emulate;
7035+
7036+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7037+
7038+ down_write(&current->mm->mmap_sem);
7039+ if (current->mm->call_dl_resolve) {
7040+ call_dl_resolve = current->mm->call_dl_resolve;
7041+ up_write(&current->mm->mmap_sem);
7042+ if (vma)
7043+ kmem_cache_free(vm_area_cachep, vma);
7044+ goto emulate;
7045+ }
7046+
7047+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7048+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7049+ up_write(&current->mm->mmap_sem);
7050+ if (vma)
7051+ kmem_cache_free(vm_area_cachep, vma);
7052+ return 1;
7053+ }
7054+
7055+ if (pax_insert_vma(vma, call_dl_resolve)) {
7056+ up_write(&current->mm->mmap_sem);
7057+ kmem_cache_free(vm_area_cachep, vma);
7058+ return 1;
7059+ }
7060+
7061+ current->mm->call_dl_resolve = call_dl_resolve;
7062+ up_write(&current->mm->mmap_sem);
7063+
7064+emulate:
7065+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7066+ regs->pc = call_dl_resolve;
7067+ regs->npc = addr+4;
7068+ return 3;
7069+ }
7070+#endif
7071+
7072+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7073+ if ((save & 0xFFC00000U) == 0x05000000U &&
7074+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7075+ nop == 0x01000000U)
7076+ {
7077+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7078+ regs->u_regs[UREG_G2] = addr + 4;
7079+ addr = (save & 0x003FFFFFU) << 10;
7080+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7081+ regs->pc = addr;
7082+ regs->npc = addr+4;
7083+ return 3;
7084+ }
7085+ }
7086+ } while (0);
7087+
7088+ do { /* PaX: unpatched PLT emulation step 2 */
7089+ unsigned int save, call, nop;
7090+
7091+ err = get_user(save, (unsigned int *)(regs->pc-4));
7092+ err |= get_user(call, (unsigned int *)regs->pc);
7093+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7094+ if (err)
7095+ break;
7096+
7097+ if (save == 0x9DE3BFA8U &&
7098+ (call & 0xC0000000U) == 0x40000000U &&
7099+ nop == 0x01000000U)
7100+ {
7101+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7102+
7103+ regs->u_regs[UREG_RETPC] = regs->pc;
7104+ regs->pc = dl_resolve;
7105+ regs->npc = dl_resolve+4;
7106+ return 3;
7107+ }
7108+ } while (0);
7109+#endif
7110+
7111+ return 1;
7112+}
7113+
7114+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7115+{
7116+ unsigned long i;
7117+
7118+ printk(KERN_ERR "PAX: bytes at PC: ");
7119+ for (i = 0; i < 8; i++) {
7120+ unsigned int c;
7121+ if (get_user(c, (unsigned int *)pc+i))
7122+ printk(KERN_CONT "???????? ");
7123+ else
7124+ printk(KERN_CONT "%08x ", c);
7125+ }
7126+ printk("\n");
7127+}
7128+#endif
7129+
7130 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
7131 unsigned long address)
7132 {
7133@@ -231,6 +495,24 @@ good_area:
7134 if(!(vma->vm_flags & VM_WRITE))
7135 goto bad_area;
7136 } else {
7137+
7138+#ifdef CONFIG_PAX_PAGEEXEC
7139+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7140+ up_read(&mm->mmap_sem);
7141+ switch (pax_handle_fetch_fault(regs)) {
7142+
7143+#ifdef CONFIG_PAX_EMUPLT
7144+ case 2:
7145+ case 3:
7146+ return;
7147+#endif
7148+
7149+ }
7150+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7151+ do_group_exit(SIGKILL);
7152+ }
7153+#endif
7154+
7155 /* Allow reads even for write-only mappings */
7156 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
7157 goto bad_area;
7158diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7159index 43b0da9..a0b78f9 100644
7160--- a/arch/sparc/mm/fault_64.c
7161+++ b/arch/sparc/mm/fault_64.c
7162@@ -20,6 +20,9 @@
7163 #include <linux/kprobes.h>
7164 #include <linux/kdebug.h>
7165 #include <linux/percpu.h>
7166+#include <linux/slab.h>
7167+#include <linux/pagemap.h>
7168+#include <linux/compiler.h>
7169
7170 #include <asm/page.h>
7171 #include <asm/pgtable.h>
7172@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7173 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7174 regs->tpc);
7175 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7176- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7177+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7178 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7179 dump_stack();
7180 unhandled_fault(regs->tpc, current, regs);
7181@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
7182 show_regs(regs);
7183 }
7184
7185+#ifdef CONFIG_PAX_PAGEEXEC
7186+#ifdef CONFIG_PAX_DLRESOLVE
7187+static void pax_emuplt_close(struct vm_area_struct *vma)
7188+{
7189+ vma->vm_mm->call_dl_resolve = 0UL;
7190+}
7191+
7192+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7193+{
7194+ unsigned int *kaddr;
7195+
7196+ vmf->page = alloc_page(GFP_HIGHUSER);
7197+ if (!vmf->page)
7198+ return VM_FAULT_OOM;
7199+
7200+ kaddr = kmap(vmf->page);
7201+ memset(kaddr, 0, PAGE_SIZE);
7202+ kaddr[0] = 0x9DE3BFA8U; /* save */
7203+ flush_dcache_page(vmf->page);
7204+ kunmap(vmf->page);
7205+ return VM_FAULT_MAJOR;
7206+}
7207+
7208+static const struct vm_operations_struct pax_vm_ops = {
7209+ .close = pax_emuplt_close,
7210+ .fault = pax_emuplt_fault
7211+};
7212+
7213+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7214+{
7215+ int ret;
7216+
7217+ vma->vm_mm = current->mm;
7218+ vma->vm_start = addr;
7219+ vma->vm_end = addr + PAGE_SIZE;
7220+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7221+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7222+ vma->vm_ops = &pax_vm_ops;
7223+
7224+ ret = insert_vm_struct(current->mm, vma);
7225+ if (ret)
7226+ return ret;
7227+
7228+ ++current->mm->total_vm;
7229+ return 0;
7230+}
7231+#endif
7232+
7233+/*
7234+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7235+ *
7236+ * returns 1 when task should be killed
7237+ * 2 when patched PLT trampoline was detected
7238+ * 3 when unpatched PLT trampoline was detected
7239+ */
7240+static int pax_handle_fetch_fault(struct pt_regs *regs)
7241+{
7242+
7243+#ifdef CONFIG_PAX_EMUPLT
7244+ int err;
7245+
7246+ do { /* PaX: patched PLT emulation #1 */
7247+ unsigned int sethi1, sethi2, jmpl;
7248+
7249+ err = get_user(sethi1, (unsigned int *)regs->tpc);
7250+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7251+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7252+
7253+ if (err)
7254+ break;
7255+
7256+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7257+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7258+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7259+ {
7260+ unsigned long addr;
7261+
7262+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7263+ addr = regs->u_regs[UREG_G1];
7264+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7265+
7266+ if (test_thread_flag(TIF_32BIT))
7267+ addr &= 0xFFFFFFFFUL;
7268+
7269+ regs->tpc = addr;
7270+ regs->tnpc = addr+4;
7271+ return 2;
7272+ }
7273+ } while (0);
7274+
7275+ { /* PaX: patched PLT emulation #2 */
7276+ unsigned int ba;
7277+
7278+ err = get_user(ba, (unsigned int *)regs->tpc);
7279+
7280+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
7281+ unsigned long addr;
7282+
7283+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7284+
7285+ if (test_thread_flag(TIF_32BIT))
7286+ addr &= 0xFFFFFFFFUL;
7287+
7288+ regs->tpc = addr;
7289+ regs->tnpc = addr+4;
7290+ return 2;
7291+ }
7292+ }
7293+
7294+ do { /* PaX: patched PLT emulation #3 */
7295+ unsigned int sethi, jmpl, nop;
7296+
7297+ err = get_user(sethi, (unsigned int *)regs->tpc);
7298+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
7299+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7300+
7301+ if (err)
7302+ break;
7303+
7304+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7305+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
7306+ nop == 0x01000000U)
7307+ {
7308+ unsigned long addr;
7309+
7310+ addr = (sethi & 0x003FFFFFU) << 10;
7311+ regs->u_regs[UREG_G1] = addr;
7312+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7313+
7314+ if (test_thread_flag(TIF_32BIT))
7315+ addr &= 0xFFFFFFFFUL;
7316+
7317+ regs->tpc = addr;
7318+ regs->tnpc = addr+4;
7319+ return 2;
7320+ }
7321+ } while (0);
7322+
7323+ do { /* PaX: patched PLT emulation #4 */
7324+ unsigned int sethi, mov1, call, mov2;
7325+
7326+ err = get_user(sethi, (unsigned int *)regs->tpc);
7327+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7328+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7329+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7330+
7331+ if (err)
7332+ break;
7333+
7334+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7335+ mov1 == 0x8210000FU &&
7336+ (call & 0xC0000000U) == 0x40000000U &&
7337+ mov2 == 0x9E100001U)
7338+ {
7339+ unsigned long addr;
7340+
7341+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7342+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7343+
7344+ if (test_thread_flag(TIF_32BIT))
7345+ addr &= 0xFFFFFFFFUL;
7346+
7347+ regs->tpc = addr;
7348+ regs->tnpc = addr+4;
7349+ return 2;
7350+ }
7351+ } while (0);
7352+
7353+ do { /* PaX: patched PLT emulation #5 */
7354+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7355+
7356+ err = get_user(sethi, (unsigned int *)regs->tpc);
7357+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7358+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7359+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7360+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7361+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7362+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7363+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7364+
7365+ if (err)
7366+ break;
7367+
7368+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7369+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7370+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7371+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7372+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7373+ sllx == 0x83287020U &&
7374+ jmpl == 0x81C04005U &&
7375+ nop == 0x01000000U)
7376+ {
7377+ unsigned long addr;
7378+
7379+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7380+ regs->u_regs[UREG_G1] <<= 32;
7381+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7382+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7383+ regs->tpc = addr;
7384+ regs->tnpc = addr+4;
7385+ return 2;
7386+ }
7387+ } while (0);
7388+
7389+ do { /* PaX: patched PLT emulation #6 */
7390+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7391+
7392+ err = get_user(sethi, (unsigned int *)regs->tpc);
7393+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7394+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7395+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7396+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7397+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7398+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7399+
7400+ if (err)
7401+ break;
7402+
7403+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7404+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7405+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7406+ sllx == 0x83287020U &&
7407+ (or & 0xFFFFE000U) == 0x8A116000U &&
7408+ jmpl == 0x81C04005U &&
7409+ nop == 0x01000000U)
7410+ {
7411+ unsigned long addr;
7412+
7413+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7414+ regs->u_regs[UREG_G1] <<= 32;
7415+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7416+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7417+ regs->tpc = addr;
7418+ regs->tnpc = addr+4;
7419+ return 2;
7420+ }
7421+ } while (0);
7422+
7423+ do { /* PaX: unpatched PLT emulation step 1 */
7424+ unsigned int sethi, ba, nop;
7425+
7426+ err = get_user(sethi, (unsigned int *)regs->tpc);
7427+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7428+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7429+
7430+ if (err)
7431+ break;
7432+
7433+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7434+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7435+ nop == 0x01000000U)
7436+ {
7437+ unsigned long addr;
7438+ unsigned int save, call;
7439+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7440+
7441+ if ((ba & 0xFFC00000U) == 0x30800000U)
7442+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7443+ else
7444+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7445+
7446+ if (test_thread_flag(TIF_32BIT))
7447+ addr &= 0xFFFFFFFFUL;
7448+
7449+ err = get_user(save, (unsigned int *)addr);
7450+ err |= get_user(call, (unsigned int *)(addr+4));
7451+ err |= get_user(nop, (unsigned int *)(addr+8));
7452+ if (err)
7453+ break;
7454+
7455+#ifdef CONFIG_PAX_DLRESOLVE
7456+ if (save == 0x9DE3BFA8U &&
7457+ (call & 0xC0000000U) == 0x40000000U &&
7458+ nop == 0x01000000U)
7459+ {
7460+ struct vm_area_struct *vma;
7461+ unsigned long call_dl_resolve;
7462+
7463+ down_read(&current->mm->mmap_sem);
7464+ call_dl_resolve = current->mm->call_dl_resolve;
7465+ up_read(&current->mm->mmap_sem);
7466+ if (likely(call_dl_resolve))
7467+ goto emulate;
7468+
7469+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7470+
7471+ down_write(&current->mm->mmap_sem);
7472+ if (current->mm->call_dl_resolve) {
7473+ call_dl_resolve = current->mm->call_dl_resolve;
7474+ up_write(&current->mm->mmap_sem);
7475+ if (vma)
7476+ kmem_cache_free(vm_area_cachep, vma);
7477+ goto emulate;
7478+ }
7479+
7480+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7481+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7482+ up_write(&current->mm->mmap_sem);
7483+ if (vma)
7484+ kmem_cache_free(vm_area_cachep, vma);
7485+ return 1;
7486+ }
7487+
7488+ if (pax_insert_vma(vma, call_dl_resolve)) {
7489+ up_write(&current->mm->mmap_sem);
7490+ kmem_cache_free(vm_area_cachep, vma);
7491+ return 1;
7492+ }
7493+
7494+ current->mm->call_dl_resolve = call_dl_resolve;
7495+ up_write(&current->mm->mmap_sem);
7496+
7497+emulate:
7498+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7499+ regs->tpc = call_dl_resolve;
7500+ regs->tnpc = addr+4;
7501+ return 3;
7502+ }
7503+#endif
7504+
7505+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7506+ if ((save & 0xFFC00000U) == 0x05000000U &&
7507+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7508+ nop == 0x01000000U)
7509+ {
7510+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7511+ regs->u_regs[UREG_G2] = addr + 4;
7512+ addr = (save & 0x003FFFFFU) << 10;
7513+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7514+
7515+ if (test_thread_flag(TIF_32BIT))
7516+ addr &= 0xFFFFFFFFUL;
7517+
7518+ regs->tpc = addr;
7519+ regs->tnpc = addr+4;
7520+ return 3;
7521+ }
7522+
7523+ /* PaX: 64-bit PLT stub */
7524+ err = get_user(sethi1, (unsigned int *)addr);
7525+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7526+ err |= get_user(or1, (unsigned int *)(addr+8));
7527+ err |= get_user(or2, (unsigned int *)(addr+12));
7528+ err |= get_user(sllx, (unsigned int *)(addr+16));
7529+ err |= get_user(add, (unsigned int *)(addr+20));
7530+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7531+ err |= get_user(nop, (unsigned int *)(addr+28));
7532+ if (err)
7533+ break;
7534+
7535+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7536+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7537+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7538+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7539+ sllx == 0x89293020U &&
7540+ add == 0x8A010005U &&
7541+ jmpl == 0x89C14000U &&
7542+ nop == 0x01000000U)
7543+ {
7544+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7545+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7546+ regs->u_regs[UREG_G4] <<= 32;
7547+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7548+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7549+ regs->u_regs[UREG_G4] = addr + 24;
7550+ addr = regs->u_regs[UREG_G5];
7551+ regs->tpc = addr;
7552+ regs->tnpc = addr+4;
7553+ return 3;
7554+ }
7555+ }
7556+ } while (0);
7557+
7558+#ifdef CONFIG_PAX_DLRESOLVE
7559+ do { /* PaX: unpatched PLT emulation step 2 */
7560+ unsigned int save, call, nop;
7561+
7562+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7563+ err |= get_user(call, (unsigned int *)regs->tpc);
7564+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7565+ if (err)
7566+ break;
7567+
7568+ if (save == 0x9DE3BFA8U &&
7569+ (call & 0xC0000000U) == 0x40000000U &&
7570+ nop == 0x01000000U)
7571+ {
7572+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7573+
7574+ if (test_thread_flag(TIF_32BIT))
7575+ dl_resolve &= 0xFFFFFFFFUL;
7576+
7577+ regs->u_regs[UREG_RETPC] = regs->tpc;
7578+ regs->tpc = dl_resolve;
7579+ regs->tnpc = dl_resolve+4;
7580+ return 3;
7581+ }
7582+ } while (0);
7583+#endif
7584+
7585+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7586+ unsigned int sethi, ba, nop;
7587+
7588+ err = get_user(sethi, (unsigned int *)regs->tpc);
7589+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7590+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7591+
7592+ if (err)
7593+ break;
7594+
7595+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7596+ (ba & 0xFFF00000U) == 0x30600000U &&
7597+ nop == 0x01000000U)
7598+ {
7599+ unsigned long addr;
7600+
7601+ addr = (sethi & 0x003FFFFFU) << 10;
7602+ regs->u_regs[UREG_G1] = addr;
7603+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7604+
7605+ if (test_thread_flag(TIF_32BIT))
7606+ addr &= 0xFFFFFFFFUL;
7607+
7608+ regs->tpc = addr;
7609+ regs->tnpc = addr+4;
7610+ return 2;
7611+ }
7612+ } while (0);
7613+
7614+#endif
7615+
7616+ return 1;
7617+}
7618+
7619+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7620+{
7621+ unsigned long i;
7622+
7623+ printk(KERN_ERR "PAX: bytes at PC: ");
7624+ for (i = 0; i < 8; i++) {
7625+ unsigned int c;
7626+ if (get_user(c, (unsigned int *)pc+i))
7627+ printk(KERN_CONT "???????? ");
7628+ else
7629+ printk(KERN_CONT "%08x ", c);
7630+ }
7631+ printk("\n");
7632+}
7633+#endif
7634+
7635 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7636 {
7637 struct mm_struct *mm = current->mm;
7638@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7639 if (!vma)
7640 goto bad_area;
7641
7642+#ifdef CONFIG_PAX_PAGEEXEC
7643+ /* PaX: detect ITLB misses on non-exec pages */
7644+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7645+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7646+ {
7647+ if (address != regs->tpc)
7648+ goto good_area;
7649+
7650+ up_read(&mm->mmap_sem);
7651+ switch (pax_handle_fetch_fault(regs)) {
7652+
7653+#ifdef CONFIG_PAX_EMUPLT
7654+ case 2:
7655+ case 3:
7656+ return;
7657+#endif
7658+
7659+ }
7660+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7661+ do_group_exit(SIGKILL);
7662+ }
7663+#endif
7664+
7665 /* Pure DTLB misses do not tell us whether the fault causing
7666 * load/store/atomic was a write or not, it only says that there
7667 * was no match. So in such a case we (carefully) read the
7668diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7669index f27d103..1b06377 100644
7670--- a/arch/sparc/mm/hugetlbpage.c
7671+++ b/arch/sparc/mm/hugetlbpage.c
7672@@ -69,7 +69,7 @@ full_search:
7673 }
7674 return -ENOMEM;
7675 }
7676- if (likely(!vma || addr + len <= vma->vm_start)) {
7677+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7678 /*
7679 * Remember the place where we stopped the search:
7680 */
7681@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7682 /* make sure it can fit in the remaining address space */
7683 if (likely(addr > len)) {
7684 vma = find_vma(mm, addr-len);
7685- if (!vma || addr <= vma->vm_start) {
7686+ if (check_heap_stack_gap(vma, addr - len, len)) {
7687 /* remember the address as a hint for next time */
7688 return (mm->free_area_cache = addr-len);
7689 }
7690@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7691 if (unlikely(mm->mmap_base < len))
7692 goto bottomup;
7693
7694- addr = (mm->mmap_base-len) & HPAGE_MASK;
7695+ addr = mm->mmap_base - len;
7696
7697 do {
7698+ addr &= HPAGE_MASK;
7699 /*
7700 * Lookup failure means no vma is above this address,
7701 * else if new region fits below vma->vm_start,
7702 * return with success:
7703 */
7704 vma = find_vma(mm, addr);
7705- if (likely(!vma || addr+len <= vma->vm_start)) {
7706+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7707 /* remember the address as a hint for next time */
7708 return (mm->free_area_cache = addr);
7709 }
7710@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7711 mm->cached_hole_size = vma->vm_start - addr;
7712
7713 /* try just below the current vma->vm_start */
7714- addr = (vma->vm_start-len) & HPAGE_MASK;
7715- } while (likely(len < vma->vm_start));
7716+ addr = skip_heap_stack_gap(vma, len);
7717+ } while (!IS_ERR_VALUE(addr));
7718
7719 bottomup:
7720 /*
7721@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7722 if (addr) {
7723 addr = ALIGN(addr, HPAGE_SIZE);
7724 vma = find_vma(mm, addr);
7725- if (task_size - len >= addr &&
7726- (!vma || addr + len <= vma->vm_start))
7727+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7728 return addr;
7729 }
7730 if (mm->get_unmapped_area == arch_get_unmapped_area)
7731diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7732index dc7c3b1..34c0070 100644
7733--- a/arch/sparc/mm/init_32.c
7734+++ b/arch/sparc/mm/init_32.c
7735@@ -317,6 +317,9 @@ extern void device_scan(void);
7736 pgprot_t PAGE_SHARED __read_mostly;
7737 EXPORT_SYMBOL(PAGE_SHARED);
7738
7739+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7740+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7741+
7742 void __init paging_init(void)
7743 {
7744 switch(sparc_cpu_model) {
7745@@ -345,17 +348,17 @@ void __init paging_init(void)
7746
7747 /* Initialize the protection map with non-constant, MMU dependent values. */
7748 protection_map[0] = PAGE_NONE;
7749- protection_map[1] = PAGE_READONLY;
7750- protection_map[2] = PAGE_COPY;
7751- protection_map[3] = PAGE_COPY;
7752+ protection_map[1] = PAGE_READONLY_NOEXEC;
7753+ protection_map[2] = PAGE_COPY_NOEXEC;
7754+ protection_map[3] = PAGE_COPY_NOEXEC;
7755 protection_map[4] = PAGE_READONLY;
7756 protection_map[5] = PAGE_READONLY;
7757 protection_map[6] = PAGE_COPY;
7758 protection_map[7] = PAGE_COPY;
7759 protection_map[8] = PAGE_NONE;
7760- protection_map[9] = PAGE_READONLY;
7761- protection_map[10] = PAGE_SHARED;
7762- protection_map[11] = PAGE_SHARED;
7763+ protection_map[9] = PAGE_READONLY_NOEXEC;
7764+ protection_map[10] = PAGE_SHARED_NOEXEC;
7765+ protection_map[11] = PAGE_SHARED_NOEXEC;
7766 protection_map[12] = PAGE_READONLY;
7767 protection_map[13] = PAGE_READONLY;
7768 protection_map[14] = PAGE_SHARED;
7769diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7770index 509b1ff..bfd7118 100644
7771--- a/arch/sparc/mm/srmmu.c
7772+++ b/arch/sparc/mm/srmmu.c
7773@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7774 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7775 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7776 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7777+
7778+#ifdef CONFIG_PAX_PAGEEXEC
7779+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7780+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7781+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7782+#endif
7783+
7784 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7785 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7786
7787diff --git a/arch/um/Makefile b/arch/um/Makefile
7788index fc633db..5e1a1c2 100644
7789--- a/arch/um/Makefile
7790+++ b/arch/um/Makefile
7791@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7792 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7793 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
7794
7795+ifdef CONSTIFY_PLUGIN
7796+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7797+endif
7798+
7799 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
7800
7801 #This will adjust *FLAGS accordingly to the platform.
7802diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7803index 19e1bdd..3665b77 100644
7804--- a/arch/um/include/asm/cache.h
7805+++ b/arch/um/include/asm/cache.h
7806@@ -1,6 +1,7 @@
7807 #ifndef __UM_CACHE_H
7808 #define __UM_CACHE_H
7809
7810+#include <linux/const.h>
7811
7812 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7813 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7814@@ -12,6 +13,6 @@
7815 # define L1_CACHE_SHIFT 5
7816 #endif
7817
7818-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7819+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7820
7821 #endif
7822diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7823index 6c03acd..a5e0215 100644
7824--- a/arch/um/include/asm/kmap_types.h
7825+++ b/arch/um/include/asm/kmap_types.h
7826@@ -23,6 +23,7 @@ enum km_type {
7827 KM_IRQ1,
7828 KM_SOFTIRQ0,
7829 KM_SOFTIRQ1,
7830+ KM_CLEARPAGE,
7831 KM_TYPE_NR
7832 };
7833
7834diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7835index 4cc9b6c..02e5029 100644
7836--- a/arch/um/include/asm/page.h
7837+++ b/arch/um/include/asm/page.h
7838@@ -14,6 +14,9 @@
7839 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7840 #define PAGE_MASK (~(PAGE_SIZE-1))
7841
7842+#define ktla_ktva(addr) (addr)
7843+#define ktva_ktla(addr) (addr)
7844+
7845 #ifndef __ASSEMBLY__
7846
7847 struct page;
7848diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7849index 4a28a15..654dc2a 100644
7850--- a/arch/um/kernel/process.c
7851+++ b/arch/um/kernel/process.c
7852@@ -393,22 +393,6 @@ int singlestepping(void * t)
7853 return 2;
7854 }
7855
7856-/*
7857- * Only x86 and x86_64 have an arch_align_stack().
7858- * All other arches have "#define arch_align_stack(x) (x)"
7859- * in their asm/system.h
7860- * As this is included in UML from asm-um/system-generic.h,
7861- * we can use it to behave as the subarch does.
7862- */
7863-#ifndef arch_align_stack
7864-unsigned long arch_align_stack(unsigned long sp)
7865-{
7866- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7867- sp -= get_random_int() % 8192;
7868- return sp & ~0xf;
7869-}
7870-#endif
7871-
7872 unsigned long get_wchan(struct task_struct *p)
7873 {
7874 unsigned long stack_page, sp, ip;
7875diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7876index d1b93c4..ae1b7fd 100644
7877--- a/arch/um/sys-i386/shared/sysdep/system.h
7878+++ b/arch/um/sys-i386/shared/sysdep/system.h
7879@@ -17,7 +17,7 @@
7880 # define AT_VECTOR_SIZE_ARCH 1
7881 #endif
7882
7883-extern unsigned long arch_align_stack(unsigned long sp);
7884+#define arch_align_stack(x) ((x) & ~0xfUL)
7885
7886 void default_idle(void);
7887
7888diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7889index 857ca0b..9a2669d 100644
7890--- a/arch/um/sys-i386/syscalls.c
7891+++ b/arch/um/sys-i386/syscalls.c
7892@@ -11,6 +11,21 @@
7893 #include "asm/uaccess.h"
7894 #include "asm/unistd.h"
7895
7896+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7897+{
7898+ unsigned long pax_task_size = TASK_SIZE;
7899+
7900+#ifdef CONFIG_PAX_SEGMEXEC
7901+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7902+ pax_task_size = SEGMEXEC_TASK_SIZE;
7903+#endif
7904+
7905+ if (len > pax_task_size || addr > pax_task_size - len)
7906+ return -EINVAL;
7907+
7908+ return 0;
7909+}
7910+
7911 /*
7912 * Perform the select(nd, in, out, ex, tv) and mmap() system
7913 * calls. Linux/i386 didn't use to be able to handle more than
7914diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7915index d1b93c4..ae1b7fd 100644
7916--- a/arch/um/sys-x86_64/shared/sysdep/system.h
7917+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7918@@ -17,7 +17,7 @@
7919 # define AT_VECTOR_SIZE_ARCH 1
7920 #endif
7921
7922-extern unsigned long arch_align_stack(unsigned long sp);
7923+#define arch_align_stack(x) ((x) & ~0xfUL)
7924
7925 void default_idle(void);
7926
7927diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7928index 73ae02a..f932de5 100644
7929--- a/arch/x86/Kconfig
7930+++ b/arch/x86/Kconfig
7931@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7932
7933 config X86_32_LAZY_GS
7934 def_bool y
7935- depends on X86_32 && !CC_STACKPROTECTOR
7936+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7937
7938 config KTIME_SCALAR
7939 def_bool X86_32
7940@@ -1008,7 +1008,7 @@ choice
7941
7942 config NOHIGHMEM
7943 bool "off"
7944- depends on !X86_NUMAQ
7945+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7946 ---help---
7947 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7948 However, the address space of 32-bit x86 processors is only 4
7949@@ -1045,7 +1045,7 @@ config NOHIGHMEM
7950
7951 config HIGHMEM4G
7952 bool "4GB"
7953- depends on !X86_NUMAQ
7954+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7955 ---help---
7956 Select this if you have a 32-bit processor and between 1 and 4
7957 gigabytes of physical RAM.
7958@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7959 hex
7960 default 0xB0000000 if VMSPLIT_3G_OPT
7961 default 0x80000000 if VMSPLIT_2G
7962- default 0x78000000 if VMSPLIT_2G_OPT
7963+ default 0x70000000 if VMSPLIT_2G_OPT
7964 default 0x40000000 if VMSPLIT_1G
7965 default 0xC0000000
7966 depends on X86_32
7967@@ -1460,6 +1460,7 @@ config SECCOMP
7968
7969 config CC_STACKPROTECTOR
7970 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7971+ depends on X86_64 || !PAX_MEMORY_UDEREF
7972 ---help---
7973 This option turns on the -fstack-protector GCC feature. This
7974 feature puts, at the beginning of functions, a canary value on
7975@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7976 config PHYSICAL_START
7977 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7978 default "0x1000000"
7979+ range 0x400000 0x40000000
7980 ---help---
7981 This gives the physical address where the kernel is loaded.
7982
7983@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7984 hex
7985 prompt "Alignment value to which kernel should be aligned" if X86_32
7986 default "0x1000000"
7987+ range 0x400000 0x1000000 if PAX_KERNEXEC
7988 range 0x2000 0x1000000
7989 ---help---
7990 This value puts the alignment restrictions on physical address
7991@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7992 Say N if you want to disable CPU hotplug.
7993
7994 config COMPAT_VDSO
7995- def_bool y
7996+ def_bool n
7997 prompt "Compat VDSO support"
7998 depends on X86_32 || IA32_EMULATION
7999+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
8000 ---help---
8001 Map the 32-bit VDSO to the predictable old-style address too.
8002 ---help---
8003diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8004index 0e566103..1a6b57e 100644
8005--- a/arch/x86/Kconfig.cpu
8006+++ b/arch/x86/Kconfig.cpu
8007@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
8008
8009 config X86_F00F_BUG
8010 def_bool y
8011- depends on M586MMX || M586TSC || M586 || M486 || M386
8012+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8013
8014 config X86_WP_WORKS_OK
8015 def_bool y
8016@@ -360,7 +360,7 @@ config X86_POPAD_OK
8017
8018 config X86_ALIGNMENT_16
8019 def_bool y
8020- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8021+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8022
8023 config X86_INTEL_USERCOPY
8024 def_bool y
8025@@ -406,7 +406,7 @@ config X86_CMPXCHG64
8026 # generates cmov.
8027 config X86_CMOV
8028 def_bool y
8029- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8030+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8031
8032 config X86_MINIMUM_CPU_FAMILY
8033 int
8034diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8035index d105f29..c928727 100644
8036--- a/arch/x86/Kconfig.debug
8037+++ b/arch/x86/Kconfig.debug
8038@@ -99,7 +99,7 @@ config X86_PTDUMP
8039 config DEBUG_RODATA
8040 bool "Write protect kernel read-only data structures"
8041 default y
8042- depends on DEBUG_KERNEL
8043+ depends on DEBUG_KERNEL && BROKEN
8044 ---help---
8045 Mark the kernel read-only data as write-protected in the pagetables,
8046 in order to catch accidental (and incorrect) writes to such const
8047diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8048index d2d24c9..0f21f8d 100644
8049--- a/arch/x86/Makefile
8050+++ b/arch/x86/Makefile
8051@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
8052 else
8053 BITS := 64
8054 UTS_MACHINE := x86_64
8055+ biarch := $(call cc-option,-m64)
8056 CHECKFLAGS += -D__x86_64__ -m64
8057
8058 KBUILD_AFLAGS += -m64
8059@@ -189,3 +190,12 @@ define archhelp
8060 echo ' FDARGS="..." arguments for the booted kernel'
8061 echo ' FDINITRD=file initrd for the booted kernel'
8062 endef
8063+
8064+define OLD_LD
8065+
8066+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8067+*** Please upgrade your binutils to 2.18 or newer
8068+endef
8069+
8070+archprepare:
8071+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8072diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8073index ec749c2..bbb5319 100644
8074--- a/arch/x86/boot/Makefile
8075+++ b/arch/x86/boot/Makefile
8076@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8077 $(call cc-option, -fno-stack-protector) \
8078 $(call cc-option, -mpreferred-stack-boundary=2)
8079 KBUILD_CFLAGS += $(call cc-option, -m32)
8080+ifdef CONSTIFY_PLUGIN
8081+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8082+endif
8083 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8084 GCOV_PROFILE := n
8085
8086diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8087index 878e4b9..20537ab 100644
8088--- a/arch/x86/boot/bitops.h
8089+++ b/arch/x86/boot/bitops.h
8090@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8091 u8 v;
8092 const u32 *p = (const u32 *)addr;
8093
8094- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8095+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8096 return v;
8097 }
8098
8099@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8100
8101 static inline void set_bit(int nr, void *addr)
8102 {
8103- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8104+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8105 }
8106
8107 #endif /* BOOT_BITOPS_H */
8108diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8109index 98239d2..f40214c 100644
8110--- a/arch/x86/boot/boot.h
8111+++ b/arch/x86/boot/boot.h
8112@@ -82,7 +82,7 @@ static inline void io_delay(void)
8113 static inline u16 ds(void)
8114 {
8115 u16 seg;
8116- asm("movw %%ds,%0" : "=rm" (seg));
8117+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8118 return seg;
8119 }
8120
8121@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8122 static inline int memcmp(const void *s1, const void *s2, size_t len)
8123 {
8124 u8 diff;
8125- asm("repe; cmpsb; setnz %0"
8126+ asm volatile("repe; cmpsb; setnz %0"
8127 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8128 return diff;
8129 }
8130diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8131index f8ed065..5bf5ff3 100644
8132--- a/arch/x86/boot/compressed/Makefile
8133+++ b/arch/x86/boot/compressed/Makefile
8134@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8135 KBUILD_CFLAGS += $(cflags-y)
8136 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8137 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8138+ifdef CONSTIFY_PLUGIN
8139+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8140+endif
8141
8142 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8143 GCOV_PROFILE := n
8144diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8145index f543b70..b60fba8 100644
8146--- a/arch/x86/boot/compressed/head_32.S
8147+++ b/arch/x86/boot/compressed/head_32.S
8148@@ -76,7 +76,7 @@ ENTRY(startup_32)
8149 notl %eax
8150 andl %eax, %ebx
8151 #else
8152- movl $LOAD_PHYSICAL_ADDR, %ebx
8153+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8154 #endif
8155
8156 /* Target address to relocate to for decompression */
8157@@ -149,7 +149,7 @@ relocated:
8158 * and where it was actually loaded.
8159 */
8160 movl %ebp, %ebx
8161- subl $LOAD_PHYSICAL_ADDR, %ebx
8162+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8163 jz 2f /* Nothing to be done if loaded at compiled addr. */
8164 /*
8165 * Process relocations.
8166@@ -157,8 +157,7 @@ relocated:
8167
8168 1: subl $4, %edi
8169 movl (%edi), %ecx
8170- testl %ecx, %ecx
8171- jz 2f
8172+ jecxz 2f
8173 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8174 jmp 1b
8175 2:
8176diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8177index 077e1b6..2c6b13b 100644
8178--- a/arch/x86/boot/compressed/head_64.S
8179+++ b/arch/x86/boot/compressed/head_64.S
8180@@ -91,7 +91,7 @@ ENTRY(startup_32)
8181 notl %eax
8182 andl %eax, %ebx
8183 #else
8184- movl $LOAD_PHYSICAL_ADDR, %ebx
8185+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8186 #endif
8187
8188 /* Target address to relocate to for decompression */
8189@@ -183,7 +183,7 @@ no_longmode:
8190 hlt
8191 jmp 1b
8192
8193-#include "../../kernel/verify_cpu_64.S"
8194+#include "../../kernel/verify_cpu.S"
8195
8196 /*
8197 * Be careful here startup_64 needs to be at a predictable
8198@@ -234,7 +234,7 @@ ENTRY(startup_64)
8199 notq %rax
8200 andq %rax, %rbp
8201 #else
8202- movq $LOAD_PHYSICAL_ADDR, %rbp
8203+ movq $____LOAD_PHYSICAL_ADDR, %rbp
8204 #endif
8205
8206 /* Target address to relocate to for decompression */
8207diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8208index 842b2a3..f00178b 100644
8209--- a/arch/x86/boot/compressed/misc.c
8210+++ b/arch/x86/boot/compressed/misc.c
8211@@ -288,7 +288,7 @@ static void parse_elf(void *output)
8212 case PT_LOAD:
8213 #ifdef CONFIG_RELOCATABLE
8214 dest = output;
8215- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8216+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8217 #else
8218 dest = (void *)(phdr->p_paddr);
8219 #endif
8220@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8221 error("Destination address too large");
8222 #endif
8223 #ifndef CONFIG_RELOCATABLE
8224- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8225+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8226 error("Wrong destination address");
8227 #endif
8228
8229diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
8230index bcbd36c..b1754af 100644
8231--- a/arch/x86/boot/compressed/mkpiggy.c
8232+++ b/arch/x86/boot/compressed/mkpiggy.c
8233@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
8234
8235 offs = (olen > ilen) ? olen - ilen : 0;
8236 offs += olen >> 12; /* Add 8 bytes for each 32K block */
8237- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
8238+ offs += 64*1024; /* Add 64K bytes slack */
8239 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
8240
8241 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
8242diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
8243index bbeb0c3..f5167ab 100644
8244--- a/arch/x86/boot/compressed/relocs.c
8245+++ b/arch/x86/boot/compressed/relocs.c
8246@@ -10,8 +10,11 @@
8247 #define USE_BSD
8248 #include <endian.h>
8249
8250+#include "../../../../include/linux/autoconf.h"
8251+
8252 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
8253 static Elf32_Ehdr ehdr;
8254+static Elf32_Phdr *phdr;
8255 static unsigned long reloc_count, reloc_idx;
8256 static unsigned long *relocs;
8257
8258@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
8259
8260 static int is_safe_abs_reloc(const char* sym_name)
8261 {
8262- int i;
8263+ unsigned int i;
8264
8265 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
8266 if (!strcmp(sym_name, safe_abs_relocs[i]))
8267@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
8268 }
8269 }
8270
8271+static void read_phdrs(FILE *fp)
8272+{
8273+ unsigned int i;
8274+
8275+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
8276+ if (!phdr) {
8277+ die("Unable to allocate %d program headers\n",
8278+ ehdr.e_phnum);
8279+ }
8280+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
8281+ die("Seek to %d failed: %s\n",
8282+ ehdr.e_phoff, strerror(errno));
8283+ }
8284+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
8285+ die("Cannot read ELF program headers: %s\n",
8286+ strerror(errno));
8287+ }
8288+ for(i = 0; i < ehdr.e_phnum; i++) {
8289+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
8290+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
8291+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
8292+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
8293+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
8294+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
8295+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
8296+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
8297+ }
8298+
8299+}
8300+
8301 static void read_shdrs(FILE *fp)
8302 {
8303- int i;
8304+ unsigned int i;
8305 Elf32_Shdr shdr;
8306
8307 secs = calloc(ehdr.e_shnum, sizeof(struct section));
8308@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
8309
8310 static void read_strtabs(FILE *fp)
8311 {
8312- int i;
8313+ unsigned int i;
8314 for (i = 0; i < ehdr.e_shnum; i++) {
8315 struct section *sec = &secs[i];
8316 if (sec->shdr.sh_type != SHT_STRTAB) {
8317@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
8318
8319 static void read_symtabs(FILE *fp)
8320 {
8321- int i,j;
8322+ unsigned int i,j;
8323 for (i = 0; i < ehdr.e_shnum; i++) {
8324 struct section *sec = &secs[i];
8325 if (sec->shdr.sh_type != SHT_SYMTAB) {
8326@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
8327
8328 static void read_relocs(FILE *fp)
8329 {
8330- int i,j;
8331+ unsigned int i,j;
8332+ uint32_t base;
8333+
8334 for (i = 0; i < ehdr.e_shnum; i++) {
8335 struct section *sec = &secs[i];
8336 if (sec->shdr.sh_type != SHT_REL) {
8337@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
8338 die("Cannot read symbol table: %s\n",
8339 strerror(errno));
8340 }
8341+ base = 0;
8342+ for (j = 0; j < ehdr.e_phnum; j++) {
8343+ if (phdr[j].p_type != PT_LOAD )
8344+ continue;
8345+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
8346+ continue;
8347+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
8348+ break;
8349+ }
8350 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
8351 Elf32_Rel *rel = &sec->reltab[j];
8352- rel->r_offset = elf32_to_cpu(rel->r_offset);
8353+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
8354 rel->r_info = elf32_to_cpu(rel->r_info);
8355 }
8356 }
8357@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
8358
8359 static void print_absolute_symbols(void)
8360 {
8361- int i;
8362+ unsigned int i;
8363 printf("Absolute symbols\n");
8364 printf(" Num: Value Size Type Bind Visibility Name\n");
8365 for (i = 0; i < ehdr.e_shnum; i++) {
8366 struct section *sec = &secs[i];
8367 char *sym_strtab;
8368 Elf32_Sym *sh_symtab;
8369- int j;
8370+ unsigned int j;
8371
8372 if (sec->shdr.sh_type != SHT_SYMTAB) {
8373 continue;
8374@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
8375
8376 static void print_absolute_relocs(void)
8377 {
8378- int i, printed = 0;
8379+ unsigned int i, printed = 0;
8380
8381 for (i = 0; i < ehdr.e_shnum; i++) {
8382 struct section *sec = &secs[i];
8383 struct section *sec_applies, *sec_symtab;
8384 char *sym_strtab;
8385 Elf32_Sym *sh_symtab;
8386- int j;
8387+ unsigned int j;
8388 if (sec->shdr.sh_type != SHT_REL) {
8389 continue;
8390 }
8391@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
8392
8393 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8394 {
8395- int i;
8396+ unsigned int i;
8397 /* Walk through the relocations */
8398 for (i = 0; i < ehdr.e_shnum; i++) {
8399 char *sym_strtab;
8400 Elf32_Sym *sh_symtab;
8401 struct section *sec_applies, *sec_symtab;
8402- int j;
8403+ unsigned int j;
8404 struct section *sec = &secs[i];
8405
8406 if (sec->shdr.sh_type != SHT_REL) {
8407@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8408 if (sym->st_shndx == SHN_ABS) {
8409 continue;
8410 }
8411+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8412+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8413+ continue;
8414+
8415+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8416+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8417+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8418+ continue;
8419+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8420+ continue;
8421+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8422+ continue;
8423+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8424+ continue;
8425+#endif
8426 if (r_type == R_386_NONE || r_type == R_386_PC32) {
8427 /*
8428 * NONE can be ignored and and PC relative
8429@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
8430
8431 static void emit_relocs(int as_text)
8432 {
8433- int i;
8434+ unsigned int i;
8435 /* Count how many relocations I have and allocate space for them. */
8436 reloc_count = 0;
8437 walk_relocs(count_reloc);
8438@@ -634,6 +693,7 @@ int main(int argc, char **argv)
8439 fname, strerror(errno));
8440 }
8441 read_ehdr(fp);
8442+ read_phdrs(fp);
8443 read_shdrs(fp);
8444 read_strtabs(fp);
8445 read_symtabs(fp);
8446diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8447index 4d3ff03..e4972ff 100644
8448--- a/arch/x86/boot/cpucheck.c
8449+++ b/arch/x86/boot/cpucheck.c
8450@@ -74,7 +74,7 @@ static int has_fpu(void)
8451 u16 fcw = -1, fsw = -1;
8452 u32 cr0;
8453
8454- asm("movl %%cr0,%0" : "=r" (cr0));
8455+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8456 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8457 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8458 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8459@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8460 {
8461 u32 f0, f1;
8462
8463- asm("pushfl ; "
8464+ asm volatile("pushfl ; "
8465 "pushfl ; "
8466 "popl %0 ; "
8467 "movl %0,%1 ; "
8468@@ -115,7 +115,7 @@ static void get_flags(void)
8469 set_bit(X86_FEATURE_FPU, cpu.flags);
8470
8471 if (has_eflag(X86_EFLAGS_ID)) {
8472- asm("cpuid"
8473+ asm volatile("cpuid"
8474 : "=a" (max_intel_level),
8475 "=b" (cpu_vendor[0]),
8476 "=d" (cpu_vendor[1]),
8477@@ -124,7 +124,7 @@ static void get_flags(void)
8478
8479 if (max_intel_level >= 0x00000001 &&
8480 max_intel_level <= 0x0000ffff) {
8481- asm("cpuid"
8482+ asm volatile("cpuid"
8483 : "=a" (tfms),
8484 "=c" (cpu.flags[4]),
8485 "=d" (cpu.flags[0])
8486@@ -136,7 +136,7 @@ static void get_flags(void)
8487 cpu.model += ((tfms >> 16) & 0xf) << 4;
8488 }
8489
8490- asm("cpuid"
8491+ asm volatile("cpuid"
8492 : "=a" (max_amd_level)
8493 : "a" (0x80000000)
8494 : "ebx", "ecx", "edx");
8495@@ -144,7 +144,7 @@ static void get_flags(void)
8496 if (max_amd_level >= 0x80000001 &&
8497 max_amd_level <= 0x8000ffff) {
8498 u32 eax = 0x80000001;
8499- asm("cpuid"
8500+ asm volatile("cpuid"
8501 : "+a" (eax),
8502 "=c" (cpu.flags[6]),
8503 "=d" (cpu.flags[1])
8504@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8505 u32 ecx = MSR_K7_HWCR;
8506 u32 eax, edx;
8507
8508- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8509+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8510 eax &= ~(1 << 15);
8511- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8512+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8513
8514 get_flags(); /* Make sure it really did something */
8515 err = check_flags();
8516@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8517 u32 ecx = MSR_VIA_FCR;
8518 u32 eax, edx;
8519
8520- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8521+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8522 eax |= (1<<1)|(1<<7);
8523- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8524+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8525
8526 set_bit(X86_FEATURE_CX8, cpu.flags);
8527 err = check_flags();
8528@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8529 u32 eax, edx;
8530 u32 level = 1;
8531
8532- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8533- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8534- asm("cpuid"
8535+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8536+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8537+ asm volatile("cpuid"
8538 : "+a" (level), "=d" (cpu.flags[0])
8539 : : "ecx", "ebx");
8540- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8541+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8542
8543 err = check_flags();
8544 }
8545diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8546index b31cc54..8d69237 100644
8547--- a/arch/x86/boot/header.S
8548+++ b/arch/x86/boot/header.S
8549@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8550 # single linked list of
8551 # struct setup_data
8552
8553-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8554+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8555
8556 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8557 #define VO_INIT_SIZE (VO__end - VO__text)
8558diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8559index cae3feb..ff8ff2a 100644
8560--- a/arch/x86/boot/memory.c
8561+++ b/arch/x86/boot/memory.c
8562@@ -19,7 +19,7 @@
8563
8564 static int detect_memory_e820(void)
8565 {
8566- int count = 0;
8567+ unsigned int count = 0;
8568 struct biosregs ireg, oreg;
8569 struct e820entry *desc = boot_params.e820_map;
8570 static struct e820entry buf; /* static so it is zeroed */
8571diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8572index 11e8c6e..fdbb1ed 100644
8573--- a/arch/x86/boot/video-vesa.c
8574+++ b/arch/x86/boot/video-vesa.c
8575@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8576
8577 boot_params.screen_info.vesapm_seg = oreg.es;
8578 boot_params.screen_info.vesapm_off = oreg.di;
8579+ boot_params.screen_info.vesapm_size = oreg.cx;
8580 }
8581
8582 /*
8583diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8584index d42da38..787cdf3 100644
8585--- a/arch/x86/boot/video.c
8586+++ b/arch/x86/boot/video.c
8587@@ -90,7 +90,7 @@ static void store_mode_params(void)
8588 static unsigned int get_entry(void)
8589 {
8590 char entry_buf[4];
8591- int i, len = 0;
8592+ unsigned int i, len = 0;
8593 int key;
8594 unsigned int v;
8595
8596diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8597index 5b577d5..3c1fed4 100644
8598--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8599+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8600@@ -8,6 +8,8 @@
8601 * including this sentence is retained in full.
8602 */
8603
8604+#include <asm/alternative-asm.h>
8605+
8606 .extern crypto_ft_tab
8607 .extern crypto_it_tab
8608 .extern crypto_fl_tab
8609@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8610 je B192; \
8611 leaq 32(r9),r9;
8612
8613+#define ret pax_force_retaddr 0, 1; ret
8614+
8615 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8616 movq r1,r2; \
8617 movq r3,r4; \
8618diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8619index eb0566e..e3ebad8 100644
8620--- a/arch/x86/crypto/aesni-intel_asm.S
8621+++ b/arch/x86/crypto/aesni-intel_asm.S
8622@@ -16,6 +16,7 @@
8623 */
8624
8625 #include <linux/linkage.h>
8626+#include <asm/alternative-asm.h>
8627
8628 .text
8629
8630@@ -52,6 +53,7 @@ _key_expansion_256a:
8631 pxor %xmm1, %xmm0
8632 movaps %xmm0, (%rcx)
8633 add $0x10, %rcx
8634+ pax_force_retaddr_bts
8635 ret
8636
8637 _key_expansion_192a:
8638@@ -75,6 +77,7 @@ _key_expansion_192a:
8639 shufps $0b01001110, %xmm2, %xmm1
8640 movaps %xmm1, 16(%rcx)
8641 add $0x20, %rcx
8642+ pax_force_retaddr_bts
8643 ret
8644
8645 _key_expansion_192b:
8646@@ -93,6 +96,7 @@ _key_expansion_192b:
8647
8648 movaps %xmm0, (%rcx)
8649 add $0x10, %rcx
8650+ pax_force_retaddr_bts
8651 ret
8652
8653 _key_expansion_256b:
8654@@ -104,6 +108,7 @@ _key_expansion_256b:
8655 pxor %xmm1, %xmm2
8656 movaps %xmm2, (%rcx)
8657 add $0x10, %rcx
8658+ pax_force_retaddr_bts
8659 ret
8660
8661 /*
8662@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
8663 cmp %rcx, %rdi
8664 jb .Ldec_key_loop
8665 xor %rax, %rax
8666+ pax_force_retaddr 0, 1
8667 ret
8668+ENDPROC(aesni_set_key)
8669
8670 /*
8671 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8672@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
8673 movups (INP), STATE # input
8674 call _aesni_enc1
8675 movups STATE, (OUTP) # output
8676+ pax_force_retaddr 0, 1
8677 ret
8678+ENDPROC(aesni_enc)
8679
8680 /*
8681 * _aesni_enc1: internal ABI
8682@@ -319,6 +328,7 @@ _aesni_enc1:
8683 movaps 0x70(TKEYP), KEY
8684 # aesenclast KEY, STATE # last round
8685 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
8686+ pax_force_retaddr_bts
8687 ret
8688
8689 /*
8690@@ -482,6 +492,7 @@ _aesni_enc4:
8691 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
8692 # aesenclast KEY, STATE4
8693 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
8694+ pax_force_retaddr_bts
8695 ret
8696
8697 /*
8698@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
8699 movups (INP), STATE # input
8700 call _aesni_dec1
8701 movups STATE, (OUTP) #output
8702+ pax_force_retaddr 0, 1
8703 ret
8704+ENDPROC(aesni_dec)
8705
8706 /*
8707 * _aesni_dec1: internal ABI
8708@@ -563,6 +576,7 @@ _aesni_dec1:
8709 movaps 0x70(TKEYP), KEY
8710 # aesdeclast KEY, STATE # last round
8711 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
8712+ pax_force_retaddr_bts
8713 ret
8714
8715 /*
8716@@ -726,6 +740,7 @@ _aesni_dec4:
8717 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
8718 # aesdeclast KEY, STATE4
8719 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
8720+ pax_force_retaddr_bts
8721 ret
8722
8723 /*
8724@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
8725 cmp $16, LEN
8726 jge .Lecb_enc_loop1
8727 .Lecb_enc_ret:
8728+ pax_force_retaddr 0, 1
8729 ret
8730+ENDPROC(aesni_ecb_enc)
8731
8732 /*
8733 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8734@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
8735 cmp $16, LEN
8736 jge .Lecb_dec_loop1
8737 .Lecb_dec_ret:
8738+ pax_force_retaddr 0, 1
8739 ret
8740+ENDPROC(aesni_ecb_dec)
8741
8742 /*
8743 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8744@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
8745 jge .Lcbc_enc_loop
8746 movups STATE, (IVP)
8747 .Lcbc_enc_ret:
8748+ pax_force_retaddr 0, 1
8749 ret
8750+ENDPROC(aesni_cbc_enc)
8751
8752 /*
8753 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8754@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
8755 .Lcbc_dec_ret:
8756 movups IV, (IVP)
8757 .Lcbc_dec_just_ret:
8758+ pax_force_retaddr 0, 1
8759 ret
8760+ENDPROC(aesni_cbc_dec)
8761diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8762index 6214a9b..1f4fc9a 100644
8763--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8764+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8765@@ -1,3 +1,5 @@
8766+#include <asm/alternative-asm.h>
8767+
8768 # enter ECRYPT_encrypt_bytes
8769 .text
8770 .p2align 5
8771@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8772 add %r11,%rsp
8773 mov %rdi,%rax
8774 mov %rsi,%rdx
8775+ pax_force_retaddr 0, 1
8776 ret
8777 # bytesatleast65:
8778 ._bytesatleast65:
8779@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8780 add %r11,%rsp
8781 mov %rdi,%rax
8782 mov %rsi,%rdx
8783+ pax_force_retaddr
8784 ret
8785 # enter ECRYPT_ivsetup
8786 .text
8787@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8788 add %r11,%rsp
8789 mov %rdi,%rax
8790 mov %rsi,%rdx
8791+ pax_force_retaddr
8792 ret
8793diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8794index 35974a5..5662ae2 100644
8795--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8796+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8797@@ -21,6 +21,7 @@
8798 .text
8799
8800 #include <asm/asm-offsets.h>
8801+#include <asm/alternative-asm.h>
8802
8803 #define a_offset 0
8804 #define b_offset 4
8805@@ -269,6 +270,7 @@ twofish_enc_blk:
8806
8807 popq R1
8808 movq $1,%rax
8809+ pax_force_retaddr 0, 1
8810 ret
8811
8812 twofish_dec_blk:
8813@@ -321,4 +323,5 @@ twofish_dec_blk:
8814
8815 popq R1
8816 movq $1,%rax
8817+ pax_force_retaddr 0, 1
8818 ret
8819diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8820index 14531ab..bc68a7b 100644
8821--- a/arch/x86/ia32/ia32_aout.c
8822+++ b/arch/x86/ia32/ia32_aout.c
8823@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8824 unsigned long dump_start, dump_size;
8825 struct user32 dump;
8826
8827+ memset(&dump, 0, sizeof(dump));
8828+
8829 fs = get_fs();
8830 set_fs(KERNEL_DS);
8831 has_dumped = 1;
8832@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8833 dump_size = dump.u_ssize << PAGE_SHIFT;
8834 DUMP_WRITE(dump_start, dump_size);
8835 }
8836- /*
8837- * Finally dump the task struct. Not be used by gdb, but
8838- * could be useful
8839- */
8840- set_fs(KERNEL_DS);
8841- DUMP_WRITE(current, sizeof(*current));
8842 end_coredump:
8843 set_fs(fs);
8844 return has_dumped;
8845@@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8846 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8847 current->mm->cached_hole_size = 0;
8848
8849+ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8850+ if (retval < 0) {
8851+ /* Someone check-me: is this error path enough? */
8852+ send_sig(SIGKILL, current, 0);
8853+ return retval;
8854+ }
8855+
8856 install_exec_creds(bprm);
8857 current->flags &= ~PF_FORKNOEXEC;
8858
8859@@ -422,13 +425,6 @@ beyond_if:
8860
8861 set_brk(current->mm->start_brk, current->mm->brk);
8862
8863- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8864- if (retval < 0) {
8865- /* Someone check-me: is this error path enough? */
8866- send_sig(SIGKILL, current, 0);
8867- return retval;
8868- }
8869-
8870 current->mm->start_stack =
8871 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8872 /* start thread */
8873diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8874index 588a7aa..a3468b0 100644
8875--- a/arch/x86/ia32/ia32_signal.c
8876+++ b/arch/x86/ia32/ia32_signal.c
8877@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8878 }
8879 seg = get_fs();
8880 set_fs(KERNEL_DS);
8881- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8882+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8883 set_fs(seg);
8884 if (ret >= 0 && uoss_ptr) {
8885 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8886@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8887 */
8888 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8889 size_t frame_size,
8890- void **fpstate)
8891+ void __user **fpstate)
8892 {
8893 unsigned long sp;
8894
8895@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8896
8897 if (used_math()) {
8898 sp = sp - sig_xstate_ia32_size;
8899- *fpstate = (struct _fpstate_ia32 *) sp;
8900+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8901 if (save_i387_xstate_ia32(*fpstate) < 0)
8902 return (void __user *) -1L;
8903 }
8904@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8905 sp -= frame_size;
8906 /* Align the stack pointer according to the i386 ABI,
8907 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8908- sp = ((sp + 4) & -16ul) - 4;
8909+ sp = ((sp - 12) & -16ul) - 4;
8910 return (void __user *) sp;
8911 }
8912
8913@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8914 * These are actually not used anymore, but left because some
8915 * gdb versions depend on them as a marker.
8916 */
8917- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8918+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8919 } put_user_catch(err);
8920
8921 if (err)
8922@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8923 0xb8,
8924 __NR_ia32_rt_sigreturn,
8925 0x80cd,
8926- 0,
8927+ 0
8928 };
8929
8930 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8931@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8932
8933 if (ka->sa.sa_flags & SA_RESTORER)
8934 restorer = ka->sa.sa_restorer;
8935+ else if (current->mm->context.vdso)
8936+ /* Return stub is in 32bit vsyscall page */
8937+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8938 else
8939- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8940- rt_sigreturn);
8941+ restorer = &frame->retcode;
8942 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8943
8944 /*
8945 * Not actually used anymore, but left because some gdb
8946 * versions need it.
8947 */
8948- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8949+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8950 } put_user_catch(err);
8951
8952 if (err)
8953diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8954index 4edd8eb..29124b4 100644
8955--- a/arch/x86/ia32/ia32entry.S
8956+++ b/arch/x86/ia32/ia32entry.S
8957@@ -13,7 +13,9 @@
8958 #include <asm/thread_info.h>
8959 #include <asm/segment.h>
8960 #include <asm/irqflags.h>
8961+#include <asm/pgtable.h>
8962 #include <linux/linkage.h>
8963+#include <asm/alternative-asm.h>
8964
8965 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8966 #include <linux/elf-em.h>
8967@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8968 ENDPROC(native_irq_enable_sysexit)
8969 #endif
8970
8971+ .macro pax_enter_kernel_user
8972+ pax_set_fptr_mask
8973+#ifdef CONFIG_PAX_MEMORY_UDEREF
8974+ call pax_enter_kernel_user
8975+#endif
8976+ .endm
8977+
8978+ .macro pax_exit_kernel_user
8979+#ifdef CONFIG_PAX_MEMORY_UDEREF
8980+ call pax_exit_kernel_user
8981+#endif
8982+#ifdef CONFIG_PAX_RANDKSTACK
8983+ pushq %rax
8984+ pushq %r11
8985+ call pax_randomize_kstack
8986+ popq %r11
8987+ popq %rax
8988+#endif
8989+ .endm
8990+
8991+.macro pax_erase_kstack
8992+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8993+ call pax_erase_kstack
8994+#endif
8995+.endm
8996+
8997 /*
8998 * 32bit SYSENTER instruction entry.
8999 *
9000@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
9001 CFI_REGISTER rsp,rbp
9002 SWAPGS_UNSAFE_STACK
9003 movq PER_CPU_VAR(kernel_stack), %rsp
9004- addq $(KERNEL_STACK_OFFSET),%rsp
9005- /*
9006- * No need to follow this irqs on/off section: the syscall
9007- * disabled irqs, here we enable it straight after entry:
9008- */
9009- ENABLE_INTERRUPTS(CLBR_NONE)
9010 movl %ebp,%ebp /* zero extension */
9011 pushq $__USER32_DS
9012 CFI_ADJUST_CFA_OFFSET 8
9013@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
9014 pushfq
9015 CFI_ADJUST_CFA_OFFSET 8
9016 /*CFI_REL_OFFSET rflags,0*/
9017- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
9018- CFI_REGISTER rip,r10
9019+ orl $X86_EFLAGS_IF,(%rsp)
9020+ GET_THREAD_INFO(%r11)
9021+ movl TI_sysenter_return(%r11), %r11d
9022+ CFI_REGISTER rip,r11
9023 pushq $__USER32_CS
9024 CFI_ADJUST_CFA_OFFSET 8
9025 /*CFI_REL_OFFSET cs,0*/
9026 movl %eax, %eax
9027- pushq %r10
9028+ pushq %r11
9029 CFI_ADJUST_CFA_OFFSET 8
9030 CFI_REL_OFFSET rip,0
9031 pushq %rax
9032 CFI_ADJUST_CFA_OFFSET 8
9033 cld
9034 SAVE_ARGS 0,0,1
9035+ pax_enter_kernel_user
9036+ /*
9037+ * No need to follow this irqs on/off section: the syscall
9038+ * disabled irqs, here we enable it straight after entry:
9039+ */
9040+ ENABLE_INTERRUPTS(CLBR_NONE)
9041 /* no need to do an access_ok check here because rbp has been
9042 32bit zero extended */
9043+
9044+#ifdef CONFIG_PAX_MEMORY_UDEREF
9045+ mov $PAX_USER_SHADOW_BASE,%r11
9046+ add %r11,%rbp
9047+#endif
9048+
9049 1: movl (%rbp),%ebp
9050 .section __ex_table,"a"
9051 .quad 1b,ia32_badarg
9052 .previous
9053- GET_THREAD_INFO(%r10)
9054- orl $TS_COMPAT,TI_status(%r10)
9055- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9056+ GET_THREAD_INFO(%r11)
9057+ orl $TS_COMPAT,TI_status(%r11)
9058+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9059 CFI_REMEMBER_STATE
9060 jnz sysenter_tracesys
9061 cmpq $(IA32_NR_syscalls-1),%rax
9062@@ -166,13 +202,15 @@ sysenter_do_call:
9063 sysenter_dispatch:
9064 call *ia32_sys_call_table(,%rax,8)
9065 movq %rax,RAX-ARGOFFSET(%rsp)
9066- GET_THREAD_INFO(%r10)
9067+ GET_THREAD_INFO(%r11)
9068 DISABLE_INTERRUPTS(CLBR_NONE)
9069 TRACE_IRQS_OFF
9070- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9071+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9072 jnz sysexit_audit
9073 sysexit_from_sys_call:
9074- andl $~TS_COMPAT,TI_status(%r10)
9075+ pax_exit_kernel_user
9076+ pax_erase_kstack
9077+ andl $~TS_COMPAT,TI_status(%r11)
9078 /* clear IF, that popfq doesn't enable interrupts early */
9079 andl $~0x200,EFLAGS-R11(%rsp)
9080 movl RIP-R11(%rsp),%edx /* User %eip */
9081@@ -200,6 +238,9 @@ sysexit_from_sys_call:
9082 movl %eax,%esi /* 2nd arg: syscall number */
9083 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9084 call audit_syscall_entry
9085+
9086+ pax_erase_kstack
9087+
9088 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9089 cmpq $(IA32_NR_syscalls-1),%rax
9090 ja ia32_badsys
9091@@ -211,7 +252,7 @@ sysexit_from_sys_call:
9092 .endm
9093
9094 .macro auditsys_exit exit
9095- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9096+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9097 jnz ia32_ret_from_sys_call
9098 TRACE_IRQS_ON
9099 sti
9100@@ -221,12 +262,12 @@ sysexit_from_sys_call:
9101 movzbl %al,%edi /* zero-extend that into %edi */
9102 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
9103 call audit_syscall_exit
9104- GET_THREAD_INFO(%r10)
9105+ GET_THREAD_INFO(%r11)
9106 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
9107 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9108 cli
9109 TRACE_IRQS_OFF
9110- testl %edi,TI_flags(%r10)
9111+ testl %edi,TI_flags(%r11)
9112 jz \exit
9113 CLEAR_RREGS -ARGOFFSET
9114 jmp int_with_check
9115@@ -244,7 +285,7 @@ sysexit_audit:
9116
9117 sysenter_tracesys:
9118 #ifdef CONFIG_AUDITSYSCALL
9119- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9120+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9121 jz sysenter_auditsys
9122 #endif
9123 SAVE_REST
9124@@ -252,6 +293,9 @@ sysenter_tracesys:
9125 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
9126 movq %rsp,%rdi /* &pt_regs -> arg1 */
9127 call syscall_trace_enter
9128+
9129+ pax_erase_kstack
9130+
9131 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9132 RESTORE_REST
9133 cmpq $(IA32_NR_syscalls-1),%rax
9134@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
9135 ENTRY(ia32_cstar_target)
9136 CFI_STARTPROC32 simple
9137 CFI_SIGNAL_FRAME
9138- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9139+ CFI_DEF_CFA rsp,0
9140 CFI_REGISTER rip,rcx
9141 /*CFI_REGISTER rflags,r11*/
9142 SWAPGS_UNSAFE_STACK
9143 movl %esp,%r8d
9144 CFI_REGISTER rsp,r8
9145 movq PER_CPU_VAR(kernel_stack),%rsp
9146+ SAVE_ARGS 8*6,1,1
9147+ pax_enter_kernel_user
9148 /*
9149 * No need to follow this irqs on/off section: the syscall
9150 * disabled irqs and here we enable it straight after entry:
9151 */
9152 ENABLE_INTERRUPTS(CLBR_NONE)
9153- SAVE_ARGS 8,1,1
9154 movl %eax,%eax /* zero extension */
9155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9156 movq %rcx,RIP-ARGOFFSET(%rsp)
9157@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
9158 /* no need to do an access_ok check here because r8 has been
9159 32bit zero extended */
9160 /* hardware stack frame is complete now */
9161+
9162+#ifdef CONFIG_PAX_MEMORY_UDEREF
9163+ mov $PAX_USER_SHADOW_BASE,%r11
9164+ add %r11,%r8
9165+#endif
9166+
9167 1: movl (%r8),%r9d
9168 .section __ex_table,"a"
9169 .quad 1b,ia32_badarg
9170 .previous
9171- GET_THREAD_INFO(%r10)
9172- orl $TS_COMPAT,TI_status(%r10)
9173- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9174+ GET_THREAD_INFO(%r11)
9175+ orl $TS_COMPAT,TI_status(%r11)
9176+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9177 CFI_REMEMBER_STATE
9178 jnz cstar_tracesys
9179 cmpq $IA32_NR_syscalls-1,%rax
9180@@ -327,13 +378,15 @@ cstar_do_call:
9181 cstar_dispatch:
9182 call *ia32_sys_call_table(,%rax,8)
9183 movq %rax,RAX-ARGOFFSET(%rsp)
9184- GET_THREAD_INFO(%r10)
9185+ GET_THREAD_INFO(%r11)
9186 DISABLE_INTERRUPTS(CLBR_NONE)
9187 TRACE_IRQS_OFF
9188- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9189+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9190 jnz sysretl_audit
9191 sysretl_from_sys_call:
9192- andl $~TS_COMPAT,TI_status(%r10)
9193+ pax_exit_kernel_user
9194+ pax_erase_kstack
9195+ andl $~TS_COMPAT,TI_status(%r11)
9196 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
9197 movl RIP-ARGOFFSET(%rsp),%ecx
9198 CFI_REGISTER rip,rcx
9199@@ -361,7 +414,7 @@ sysretl_audit:
9200
9201 cstar_tracesys:
9202 #ifdef CONFIG_AUDITSYSCALL
9203- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9204+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9205 jz cstar_auditsys
9206 #endif
9207 xchgl %r9d,%ebp
9208@@ -370,6 +423,9 @@ cstar_tracesys:
9209 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9210 movq %rsp,%rdi /* &pt_regs -> arg1 */
9211 call syscall_trace_enter
9212+
9213+ pax_erase_kstack
9214+
9215 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9216 RESTORE_REST
9217 xchgl %ebp,%r9d
9218@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
9219 CFI_REL_OFFSET rip,RIP-RIP
9220 PARAVIRT_ADJUST_EXCEPTION_FRAME
9221 SWAPGS
9222- /*
9223- * No need to follow this irqs on/off section: the syscall
9224- * disabled irqs and here we enable it straight after entry:
9225- */
9226- ENABLE_INTERRUPTS(CLBR_NONE)
9227 movl %eax,%eax
9228 pushq %rax
9229 CFI_ADJUST_CFA_OFFSET 8
9230@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
9231 /* note the registers are not zero extended to the sf.
9232 this could be a problem. */
9233 SAVE_ARGS 0,0,1
9234- GET_THREAD_INFO(%r10)
9235- orl $TS_COMPAT,TI_status(%r10)
9236- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9237+ pax_enter_kernel_user
9238+ /*
9239+ * No need to follow this irqs on/off section: the syscall
9240+ * disabled irqs and here we enable it straight after entry:
9241+ */
9242+ ENABLE_INTERRUPTS(CLBR_NONE)
9243+ GET_THREAD_INFO(%r11)
9244+ orl $TS_COMPAT,TI_status(%r11)
9245+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9246 jnz ia32_tracesys
9247 cmpq $(IA32_NR_syscalls-1),%rax
9248 ja ia32_badsys
9249@@ -448,6 +505,9 @@ ia32_tracesys:
9250 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9251 movq %rsp,%rdi /* &pt_regs -> arg1 */
9252 call syscall_trace_enter
9253+
9254+ pax_erase_kstack
9255+
9256 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9257 RESTORE_REST
9258 cmpq $(IA32_NR_syscalls-1),%rax
9259@@ -462,6 +522,7 @@ ia32_badsys:
9260
9261 quiet_ni_syscall:
9262 movq $-ENOSYS,%rax
9263+ pax_force_retaddr
9264 ret
9265 CFI_ENDPROC
9266
9267diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9268index 016218c..47ccbdd 100644
9269--- a/arch/x86/ia32/sys_ia32.c
9270+++ b/arch/x86/ia32/sys_ia32.c
9271@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9272 */
9273 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9274 {
9275- typeof(ubuf->st_uid) uid = 0;
9276- typeof(ubuf->st_gid) gid = 0;
9277+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9278+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9279 SET_UID(uid, stat->uid);
9280 SET_GID(gid, stat->gid);
9281 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9282@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9283 }
9284 set_fs(KERNEL_DS);
9285 ret = sys_rt_sigprocmask(how,
9286- set ? (sigset_t __user *)&s : NULL,
9287- oset ? (sigset_t __user *)&s : NULL,
9288+ set ? (sigset_t __force_user *)&s : NULL,
9289+ oset ? (sigset_t __force_user *)&s : NULL,
9290 sigsetsize);
9291 set_fs(old_fs);
9292 if (ret)
9293@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9294 mm_segment_t old_fs = get_fs();
9295
9296 set_fs(KERNEL_DS);
9297- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9298+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9299 set_fs(old_fs);
9300 if (put_compat_timespec(&t, interval))
9301 return -EFAULT;
9302@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9303 mm_segment_t old_fs = get_fs();
9304
9305 set_fs(KERNEL_DS);
9306- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9307+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9308 set_fs(old_fs);
9309 if (!ret) {
9310 switch (_NSIG_WORDS) {
9311@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9312 if (copy_siginfo_from_user32(&info, uinfo))
9313 return -EFAULT;
9314 set_fs(KERNEL_DS);
9315- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9316+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9317 set_fs(old_fs);
9318 return ret;
9319 }
9320@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9321 return -EFAULT;
9322
9323 set_fs(KERNEL_DS);
9324- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9325+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9326 count);
9327 set_fs(old_fs);
9328
9329diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9330index e2077d3..17d07ad 100644
9331--- a/arch/x86/include/asm/alternative-asm.h
9332+++ b/arch/x86/include/asm/alternative-asm.h
9333@@ -8,10 +8,10 @@
9334
9335 #ifdef CONFIG_SMP
9336 .macro LOCK_PREFIX
9337-1: lock
9338+672: lock
9339 .section .smp_locks,"a"
9340 .align 4
9341- X86_ALIGN 1b
9342+ X86_ALIGN 672b
9343 .previous
9344 .endm
9345 #else
9346@@ -19,4 +19,43 @@
9347 .endm
9348 #endif
9349
9350+#ifdef KERNEXEC_PLUGIN
9351+ .macro pax_force_retaddr_bts rip=0
9352+ btsq $63,\rip(%rsp)
9353+ .endm
9354+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9355+ .macro pax_force_retaddr rip=0, reload=0
9356+ btsq $63,\rip(%rsp)
9357+ .endm
9358+ .macro pax_force_fptr ptr
9359+ btsq $63,\ptr
9360+ .endm
9361+ .macro pax_set_fptr_mask
9362+ .endm
9363+#endif
9364+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9365+ .macro pax_force_retaddr rip=0, reload=0
9366+ .if \reload
9367+ pax_set_fptr_mask
9368+ .endif
9369+ orq %r10,\rip(%rsp)
9370+ .endm
9371+ .macro pax_force_fptr ptr
9372+ orq %r10,\ptr
9373+ .endm
9374+ .macro pax_set_fptr_mask
9375+ movabs $0x8000000000000000,%r10
9376+ .endm
9377+#endif
9378+#else
9379+ .macro pax_force_retaddr rip=0, reload=0
9380+ .endm
9381+ .macro pax_force_fptr ptr
9382+ .endm
9383+ .macro pax_force_retaddr_bts rip=0
9384+ .endm
9385+ .macro pax_set_fptr_mask
9386+ .endm
9387+#endif
9388+
9389 #endif /* __ASSEMBLY__ */
9390diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9391index c240efc..fdfadf3 100644
9392--- a/arch/x86/include/asm/alternative.h
9393+++ b/arch/x86/include/asm/alternative.h
9394@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
9395 " .byte 662b-661b\n" /* sourcelen */ \
9396 " .byte 664f-663f\n" /* replacementlen */ \
9397 ".previous\n" \
9398- ".section .altinstr_replacement, \"ax\"\n" \
9399+ ".section .altinstr_replacement, \"a\"\n" \
9400 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9401 ".previous"
9402
9403diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9404index 474d80d..1f97d58 100644
9405--- a/arch/x86/include/asm/apic.h
9406+++ b/arch/x86/include/asm/apic.h
9407@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
9408
9409 #ifdef CONFIG_X86_LOCAL_APIC
9410
9411-extern unsigned int apic_verbosity;
9412+extern int apic_verbosity;
9413 extern int local_apic_timer_c2_ok;
9414
9415 extern int disable_apic;
9416diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9417index 20370c6..a2eb9b0 100644
9418--- a/arch/x86/include/asm/apm.h
9419+++ b/arch/x86/include/asm/apm.h
9420@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9421 __asm__ __volatile__(APM_DO_ZERO_SEGS
9422 "pushl %%edi\n\t"
9423 "pushl %%ebp\n\t"
9424- "lcall *%%cs:apm_bios_entry\n\t"
9425+ "lcall *%%ss:apm_bios_entry\n\t"
9426 "setc %%al\n\t"
9427 "popl %%ebp\n\t"
9428 "popl %%edi\n\t"
9429@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9430 __asm__ __volatile__(APM_DO_ZERO_SEGS
9431 "pushl %%edi\n\t"
9432 "pushl %%ebp\n\t"
9433- "lcall *%%cs:apm_bios_entry\n\t"
9434+ "lcall *%%ss:apm_bios_entry\n\t"
9435 "setc %%bl\n\t"
9436 "popl %%ebp\n\t"
9437 "popl %%edi\n\t"
9438diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
9439index dc5a667..939040c 100644
9440--- a/arch/x86/include/asm/atomic_32.h
9441+++ b/arch/x86/include/asm/atomic_32.h
9442@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
9443 }
9444
9445 /**
9446+ * atomic_read_unchecked - read atomic variable
9447+ * @v: pointer of type atomic_unchecked_t
9448+ *
9449+ * Atomically reads the value of @v.
9450+ */
9451+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9452+{
9453+ return v->counter;
9454+}
9455+
9456+/**
9457 * atomic_set - set atomic variable
9458 * @v: pointer of type atomic_t
9459 * @i: required value
9460@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
9461 }
9462
9463 /**
9464+ * atomic_set_unchecked - set atomic variable
9465+ * @v: pointer of type atomic_unchecked_t
9466+ * @i: required value
9467+ *
9468+ * Atomically sets the value of @v to @i.
9469+ */
9470+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9471+{
9472+ v->counter = i;
9473+}
9474+
9475+/**
9476 * atomic_add - add integer to atomic variable
9477 * @i: integer value to add
9478 * @v: pointer of type atomic_t
9479@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
9480 */
9481 static inline void atomic_add(int i, atomic_t *v)
9482 {
9483- asm volatile(LOCK_PREFIX "addl %1,%0"
9484+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9485+
9486+#ifdef CONFIG_PAX_REFCOUNT
9487+ "jno 0f\n"
9488+ LOCK_PREFIX "subl %1,%0\n"
9489+ "int $4\n0:\n"
9490+ _ASM_EXTABLE(0b, 0b)
9491+#endif
9492+
9493+ : "+m" (v->counter)
9494+ : "ir" (i));
9495+}
9496+
9497+/**
9498+ * atomic_add_unchecked - add integer to atomic variable
9499+ * @i: integer value to add
9500+ * @v: pointer of type atomic_unchecked_t
9501+ *
9502+ * Atomically adds @i to @v.
9503+ */
9504+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9505+{
9506+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9507 : "+m" (v->counter)
9508 : "ir" (i));
9509 }
9510@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
9511 */
9512 static inline void atomic_sub(int i, atomic_t *v)
9513 {
9514- asm volatile(LOCK_PREFIX "subl %1,%0"
9515+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9516+
9517+#ifdef CONFIG_PAX_REFCOUNT
9518+ "jno 0f\n"
9519+ LOCK_PREFIX "addl %1,%0\n"
9520+ "int $4\n0:\n"
9521+ _ASM_EXTABLE(0b, 0b)
9522+#endif
9523+
9524+ : "+m" (v->counter)
9525+ : "ir" (i));
9526+}
9527+
9528+/**
9529+ * atomic_sub_unchecked - subtract integer from atomic variable
9530+ * @i: integer value to subtract
9531+ * @v: pointer of type atomic_unchecked_t
9532+ *
9533+ * Atomically subtracts @i from @v.
9534+ */
9535+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9536+{
9537+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9538 : "+m" (v->counter)
9539 : "ir" (i));
9540 }
9541@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9542 {
9543 unsigned char c;
9544
9545- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9546+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9547+
9548+#ifdef CONFIG_PAX_REFCOUNT
9549+ "jno 0f\n"
9550+ LOCK_PREFIX "addl %2,%0\n"
9551+ "int $4\n0:\n"
9552+ _ASM_EXTABLE(0b, 0b)
9553+#endif
9554+
9555+ "sete %1\n"
9556 : "+m" (v->counter), "=qm" (c)
9557 : "ir" (i) : "memory");
9558 return c;
9559@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9560 */
9561 static inline void atomic_inc(atomic_t *v)
9562 {
9563- asm volatile(LOCK_PREFIX "incl %0"
9564+ asm volatile(LOCK_PREFIX "incl %0\n"
9565+
9566+#ifdef CONFIG_PAX_REFCOUNT
9567+ "jno 0f\n"
9568+ LOCK_PREFIX "decl %0\n"
9569+ "int $4\n0:\n"
9570+ _ASM_EXTABLE(0b, 0b)
9571+#endif
9572+
9573+ : "+m" (v->counter));
9574+}
9575+
9576+/**
9577+ * atomic_inc_unchecked - increment atomic variable
9578+ * @v: pointer of type atomic_unchecked_t
9579+ *
9580+ * Atomically increments @v by 1.
9581+ */
9582+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9583+{
9584+ asm volatile(LOCK_PREFIX "incl %0\n"
9585 : "+m" (v->counter));
9586 }
9587
9588@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
9589 */
9590 static inline void atomic_dec(atomic_t *v)
9591 {
9592- asm volatile(LOCK_PREFIX "decl %0"
9593+ asm volatile(LOCK_PREFIX "decl %0\n"
9594+
9595+#ifdef CONFIG_PAX_REFCOUNT
9596+ "jno 0f\n"
9597+ LOCK_PREFIX "incl %0\n"
9598+ "int $4\n0:\n"
9599+ _ASM_EXTABLE(0b, 0b)
9600+#endif
9601+
9602+ : "+m" (v->counter));
9603+}
9604+
9605+/**
9606+ * atomic_dec_unchecked - decrement atomic variable
9607+ * @v: pointer of type atomic_unchecked_t
9608+ *
9609+ * Atomically decrements @v by 1.
9610+ */
9611+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9612+{
9613+ asm volatile(LOCK_PREFIX "decl %0\n"
9614 : "+m" (v->counter));
9615 }
9616
9617@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9618 {
9619 unsigned char c;
9620
9621- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9622+ asm volatile(LOCK_PREFIX "decl %0\n"
9623+
9624+#ifdef CONFIG_PAX_REFCOUNT
9625+ "jno 0f\n"
9626+ LOCK_PREFIX "incl %0\n"
9627+ "int $4\n0:\n"
9628+ _ASM_EXTABLE(0b, 0b)
9629+#endif
9630+
9631+ "sete %1\n"
9632 : "+m" (v->counter), "=qm" (c)
9633 : : "memory");
9634 return c != 0;
9635@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9636 {
9637 unsigned char c;
9638
9639- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9640+ asm volatile(LOCK_PREFIX "incl %0\n"
9641+
9642+#ifdef CONFIG_PAX_REFCOUNT
9643+ "jno 0f\n"
9644+ LOCK_PREFIX "decl %0\n"
9645+ "into\n0:\n"
9646+ _ASM_EXTABLE(0b, 0b)
9647+#endif
9648+
9649+ "sete %1\n"
9650+ : "+m" (v->counter), "=qm" (c)
9651+ : : "memory");
9652+ return c != 0;
9653+}
9654+
9655+/**
9656+ * atomic_inc_and_test_unchecked - increment and test
9657+ * @v: pointer of type atomic_unchecked_t
9658+ *
9659+ * Atomically increments @v by 1
9660+ * and returns true if the result is zero, or false for all
9661+ * other cases.
9662+ */
9663+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9664+{
9665+ unsigned char c;
9666+
9667+ asm volatile(LOCK_PREFIX "incl %0\n"
9668+ "sete %1\n"
9669 : "+m" (v->counter), "=qm" (c)
9670 : : "memory");
9671 return c != 0;
9672@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9673 {
9674 unsigned char c;
9675
9676- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9677+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9678+
9679+#ifdef CONFIG_PAX_REFCOUNT
9680+ "jno 0f\n"
9681+ LOCK_PREFIX "subl %2,%0\n"
9682+ "int $4\n0:\n"
9683+ _ASM_EXTABLE(0b, 0b)
9684+#endif
9685+
9686+ "sets %1\n"
9687 : "+m" (v->counter), "=qm" (c)
9688 : "ir" (i) : "memory");
9689 return c;
9690@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
9691 #endif
9692 /* Modern 486+ processor */
9693 __i = i;
9694- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9695+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9696+
9697+#ifdef CONFIG_PAX_REFCOUNT
9698+ "jno 0f\n"
9699+ "movl %0, %1\n"
9700+ "int $4\n0:\n"
9701+ _ASM_EXTABLE(0b, 0b)
9702+#endif
9703+
9704 : "+r" (i), "+m" (v->counter)
9705 : : "memory");
9706 return i + __i;
9707@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
9708 }
9709
9710 /**
9711+ * atomic_add_return_unchecked - add integer and return
9712+ * @v: pointer of type atomic_unchecked_t
9713+ * @i: integer value to add
9714+ *
9715+ * Atomically adds @i to @v and returns @i + @v
9716+ */
9717+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9718+{
9719+ int __i;
9720+#ifdef CONFIG_M386
9721+ unsigned long flags;
9722+ if (unlikely(boot_cpu_data.x86 <= 3))
9723+ goto no_xadd;
9724+#endif
9725+ /* Modern 486+ processor */
9726+ __i = i;
9727+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
9728+ : "+r" (i), "+m" (v->counter)
9729+ : : "memory");
9730+ return i + __i;
9731+
9732+#ifdef CONFIG_M386
9733+no_xadd: /* Legacy 386 processor */
9734+ local_irq_save(flags);
9735+ __i = atomic_read_unchecked(v);
9736+ atomic_set_unchecked(v, i + __i);
9737+ local_irq_restore(flags);
9738+ return i + __i;
9739+#endif
9740+}
9741+
9742+/**
9743 * atomic_sub_return - subtract integer and return
9744 * @v: pointer of type atomic_t
9745 * @i: integer value to subtract
9746@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9747 return cmpxchg(&v->counter, old, new);
9748 }
9749
9750+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9751+{
9752+ return cmpxchg(&v->counter, old, new);
9753+}
9754+
9755 static inline int atomic_xchg(atomic_t *v, int new)
9756 {
9757 return xchg(&v->counter, new);
9758 }
9759
9760+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9761+{
9762+ return xchg(&v->counter, new);
9763+}
9764+
9765 /**
9766 * atomic_add_unless - add unless the number is already a given value
9767 * @v: pointer of type atomic_t
9768@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
9769 */
9770 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9771 {
9772- int c, old;
9773+ int c, old, new;
9774 c = atomic_read(v);
9775 for (;;) {
9776- if (unlikely(c == (u)))
9777+ if (unlikely(c == u))
9778 break;
9779- old = atomic_cmpxchg((v), c, c + (a));
9780+
9781+ asm volatile("addl %2,%0\n"
9782+
9783+#ifdef CONFIG_PAX_REFCOUNT
9784+ "jno 0f\n"
9785+ "subl %2,%0\n"
9786+ "int $4\n0:\n"
9787+ _ASM_EXTABLE(0b, 0b)
9788+#endif
9789+
9790+ : "=r" (new)
9791+ : "0" (c), "ir" (a));
9792+
9793+ old = atomic_cmpxchg(v, c, new);
9794 if (likely(old == c))
9795 break;
9796 c = old;
9797 }
9798- return c != (u);
9799+ return c != u;
9800 }
9801
9802 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9803
9804 #define atomic_inc_return(v) (atomic_add_return(1, v))
9805+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9806+{
9807+ return atomic_add_return_unchecked(1, v);
9808+}
9809 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9810
9811 /* These are x86-specific, used by some header files */
9812@@ -266,9 +495,18 @@ typedef struct {
9813 u64 __aligned(8) counter;
9814 } atomic64_t;
9815
9816+#ifdef CONFIG_PAX_REFCOUNT
9817+typedef struct {
9818+ u64 __aligned(8) counter;
9819+} atomic64_unchecked_t;
9820+#else
9821+typedef atomic64_t atomic64_unchecked_t;
9822+#endif
9823+
9824 #define ATOMIC64_INIT(val) { (val) }
9825
9826 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9827+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
9828
9829 /**
9830 * atomic64_xchg - xchg atomic64 variable
9831@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9832 * the old value.
9833 */
9834 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9835+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9836
9837 /**
9838 * atomic64_set - set atomic64 variable
9839@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9840 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
9841
9842 /**
9843+ * atomic64_unchecked_set - set atomic64 variable
9844+ * @ptr: pointer to type atomic64_unchecked_t
9845+ * @new_val: value to assign
9846+ *
9847+ * Atomically sets the value of @ptr to @new_val.
9848+ */
9849+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9850+
9851+/**
9852 * atomic64_read - read atomic64 variable
9853 * @ptr: pointer to type atomic64_t
9854 *
9855@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9856 return res;
9857 }
9858
9859-extern u64 atomic64_read(atomic64_t *ptr);
9860+/**
9861+ * atomic64_read_unchecked - read atomic64 variable
9862+ * @ptr: pointer to type atomic64_unchecked_t
9863+ *
9864+ * Atomically reads the value of @ptr and returns it.
9865+ */
9866+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9867+{
9868+ u64 res;
9869+
9870+ /*
9871+ * Note, we inline this atomic64_unchecked_t primitive because
9872+ * it only clobbers EAX/EDX and leaves the others
9873+ * untouched. We also (somewhat subtly) rely on the
9874+ * fact that cmpxchg8b returns the current 64-bit value
9875+ * of the memory location we are touching:
9876+ */
9877+ asm volatile(
9878+ "mov %%ebx, %%eax\n\t"
9879+ "mov %%ecx, %%edx\n\t"
9880+ LOCK_PREFIX "cmpxchg8b %1\n"
9881+ : "=&A" (res)
9882+ : "m" (*ptr)
9883+ );
9884+
9885+ return res;
9886+}
9887
9888 /**
9889 * atomic64_add_return - add and return
9890@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9891 * Other variants with different arithmetic operators:
9892 */
9893 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9894+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9895 extern u64 atomic64_inc_return(atomic64_t *ptr);
9896+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9897 extern u64 atomic64_dec_return(atomic64_t *ptr);
9898+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9899
9900 /**
9901 * atomic64_add - add integer to atomic64 variable
9902@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9903 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9904
9905 /**
9906+ * atomic64_add_unchecked - add integer to atomic64 variable
9907+ * @delta: integer value to add
9908+ * @ptr: pointer to type atomic64_unchecked_t
9909+ *
9910+ * Atomically adds @delta to @ptr.
9911+ */
9912+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9913+
9914+/**
9915 * atomic64_sub - subtract the atomic64 variable
9916 * @delta: integer value to subtract
9917 * @ptr: pointer to type atomic64_t
9918@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9919 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9920
9921 /**
9922+ * atomic64_sub_unchecked - subtract the atomic64 variable
9923+ * @delta: integer value to subtract
9924+ * @ptr: pointer to type atomic64_unchecked_t
9925+ *
9926+ * Atomically subtracts @delta from @ptr.
9927+ */
9928+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9929+
9930+/**
9931 * atomic64_sub_and_test - subtract value from variable and test result
9932 * @delta: integer value to subtract
9933 * @ptr: pointer to type atomic64_t
9934@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9935 extern void atomic64_inc(atomic64_t *ptr);
9936
9937 /**
9938+ * atomic64_inc_unchecked - increment atomic64 variable
9939+ * @ptr: pointer to type atomic64_unchecked_t
9940+ *
9941+ * Atomically increments @ptr by 1.
9942+ */
9943+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9944+
9945+/**
9946 * atomic64_dec - decrement atomic64 variable
9947 * @ptr: pointer to type atomic64_t
9948 *
9949@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9950 extern void atomic64_dec(atomic64_t *ptr);
9951
9952 /**
9953+ * atomic64_dec_unchecked - decrement atomic64 variable
9954+ * @ptr: pointer to type atomic64_unchecked_t
9955+ *
9956+ * Atomically decrements @ptr by 1.
9957+ */
9958+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9959+
9960+/**
9961 * atomic64_dec_and_test - decrement and test
9962 * @ptr: pointer to type atomic64_t
9963 *
9964diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9965index d605dc2..fafd7bd 100644
9966--- a/arch/x86/include/asm/atomic_64.h
9967+++ b/arch/x86/include/asm/atomic_64.h
9968@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9969 }
9970
9971 /**
9972+ * atomic_read_unchecked - read atomic variable
9973+ * @v: pointer of type atomic_unchecked_t
9974+ *
9975+ * Atomically reads the value of @v.
9976+ */
9977+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9978+{
9979+ return v->counter;
9980+}
9981+
9982+/**
9983 * atomic_set - set atomic variable
9984 * @v: pointer of type atomic_t
9985 * @i: required value
9986@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9987 }
9988
9989 /**
9990+ * atomic_set_unchecked - set atomic variable
9991+ * @v: pointer of type atomic_unchecked_t
9992+ * @i: required value
9993+ *
9994+ * Atomically sets the value of @v to @i.
9995+ */
9996+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9997+{
9998+ v->counter = i;
9999+}
10000+
10001+/**
10002 * atomic_add - add integer to atomic variable
10003 * @i: integer value to add
10004 * @v: pointer of type atomic_t
10005@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
10006 */
10007 static inline void atomic_add(int i, atomic_t *v)
10008 {
10009- asm volatile(LOCK_PREFIX "addl %1,%0"
10010+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10011+
10012+#ifdef CONFIG_PAX_REFCOUNT
10013+ "jno 0f\n"
10014+ LOCK_PREFIX "subl %1,%0\n"
10015+ "int $4\n0:\n"
10016+ _ASM_EXTABLE(0b, 0b)
10017+#endif
10018+
10019+ : "=m" (v->counter)
10020+ : "ir" (i), "m" (v->counter));
10021+}
10022+
10023+/**
10024+ * atomic_add_unchecked - add integer to atomic variable
10025+ * @i: integer value to add
10026+ * @v: pointer of type atomic_unchecked_t
10027+ *
10028+ * Atomically adds @i to @v.
10029+ */
10030+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10031+{
10032+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10033 : "=m" (v->counter)
10034 : "ir" (i), "m" (v->counter));
10035 }
10036@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
10037 */
10038 static inline void atomic_sub(int i, atomic_t *v)
10039 {
10040- asm volatile(LOCK_PREFIX "subl %1,%0"
10041+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10042+
10043+#ifdef CONFIG_PAX_REFCOUNT
10044+ "jno 0f\n"
10045+ LOCK_PREFIX "addl %1,%0\n"
10046+ "int $4\n0:\n"
10047+ _ASM_EXTABLE(0b, 0b)
10048+#endif
10049+
10050+ : "=m" (v->counter)
10051+ : "ir" (i), "m" (v->counter));
10052+}
10053+
10054+/**
10055+ * atomic_sub_unchecked - subtract the atomic variable
10056+ * @i: integer value to subtract
10057+ * @v: pointer of type atomic_unchecked_t
10058+ *
10059+ * Atomically subtracts @i from @v.
10060+ */
10061+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10062+{
10063+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10064 : "=m" (v->counter)
10065 : "ir" (i), "m" (v->counter));
10066 }
10067@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10068 {
10069 unsigned char c;
10070
10071- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10072+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10073+
10074+#ifdef CONFIG_PAX_REFCOUNT
10075+ "jno 0f\n"
10076+ LOCK_PREFIX "addl %2,%0\n"
10077+ "int $4\n0:\n"
10078+ _ASM_EXTABLE(0b, 0b)
10079+#endif
10080+
10081+ "sete %1\n"
10082 : "=m" (v->counter), "=qm" (c)
10083 : "ir" (i), "m" (v->counter) : "memory");
10084 return c;
10085@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10086 */
10087 static inline void atomic_inc(atomic_t *v)
10088 {
10089- asm volatile(LOCK_PREFIX "incl %0"
10090+ asm volatile(LOCK_PREFIX "incl %0\n"
10091+
10092+#ifdef CONFIG_PAX_REFCOUNT
10093+ "jno 0f\n"
10094+ LOCK_PREFIX "decl %0\n"
10095+ "int $4\n0:\n"
10096+ _ASM_EXTABLE(0b, 0b)
10097+#endif
10098+
10099+ : "=m" (v->counter)
10100+ : "m" (v->counter));
10101+}
10102+
10103+/**
10104+ * atomic_inc_unchecked - increment atomic variable
10105+ * @v: pointer of type atomic_unchecked_t
10106+ *
10107+ * Atomically increments @v by 1.
10108+ */
10109+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10110+{
10111+ asm volatile(LOCK_PREFIX "incl %0\n"
10112 : "=m" (v->counter)
10113 : "m" (v->counter));
10114 }
10115@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
10116 */
10117 static inline void atomic_dec(atomic_t *v)
10118 {
10119- asm volatile(LOCK_PREFIX "decl %0"
10120+ asm volatile(LOCK_PREFIX "decl %0\n"
10121+
10122+#ifdef CONFIG_PAX_REFCOUNT
10123+ "jno 0f\n"
10124+ LOCK_PREFIX "incl %0\n"
10125+ "int $4\n0:\n"
10126+ _ASM_EXTABLE(0b, 0b)
10127+#endif
10128+
10129+ : "=m" (v->counter)
10130+ : "m" (v->counter));
10131+}
10132+
10133+/**
10134+ * atomic_dec_unchecked - decrement atomic variable
10135+ * @v: pointer of type atomic_unchecked_t
10136+ *
10137+ * Atomically decrements @v by 1.
10138+ */
10139+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10140+{
10141+ asm volatile(LOCK_PREFIX "decl %0\n"
10142 : "=m" (v->counter)
10143 : "m" (v->counter));
10144 }
10145@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10146 {
10147 unsigned char c;
10148
10149- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10150+ asm volatile(LOCK_PREFIX "decl %0\n"
10151+
10152+#ifdef CONFIG_PAX_REFCOUNT
10153+ "jno 0f\n"
10154+ LOCK_PREFIX "incl %0\n"
10155+ "int $4\n0:\n"
10156+ _ASM_EXTABLE(0b, 0b)
10157+#endif
10158+
10159+ "sete %1\n"
10160 : "=m" (v->counter), "=qm" (c)
10161 : "m" (v->counter) : "memory");
10162 return c != 0;
10163@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10164 {
10165 unsigned char c;
10166
10167- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10168+ asm volatile(LOCK_PREFIX "incl %0\n"
10169+
10170+#ifdef CONFIG_PAX_REFCOUNT
10171+ "jno 0f\n"
10172+ LOCK_PREFIX "decl %0\n"
10173+ "int $4\n0:\n"
10174+ _ASM_EXTABLE(0b, 0b)
10175+#endif
10176+
10177+ "sete %1\n"
10178+ : "=m" (v->counter), "=qm" (c)
10179+ : "m" (v->counter) : "memory");
10180+ return c != 0;
10181+}
10182+
10183+/**
10184+ * atomic_inc_and_test_unchecked - increment and test
10185+ * @v: pointer of type atomic_unchecked_t
10186+ *
10187+ * Atomically increments @v by 1
10188+ * and returns true if the result is zero, or false for all
10189+ * other cases.
10190+ */
10191+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10192+{
10193+ unsigned char c;
10194+
10195+ asm volatile(LOCK_PREFIX "incl %0\n"
10196+ "sete %1\n"
10197 : "=m" (v->counter), "=qm" (c)
10198 : "m" (v->counter) : "memory");
10199 return c != 0;
10200@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10201 {
10202 unsigned char c;
10203
10204- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10205+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10206+
10207+#ifdef CONFIG_PAX_REFCOUNT
10208+ "jno 0f\n"
10209+ LOCK_PREFIX "subl %2,%0\n"
10210+ "int $4\n0:\n"
10211+ _ASM_EXTABLE(0b, 0b)
10212+#endif
10213+
10214+ "sets %1\n"
10215 : "=m" (v->counter), "=qm" (c)
10216 : "ir" (i), "m" (v->counter) : "memory");
10217 return c;
10218@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10219 static inline int atomic_add_return(int i, atomic_t *v)
10220 {
10221 int __i = i;
10222- asm volatile(LOCK_PREFIX "xaddl %0, %1"
10223+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10224+
10225+#ifdef CONFIG_PAX_REFCOUNT
10226+ "jno 0f\n"
10227+ "movl %0, %1\n"
10228+ "int $4\n0:\n"
10229+ _ASM_EXTABLE(0b, 0b)
10230+#endif
10231+
10232+ : "+r" (i), "+m" (v->counter)
10233+ : : "memory");
10234+ return i + __i;
10235+}
10236+
10237+/**
10238+ * atomic_add_return_unchecked - add and return
10239+ * @i: integer value to add
10240+ * @v: pointer of type atomic_unchecked_t
10241+ *
10242+ * Atomically adds @i to @v and returns @i + @v
10243+ */
10244+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10245+{
10246+ int __i = i;
10247+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10248 : "+r" (i), "+m" (v->counter)
10249 : : "memory");
10250 return i + __i;
10251@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10252 }
10253
10254 #define atomic_inc_return(v) (atomic_add_return(1, v))
10255+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10256+{
10257+ return atomic_add_return_unchecked(1, v);
10258+}
10259 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10260
10261 /* The 64-bit atomic type */
10262@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
10263 }
10264
10265 /**
10266+ * atomic64_read_unchecked - read atomic64 variable
10267+ * @v: pointer of type atomic64_unchecked_t
10268+ *
10269+ * Atomically reads the value of @v.
10270+ * Doesn't imply a read memory barrier.
10271+ */
10272+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10273+{
10274+ return v->counter;
10275+}
10276+
10277+/**
10278 * atomic64_set - set atomic64 variable
10279 * @v: pointer to type atomic64_t
10280 * @i: required value
10281@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10282 }
10283
10284 /**
10285+ * atomic64_set_unchecked - set atomic64 variable
10286+ * @v: pointer to type atomic64_unchecked_t
10287+ * @i: required value
10288+ *
10289+ * Atomically sets the value of @v to @i.
10290+ */
10291+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10292+{
10293+ v->counter = i;
10294+}
10295+
10296+/**
10297 * atomic64_add - add integer to atomic64 variable
10298 * @i: integer value to add
10299 * @v: pointer to type atomic64_t
10300@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10301 */
10302 static inline void atomic64_add(long i, atomic64_t *v)
10303 {
10304+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
10305+
10306+#ifdef CONFIG_PAX_REFCOUNT
10307+ "jno 0f\n"
10308+ LOCK_PREFIX "subq %1,%0\n"
10309+ "int $4\n0:\n"
10310+ _ASM_EXTABLE(0b, 0b)
10311+#endif
10312+
10313+ : "=m" (v->counter)
10314+ : "er" (i), "m" (v->counter));
10315+}
10316+
10317+/**
10318+ * atomic64_add_unchecked - add integer to atomic64 variable
10319+ * @i: integer value to add
10320+ * @v: pointer to type atomic64_unchecked_t
10321+ *
10322+ * Atomically adds @i to @v.
10323+ */
10324+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10325+{
10326 asm volatile(LOCK_PREFIX "addq %1,%0"
10327 : "=m" (v->counter)
10328 : "er" (i), "m" (v->counter));
10329@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
10330 */
10331 static inline void atomic64_sub(long i, atomic64_t *v)
10332 {
10333- asm volatile(LOCK_PREFIX "subq %1,%0"
10334+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10335+
10336+#ifdef CONFIG_PAX_REFCOUNT
10337+ "jno 0f\n"
10338+ LOCK_PREFIX "addq %1,%0\n"
10339+ "int $4\n0:\n"
10340+ _ASM_EXTABLE(0b, 0b)
10341+#endif
10342+
10343 : "=m" (v->counter)
10344 : "er" (i), "m" (v->counter));
10345 }
10346@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10347 {
10348 unsigned char c;
10349
10350- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10351+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
10352+
10353+#ifdef CONFIG_PAX_REFCOUNT
10354+ "jno 0f\n"
10355+ LOCK_PREFIX "addq %2,%0\n"
10356+ "int $4\n0:\n"
10357+ _ASM_EXTABLE(0b, 0b)
10358+#endif
10359+
10360+ "sete %1\n"
10361 : "=m" (v->counter), "=qm" (c)
10362 : "er" (i), "m" (v->counter) : "memory");
10363 return c;
10364@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10365 */
10366 static inline void atomic64_inc(atomic64_t *v)
10367 {
10368+ asm volatile(LOCK_PREFIX "incq %0\n"
10369+
10370+#ifdef CONFIG_PAX_REFCOUNT
10371+ "jno 0f\n"
10372+ LOCK_PREFIX "decq %0\n"
10373+ "int $4\n0:\n"
10374+ _ASM_EXTABLE(0b, 0b)
10375+#endif
10376+
10377+ : "=m" (v->counter)
10378+ : "m" (v->counter));
10379+}
10380+
10381+/**
10382+ * atomic64_inc_unchecked - increment atomic64 variable
10383+ * @v: pointer to type atomic64_unchecked_t
10384+ *
10385+ * Atomically increments @v by 1.
10386+ */
10387+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10388+{
10389 asm volatile(LOCK_PREFIX "incq %0"
10390 : "=m" (v->counter)
10391 : "m" (v->counter));
10392@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
10393 */
10394 static inline void atomic64_dec(atomic64_t *v)
10395 {
10396- asm volatile(LOCK_PREFIX "decq %0"
10397+ asm volatile(LOCK_PREFIX "decq %0\n"
10398+
10399+#ifdef CONFIG_PAX_REFCOUNT
10400+ "jno 0f\n"
10401+ LOCK_PREFIX "incq %0\n"
10402+ "int $4\n0:\n"
10403+ _ASM_EXTABLE(0b, 0b)
10404+#endif
10405+
10406+ : "=m" (v->counter)
10407+ : "m" (v->counter));
10408+}
10409+
10410+/**
10411+ * atomic64_dec_unchecked - decrement atomic64 variable
10412+ * @v: pointer to type atomic64_t
10413+ *
10414+ * Atomically decrements @v by 1.
10415+ */
10416+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10417+{
10418+ asm volatile(LOCK_PREFIX "decq %0\n"
10419 : "=m" (v->counter)
10420 : "m" (v->counter));
10421 }
10422@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
10423 {
10424 unsigned char c;
10425
10426- asm volatile(LOCK_PREFIX "decq %0; sete %1"
10427+ asm volatile(LOCK_PREFIX "decq %0\n"
10428+
10429+#ifdef CONFIG_PAX_REFCOUNT
10430+ "jno 0f\n"
10431+ LOCK_PREFIX "incq %0\n"
10432+ "int $4\n0:\n"
10433+ _ASM_EXTABLE(0b, 0b)
10434+#endif
10435+
10436+ "sete %1\n"
10437 : "=m" (v->counter), "=qm" (c)
10438 : "m" (v->counter) : "memory");
10439 return c != 0;
10440@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10441 {
10442 unsigned char c;
10443
10444- asm volatile(LOCK_PREFIX "incq %0; sete %1"
10445+ asm volatile(LOCK_PREFIX "incq %0\n"
10446+
10447+#ifdef CONFIG_PAX_REFCOUNT
10448+ "jno 0f\n"
10449+ LOCK_PREFIX "decq %0\n"
10450+ "int $4\n0:\n"
10451+ _ASM_EXTABLE(0b, 0b)
10452+#endif
10453+
10454+ "sete %1\n"
10455 : "=m" (v->counter), "=qm" (c)
10456 : "m" (v->counter) : "memory");
10457 return c != 0;
10458@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10459 {
10460 unsigned char c;
10461
10462- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10463+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
10464+
10465+#ifdef CONFIG_PAX_REFCOUNT
10466+ "jno 0f\n"
10467+ LOCK_PREFIX "subq %2,%0\n"
10468+ "int $4\n0:\n"
10469+ _ASM_EXTABLE(0b, 0b)
10470+#endif
10471+
10472+ "sets %1\n"
10473 : "=m" (v->counter), "=qm" (c)
10474 : "er" (i), "m" (v->counter) : "memory");
10475 return c;
10476@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10477 static inline long atomic64_add_return(long i, atomic64_t *v)
10478 {
10479 long __i = i;
10480- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
10481+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
10482+
10483+#ifdef CONFIG_PAX_REFCOUNT
10484+ "jno 0f\n"
10485+ "movq %0, %1\n"
10486+ "int $4\n0:\n"
10487+ _ASM_EXTABLE(0b, 0b)
10488+#endif
10489+
10490+ : "+r" (i), "+m" (v->counter)
10491+ : : "memory");
10492+ return i + __i;
10493+}
10494+
10495+/**
10496+ * atomic64_add_return_unchecked - add and return
10497+ * @i: integer value to add
10498+ * @v: pointer to type atomic64_unchecked_t
10499+ *
10500+ * Atomically adds @i to @v and returns @i + @v
10501+ */
10502+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10503+{
10504+ long __i = i;
10505+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
10506 : "+r" (i), "+m" (v->counter)
10507 : : "memory");
10508 return i + __i;
10509@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10510 }
10511
10512 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10513+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10514+{
10515+ return atomic64_add_return_unchecked(1, v);
10516+}
10517 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10518
10519 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10520@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10521 return cmpxchg(&v->counter, old, new);
10522 }
10523
10524+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10525+{
10526+ return cmpxchg(&v->counter, old, new);
10527+}
10528+
10529 static inline long atomic64_xchg(atomic64_t *v, long new)
10530 {
10531 return xchg(&v->counter, new);
10532 }
10533
10534+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10535+{
10536+ return xchg(&v->counter, new);
10537+}
10538+
10539 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
10540 {
10541 return cmpxchg(&v->counter, old, new);
10542 }
10543
10544+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10545+{
10546+ return cmpxchg(&v->counter, old, new);
10547+}
10548+
10549 static inline long atomic_xchg(atomic_t *v, int new)
10550 {
10551 return xchg(&v->counter, new);
10552 }
10553
10554+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10555+{
10556+ return xchg(&v->counter, new);
10557+}
10558+
10559 /**
10560 * atomic_add_unless - add unless the number is a given value
10561 * @v: pointer of type atomic_t
10562@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
10563 */
10564 static inline int atomic_add_unless(atomic_t *v, int a, int u)
10565 {
10566- int c, old;
10567+ int c, old, new;
10568 c = atomic_read(v);
10569 for (;;) {
10570- if (unlikely(c == (u)))
10571+ if (unlikely(c == u))
10572 break;
10573- old = atomic_cmpxchg((v), c, c + (a));
10574+
10575+ asm volatile("addl %2,%0\n"
10576+
10577+#ifdef CONFIG_PAX_REFCOUNT
10578+ "jno 0f\n"
10579+ "subl %2,%0\n"
10580+ "int $4\n0:\n"
10581+ _ASM_EXTABLE(0b, 0b)
10582+#endif
10583+
10584+ : "=r" (new)
10585+ : "0" (c), "ir" (a));
10586+
10587+ old = atomic_cmpxchg(v, c, new);
10588 if (likely(old == c))
10589 break;
10590 c = old;
10591 }
10592- return c != (u);
10593+ return c != u;
10594 }
10595
10596 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
10597@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10598 */
10599 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10600 {
10601- long c, old;
10602+ long c, old, new;
10603 c = atomic64_read(v);
10604 for (;;) {
10605- if (unlikely(c == (u)))
10606+ if (unlikely(c == u))
10607 break;
10608- old = atomic64_cmpxchg((v), c, c + (a));
10609+
10610+ asm volatile("addq %2,%0\n"
10611+
10612+#ifdef CONFIG_PAX_REFCOUNT
10613+ "jno 0f\n"
10614+ "subq %2,%0\n"
10615+ "int $4\n0:\n"
10616+ _ASM_EXTABLE(0b, 0b)
10617+#endif
10618+
10619+ : "=r" (new)
10620+ : "0" (c), "er" (a));
10621+
10622+ old = atomic64_cmpxchg(v, c, new);
10623 if (likely(old == c))
10624 break;
10625 c = old;
10626 }
10627- return c != (u);
10628+ return c != u;
10629 }
10630
10631 /**
10632diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10633index 02b47a6..d5c4b15 100644
10634--- a/arch/x86/include/asm/bitops.h
10635+++ b/arch/x86/include/asm/bitops.h
10636@@ -38,7 +38,7 @@
10637 * a mask operation on a byte.
10638 */
10639 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10640-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10641+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10642 #define CONST_MASK(nr) (1 << ((nr) & 7))
10643
10644 /**
10645diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10646index 7a10659..8bbf355 100644
10647--- a/arch/x86/include/asm/boot.h
10648+++ b/arch/x86/include/asm/boot.h
10649@@ -11,10 +11,15 @@
10650 #include <asm/pgtable_types.h>
10651
10652 /* Physical address where kernel should be loaded. */
10653-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10654+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10655 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10656 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10657
10658+#ifndef __ASSEMBLY__
10659+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10660+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10661+#endif
10662+
10663 /* Minimum kernel alignment, as a power of two */
10664 #ifdef CONFIG_X86_64
10665 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10666diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10667index 549860d..7d45f68 100644
10668--- a/arch/x86/include/asm/cache.h
10669+++ b/arch/x86/include/asm/cache.h
10670@@ -5,9 +5,10 @@
10671
10672 /* L1 cache line size */
10673 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10674-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10675+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10676
10677 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
10678+#define __read_only __attribute__((__section__(".data.read_only")))
10679
10680 #ifdef CONFIG_X86_VSMP
10681 /* vSMP Internode cacheline shift */
10682diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10683index b54f6af..5b376a6 100644
10684--- a/arch/x86/include/asm/cacheflush.h
10685+++ b/arch/x86/include/asm/cacheflush.h
10686@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
10687 static inline unsigned long get_page_memtype(struct page *pg)
10688 {
10689 if (!PageUncached(pg) && !PageWC(pg))
10690- return -1;
10691+ return ~0UL;
10692 else if (!PageUncached(pg) && PageWC(pg))
10693 return _PAGE_CACHE_WC;
10694 else if (PageUncached(pg) && !PageWC(pg))
10695@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
10696 SetPageWC(pg);
10697 break;
10698 default:
10699- case -1:
10700+ case ~0UL:
10701 ClearPageUncached(pg);
10702 ClearPageWC(pg);
10703 break;
10704diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
10705index 0e63c9a..ab8d972 100644
10706--- a/arch/x86/include/asm/calling.h
10707+++ b/arch/x86/include/asm/calling.h
10708@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
10709 * for assembly code:
10710 */
10711
10712-#define R15 0
10713-#define R14 8
10714-#define R13 16
10715-#define R12 24
10716-#define RBP 32
10717-#define RBX 40
10718+#define R15 (0)
10719+#define R14 (8)
10720+#define R13 (16)
10721+#define R12 (24)
10722+#define RBP (32)
10723+#define RBX (40)
10724
10725 /* arguments: interrupts/non tracing syscalls only save up to here: */
10726-#define R11 48
10727-#define R10 56
10728-#define R9 64
10729-#define R8 72
10730-#define RAX 80
10731-#define RCX 88
10732-#define RDX 96
10733-#define RSI 104
10734-#define RDI 112
10735-#define ORIG_RAX 120 /* + error_code */
10736+#define R11 (48)
10737+#define R10 (56)
10738+#define R9 (64)
10739+#define R8 (72)
10740+#define RAX (80)
10741+#define RCX (88)
10742+#define RDX (96)
10743+#define RSI (104)
10744+#define RDI (112)
10745+#define ORIG_RAX (120) /* + error_code */
10746 /* end of arguments */
10747
10748 /* cpu exception frame or undefined in case of fast syscall: */
10749-#define RIP 128
10750-#define CS 136
10751-#define EFLAGS 144
10752-#define RSP 152
10753-#define SS 160
10754+#define RIP (128)
10755+#define CS (136)
10756+#define EFLAGS (144)
10757+#define RSP (152)
10758+#define SS (160)
10759
10760 #define ARGOFFSET R11
10761 #define SWFRAME ORIG_RAX
10762diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10763index 46fc474..b02b0f9 100644
10764--- a/arch/x86/include/asm/checksum_32.h
10765+++ b/arch/x86/include/asm/checksum_32.h
10766@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10767 int len, __wsum sum,
10768 int *src_err_ptr, int *dst_err_ptr);
10769
10770+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10771+ int len, __wsum sum,
10772+ int *src_err_ptr, int *dst_err_ptr);
10773+
10774+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10775+ int len, __wsum sum,
10776+ int *src_err_ptr, int *dst_err_ptr);
10777+
10778 /*
10779 * Note: when you get a NULL pointer exception here this means someone
10780 * passed in an incorrect kernel address to one of these functions.
10781@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10782 int *err_ptr)
10783 {
10784 might_sleep();
10785- return csum_partial_copy_generic((__force void *)src, dst,
10786+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10787 len, sum, err_ptr, NULL);
10788 }
10789
10790@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10791 {
10792 might_sleep();
10793 if (access_ok(VERIFY_WRITE, dst, len))
10794- return csum_partial_copy_generic(src, (__force void *)dst,
10795+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10796 len, sum, NULL, err_ptr);
10797
10798 if (len)
10799diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10800index 617bd56..7b047a1 100644
10801--- a/arch/x86/include/asm/desc.h
10802+++ b/arch/x86/include/asm/desc.h
10803@@ -4,6 +4,7 @@
10804 #include <asm/desc_defs.h>
10805 #include <asm/ldt.h>
10806 #include <asm/mmu.h>
10807+#include <asm/pgtable.h>
10808 #include <linux/smp.h>
10809
10810 static inline void fill_ldt(struct desc_struct *desc,
10811@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
10812 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10813 desc->type = (info->read_exec_only ^ 1) << 1;
10814 desc->type |= info->contents << 2;
10815+ desc->type |= info->seg_not_present ^ 1;
10816 desc->s = 1;
10817 desc->dpl = 0x3;
10818 desc->p = info->seg_not_present ^ 1;
10819@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
10820 }
10821
10822 extern struct desc_ptr idt_descr;
10823-extern gate_desc idt_table[];
10824-
10825-struct gdt_page {
10826- struct desc_struct gdt[GDT_ENTRIES];
10827-} __attribute__((aligned(PAGE_SIZE)));
10828-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10829+extern gate_desc idt_table[256];
10830
10831+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10832 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10833 {
10834- return per_cpu(gdt_page, cpu).gdt;
10835+ return cpu_gdt_table[cpu];
10836 }
10837
10838 #ifdef CONFIG_X86_64
10839@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10840 unsigned long base, unsigned dpl, unsigned flags,
10841 unsigned short seg)
10842 {
10843- gate->a = (seg << 16) | (base & 0xffff);
10844- gate->b = (base & 0xffff0000) |
10845- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10846+ gate->gate.offset_low = base;
10847+ gate->gate.seg = seg;
10848+ gate->gate.reserved = 0;
10849+ gate->gate.type = type;
10850+ gate->gate.s = 0;
10851+ gate->gate.dpl = dpl;
10852+ gate->gate.p = 1;
10853+ gate->gate.offset_high = base >> 16;
10854 }
10855
10856 #endif
10857@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10858 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10859 const gate_desc *gate)
10860 {
10861+ pax_open_kernel();
10862 memcpy(&idt[entry], gate, sizeof(*gate));
10863+ pax_close_kernel();
10864 }
10865
10866 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10867 const void *desc)
10868 {
10869+ pax_open_kernel();
10870 memcpy(&ldt[entry], desc, 8);
10871+ pax_close_kernel();
10872 }
10873
10874 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10875@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10876 size = sizeof(struct desc_struct);
10877 break;
10878 }
10879+
10880+ pax_open_kernel();
10881 memcpy(&gdt[entry], desc, size);
10882+ pax_close_kernel();
10883 }
10884
10885 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10886@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10887
10888 static inline void native_load_tr_desc(void)
10889 {
10890+ pax_open_kernel();
10891 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10892+ pax_close_kernel();
10893 }
10894
10895 static inline void native_load_gdt(const struct desc_ptr *dtr)
10896@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10897 unsigned int i;
10898 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10899
10900+ pax_open_kernel();
10901 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10902 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10903+ pax_close_kernel();
10904 }
10905
10906 #define _LDT_empty(info) \
10907@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10908 desc->limit = (limit >> 16) & 0xf;
10909 }
10910
10911-static inline void _set_gate(int gate, unsigned type, void *addr,
10912+static inline void _set_gate(int gate, unsigned type, const void *addr,
10913 unsigned dpl, unsigned ist, unsigned seg)
10914 {
10915 gate_desc s;
10916@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10917 * Pentium F0 0F bugfix can have resulted in the mapped
10918 * IDT being write-protected.
10919 */
10920-static inline void set_intr_gate(unsigned int n, void *addr)
10921+static inline void set_intr_gate(unsigned int n, const void *addr)
10922 {
10923 BUG_ON((unsigned)n > 0xFF);
10924 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10925@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10926 /*
10927 * This routine sets up an interrupt gate at directory privilege level 3.
10928 */
10929-static inline void set_system_intr_gate(unsigned int n, void *addr)
10930+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10931 {
10932 BUG_ON((unsigned)n > 0xFF);
10933 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10934 }
10935
10936-static inline void set_system_trap_gate(unsigned int n, void *addr)
10937+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10938 {
10939 BUG_ON((unsigned)n > 0xFF);
10940 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10941 }
10942
10943-static inline void set_trap_gate(unsigned int n, void *addr)
10944+static inline void set_trap_gate(unsigned int n, const void *addr)
10945 {
10946 BUG_ON((unsigned)n > 0xFF);
10947 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10948@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10949 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10950 {
10951 BUG_ON((unsigned)n > 0xFF);
10952- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10953+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10954 }
10955
10956-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10957+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10958 {
10959 BUG_ON((unsigned)n > 0xFF);
10960 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10961 }
10962
10963-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10964+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10965 {
10966 BUG_ON((unsigned)n > 0xFF);
10967 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10968 }
10969
10970+#ifdef CONFIG_X86_32
10971+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10972+{
10973+ struct desc_struct d;
10974+
10975+ if (likely(limit))
10976+ limit = (limit - 1UL) >> PAGE_SHIFT;
10977+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10978+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10979+}
10980+#endif
10981+
10982 #endif /* _ASM_X86_DESC_H */
10983diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10984index 9d66848..6b4a691 100644
10985--- a/arch/x86/include/asm/desc_defs.h
10986+++ b/arch/x86/include/asm/desc_defs.h
10987@@ -31,6 +31,12 @@ struct desc_struct {
10988 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10989 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10990 };
10991+ struct {
10992+ u16 offset_low;
10993+ u16 seg;
10994+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10995+ unsigned offset_high: 16;
10996+ } gate;
10997 };
10998 } __attribute__((packed));
10999
11000diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
11001index cee34e9..a7c3fa2 100644
11002--- a/arch/x86/include/asm/device.h
11003+++ b/arch/x86/include/asm/device.h
11004@@ -6,7 +6,7 @@ struct dev_archdata {
11005 void *acpi_handle;
11006 #endif
11007 #ifdef CONFIG_X86_64
11008-struct dma_map_ops *dma_ops;
11009+ const struct dma_map_ops *dma_ops;
11010 #endif
11011 #ifdef CONFIG_DMAR
11012 void *iommu; /* hook for IOMMU specific extension */
11013diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
11014index 6a25d5d..786b202 100644
11015--- a/arch/x86/include/asm/dma-mapping.h
11016+++ b/arch/x86/include/asm/dma-mapping.h
11017@@ -25,9 +25,9 @@ extern int iommu_merge;
11018 extern struct device x86_dma_fallback_dev;
11019 extern int panic_on_overflow;
11020
11021-extern struct dma_map_ops *dma_ops;
11022+extern const struct dma_map_ops *dma_ops;
11023
11024-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11025+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
11026 {
11027 #ifdef CONFIG_X86_32
11028 return dma_ops;
11029@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11030 /* Make sure we keep the same behaviour */
11031 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
11032 {
11033- struct dma_map_ops *ops = get_dma_ops(dev);
11034+ const struct dma_map_ops *ops = get_dma_ops(dev);
11035 if (ops->mapping_error)
11036 return ops->mapping_error(dev, dma_addr);
11037
11038@@ -122,7 +122,7 @@ static inline void *
11039 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11040 gfp_t gfp)
11041 {
11042- struct dma_map_ops *ops = get_dma_ops(dev);
11043+ const struct dma_map_ops *ops = get_dma_ops(dev);
11044 void *memory;
11045
11046 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
11047@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11048 static inline void dma_free_coherent(struct device *dev, size_t size,
11049 void *vaddr, dma_addr_t bus)
11050 {
11051- struct dma_map_ops *ops = get_dma_ops(dev);
11052+ const struct dma_map_ops *ops = get_dma_ops(dev);
11053
11054 WARN_ON(irqs_disabled()); /* for portability */
11055
11056diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11057index 40b4e61..40d8133 100644
11058--- a/arch/x86/include/asm/e820.h
11059+++ b/arch/x86/include/asm/e820.h
11060@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
11061 #define ISA_END_ADDRESS 0x100000
11062 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
11063
11064-#define BIOS_BEGIN 0x000a0000
11065+#define BIOS_BEGIN 0x000c0000
11066 #define BIOS_END 0x00100000
11067
11068 #ifdef __KERNEL__
11069diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11070index 8ac9d9a..0a6c96e 100644
11071--- a/arch/x86/include/asm/elf.h
11072+++ b/arch/x86/include/asm/elf.h
11073@@ -257,7 +257,25 @@ extern int force_personality32;
11074 the loader. We need to make sure that it is out of the way of the program
11075 that it will "exec", and that there is sufficient room for the brk. */
11076
11077+#ifdef CONFIG_PAX_SEGMEXEC
11078+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11079+#else
11080 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11081+#endif
11082+
11083+#ifdef CONFIG_PAX_ASLR
11084+#ifdef CONFIG_X86_32
11085+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11086+
11087+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11088+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11089+#else
11090+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11091+
11092+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11093+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11094+#endif
11095+#endif
11096
11097 /* This yields a mask that user programs can use to figure out what
11098 instruction set this CPU supports. This could be done in user space,
11099@@ -310,9 +328,7 @@ do { \
11100
11101 #define ARCH_DLINFO \
11102 do { \
11103- if (vdso_enabled) \
11104- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11105- (unsigned long)current->mm->context.vdso); \
11106+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11107 } while (0)
11108
11109 #define AT_SYSINFO 32
11110@@ -323,7 +339,7 @@ do { \
11111
11112 #endif /* !CONFIG_X86_32 */
11113
11114-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11115+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11116
11117 #define VDSO_ENTRY \
11118 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11119@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
11120 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11121 #define compat_arch_setup_additional_pages syscall32_setup_pages
11122
11123-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11124-#define arch_randomize_brk arch_randomize_brk
11125-
11126 #endif /* _ASM_X86_ELF_H */
11127diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11128index cc70c1c..d96d011 100644
11129--- a/arch/x86/include/asm/emergency-restart.h
11130+++ b/arch/x86/include/asm/emergency-restart.h
11131@@ -15,6 +15,6 @@ enum reboot_type {
11132
11133 extern enum reboot_type reboot_type;
11134
11135-extern void machine_emergency_restart(void);
11136+extern void machine_emergency_restart(void) __noreturn;
11137
11138 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11139diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
11140index dbe82a5..c6d8a00 100644
11141--- a/arch/x86/include/asm/floppy.h
11142+++ b/arch/x86/include/asm/floppy.h
11143@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
11144 }
11145
11146
11147+static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
11148 static unsigned long vdma_mem_alloc(unsigned long size)
11149 {
11150 return (unsigned long)vmalloc(size);
11151diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11152index 1f11ce4..7caabd1 100644
11153--- a/arch/x86/include/asm/futex.h
11154+++ b/arch/x86/include/asm/futex.h
11155@@ -12,16 +12,18 @@
11156 #include <asm/system.h>
11157
11158 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11159+ typecheck(u32 __user *, uaddr); \
11160 asm volatile("1:\t" insn "\n" \
11161 "2:\t.section .fixup,\"ax\"\n" \
11162 "3:\tmov\t%3, %1\n" \
11163 "\tjmp\t2b\n" \
11164 "\t.previous\n" \
11165 _ASM_EXTABLE(1b, 3b) \
11166- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11167+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
11168 : "i" (-EFAULT), "0" (oparg), "1" (0))
11169
11170 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11171+ typecheck(u32 __user *, uaddr); \
11172 asm volatile("1:\tmovl %2, %0\n" \
11173 "\tmovl\t%0, %3\n" \
11174 "\t" insn "\n" \
11175@@ -34,10 +36,10 @@
11176 _ASM_EXTABLE(1b, 4b) \
11177 _ASM_EXTABLE(2b, 4b) \
11178 : "=&a" (oldval), "=&r" (ret), \
11179- "+m" (*uaddr), "=&r" (tem) \
11180+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11181 : "r" (oparg), "i" (-EFAULT), "1" (0))
11182
11183-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11184+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11185 {
11186 int op = (encoded_op >> 28) & 7;
11187 int cmp = (encoded_op >> 24) & 15;
11188@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11189
11190 switch (op) {
11191 case FUTEX_OP_SET:
11192- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11193+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11194 break;
11195 case FUTEX_OP_ADD:
11196- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11197+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11198 uaddr, oparg);
11199 break;
11200 case FUTEX_OP_OR:
11201@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11202 return ret;
11203 }
11204
11205-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11206+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
11207 int newval)
11208 {
11209
11210@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11211 return -ENOSYS;
11212 #endif
11213
11214- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
11215+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
11216 return -EFAULT;
11217
11218- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
11219+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
11220 "2:\t.section .fixup, \"ax\"\n"
11221 "3:\tmov %2, %0\n"
11222 "\tjmp 2b\n"
11223 "\t.previous\n"
11224 _ASM_EXTABLE(1b, 3b)
11225- : "=a" (oldval), "+m" (*uaddr)
11226+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
11227 : "i" (-EFAULT), "r" (newval), "0" (oldval)
11228 : "memory"
11229 );
11230diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11231index ba180d9..3bad351 100644
11232--- a/arch/x86/include/asm/hw_irq.h
11233+++ b/arch/x86/include/asm/hw_irq.h
11234@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
11235 extern void enable_IO_APIC(void);
11236
11237 /* Statistics */
11238-extern atomic_t irq_err_count;
11239-extern atomic_t irq_mis_count;
11240+extern atomic_unchecked_t irq_err_count;
11241+extern atomic_unchecked_t irq_mis_count;
11242
11243 /* EISA */
11244 extern void eisa_set_level_irq(unsigned int irq);
11245diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
11246index 0b20bbb..4cb1396 100644
11247--- a/arch/x86/include/asm/i387.h
11248+++ b/arch/x86/include/asm/i387.h
11249@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11250 {
11251 int err;
11252
11253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11254+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11255+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
11256+#endif
11257+
11258 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
11259 "2:\n"
11260 ".section .fixup,\"ax\"\n"
11261@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
11262 {
11263 int err;
11264
11265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11266+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11267+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
11268+#endif
11269+
11270 asm volatile("1: rex64/fxsave (%[fx])\n\t"
11271 "2:\n"
11272 ".section .fixup,\"ax\"\n"
11273@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11274 }
11275
11276 /* We need a safe address that is cheap to find and that is already
11277- in L1 during context switch. The best choices are unfortunately
11278- different for UP and SMP */
11279-#ifdef CONFIG_SMP
11280-#define safe_address (__per_cpu_offset[0])
11281-#else
11282-#define safe_address (kstat_cpu(0).cpustat.user)
11283-#endif
11284+ in L1 during context switch. */
11285+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
11286
11287 /*
11288 * These must be called with preempt disabled
11289@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
11290 struct thread_info *me = current_thread_info();
11291 preempt_disable();
11292 if (me->status & TS_USEDFPU)
11293- __save_init_fpu(me->task);
11294+ __save_init_fpu(current);
11295 else
11296 clts();
11297 }
11298diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
11299index a299900..15c5410 100644
11300--- a/arch/x86/include/asm/io_32.h
11301+++ b/arch/x86/include/asm/io_32.h
11302@@ -3,6 +3,7 @@
11303
11304 #include <linux/string.h>
11305 #include <linux/compiler.h>
11306+#include <asm/processor.h>
11307
11308 /*
11309 * This file contains the definitions for the x86 IO instructions
11310@@ -42,6 +43,17 @@
11311
11312 #ifdef __KERNEL__
11313
11314+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11315+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11316+{
11317+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11318+}
11319+
11320+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11321+{
11322+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11323+}
11324+
11325 #include <asm-generic/iomap.h>
11326
11327 #include <linux/vmalloc.h>
11328diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
11329index 2440678..c158b88 100644
11330--- a/arch/x86/include/asm/io_64.h
11331+++ b/arch/x86/include/asm/io_64.h
11332@@ -140,6 +140,17 @@ __OUTS(l)
11333
11334 #include <linux/vmalloc.h>
11335
11336+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11337+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11338+{
11339+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11340+}
11341+
11342+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11343+{
11344+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11345+}
11346+
11347 #include <asm-generic/iomap.h>
11348
11349 void __memcpy_fromio(void *, unsigned long, unsigned);
11350diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
11351index fd6d21b..8b13915 100644
11352--- a/arch/x86/include/asm/iommu.h
11353+++ b/arch/x86/include/asm/iommu.h
11354@@ -3,7 +3,7 @@
11355
11356 extern void pci_iommu_shutdown(void);
11357 extern void no_iommu_init(void);
11358-extern struct dma_map_ops nommu_dma_ops;
11359+extern const struct dma_map_ops nommu_dma_ops;
11360 extern int force_iommu, no_iommu;
11361 extern int iommu_detected;
11362 extern int iommu_pass_through;
11363diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11364index 9e2b952..557206e 100644
11365--- a/arch/x86/include/asm/irqflags.h
11366+++ b/arch/x86/include/asm/irqflags.h
11367@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
11368 sti; \
11369 sysexit
11370
11371+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11372+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11373+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11374+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11375+
11376 #else
11377 #define INTERRUPT_RETURN iret
11378 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11379diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11380index 4fe681d..bb6d40c 100644
11381--- a/arch/x86/include/asm/kprobes.h
11382+++ b/arch/x86/include/asm/kprobes.h
11383@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
11384 #define BREAKPOINT_INSTRUCTION 0xcc
11385 #define RELATIVEJUMP_INSTRUCTION 0xe9
11386 #define MAX_INSN_SIZE 16
11387-#define MAX_STACK_SIZE 64
11388-#define MIN_STACK_SIZE(ADDR) \
11389- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11390- THREAD_SIZE - (unsigned long)(ADDR))) \
11391- ? (MAX_STACK_SIZE) \
11392- : (((unsigned long)current_thread_info()) + \
11393- THREAD_SIZE - (unsigned long)(ADDR)))
11394+#define MAX_STACK_SIZE 64UL
11395+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11396
11397 #define flush_insn_slot(p) do { } while (0)
11398
11399diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11400index 08bc2ff..acafd8f 100644
11401--- a/arch/x86/include/asm/kvm_host.h
11402+++ b/arch/x86/include/asm/kvm_host.h
11403@@ -534,9 +534,9 @@ struct kvm_x86_ops {
11404 bool (*gb_page_enable)(void);
11405
11406 const struct trace_print_flags *exit_reasons_str;
11407-};
11408+} __do_const;
11409
11410-extern struct kvm_x86_ops *kvm_x86_ops;
11411+extern const struct kvm_x86_ops *kvm_x86_ops;
11412
11413 int kvm_mmu_module_init(void);
11414 void kvm_mmu_module_exit(void);
11415@@ -558,9 +558,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
11416 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
11417
11418 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
11419- const void *val, int bytes);
11420+ const void *val, int bytes) __size_overflow(2);
11421 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
11422- gpa_t addr, unsigned long *ret);
11423+ gpa_t addr, unsigned long *ret) __size_overflow(2,3);
11424 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
11425
11426 extern bool tdp_enabled;
11427@@ -619,7 +619,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
11428 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
11429
11430 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
11431-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
11432+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) __size_overflow(3);
11433
11434 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
11435 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
11436@@ -643,7 +643,7 @@ unsigned long segment_base(u16 selector);
11437 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
11438 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
11439 const u8 *new, int bytes,
11440- bool guest_initiated);
11441+ bool guest_initiated) __size_overflow(2);
11442 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
11443 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
11444 int kvm_mmu_load(struct kvm_vcpu *vcpu);
11445diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11446index 47b9b6f..815aaa1 100644
11447--- a/arch/x86/include/asm/local.h
11448+++ b/arch/x86/include/asm/local.h
11449@@ -18,26 +18,58 @@ typedef struct {
11450
11451 static inline void local_inc(local_t *l)
11452 {
11453- asm volatile(_ASM_INC "%0"
11454+ asm volatile(_ASM_INC "%0\n"
11455+
11456+#ifdef CONFIG_PAX_REFCOUNT
11457+ "jno 0f\n"
11458+ _ASM_DEC "%0\n"
11459+ "int $4\n0:\n"
11460+ _ASM_EXTABLE(0b, 0b)
11461+#endif
11462+
11463 : "+m" (l->a.counter));
11464 }
11465
11466 static inline void local_dec(local_t *l)
11467 {
11468- asm volatile(_ASM_DEC "%0"
11469+ asm volatile(_ASM_DEC "%0\n"
11470+
11471+#ifdef CONFIG_PAX_REFCOUNT
11472+ "jno 0f\n"
11473+ _ASM_INC "%0\n"
11474+ "int $4\n0:\n"
11475+ _ASM_EXTABLE(0b, 0b)
11476+#endif
11477+
11478 : "+m" (l->a.counter));
11479 }
11480
11481 static inline void local_add(long i, local_t *l)
11482 {
11483- asm volatile(_ASM_ADD "%1,%0"
11484+ asm volatile(_ASM_ADD "%1,%0\n"
11485+
11486+#ifdef CONFIG_PAX_REFCOUNT
11487+ "jno 0f\n"
11488+ _ASM_SUB "%1,%0\n"
11489+ "int $4\n0:\n"
11490+ _ASM_EXTABLE(0b, 0b)
11491+#endif
11492+
11493 : "+m" (l->a.counter)
11494 : "ir" (i));
11495 }
11496
11497 static inline void local_sub(long i, local_t *l)
11498 {
11499- asm volatile(_ASM_SUB "%1,%0"
11500+ asm volatile(_ASM_SUB "%1,%0\n"
11501+
11502+#ifdef CONFIG_PAX_REFCOUNT
11503+ "jno 0f\n"
11504+ _ASM_ADD "%1,%0\n"
11505+ "int $4\n0:\n"
11506+ _ASM_EXTABLE(0b, 0b)
11507+#endif
11508+
11509 : "+m" (l->a.counter)
11510 : "ir" (i));
11511 }
11512@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11513 {
11514 unsigned char c;
11515
11516- asm volatile(_ASM_SUB "%2,%0; sete %1"
11517+ asm volatile(_ASM_SUB "%2,%0\n"
11518+
11519+#ifdef CONFIG_PAX_REFCOUNT
11520+ "jno 0f\n"
11521+ _ASM_ADD "%2,%0\n"
11522+ "int $4\n0:\n"
11523+ _ASM_EXTABLE(0b, 0b)
11524+#endif
11525+
11526+ "sete %1\n"
11527 : "+m" (l->a.counter), "=qm" (c)
11528 : "ir" (i) : "memory");
11529 return c;
11530@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
11531 {
11532 unsigned char c;
11533
11534- asm volatile(_ASM_DEC "%0; sete %1"
11535+ asm volatile(_ASM_DEC "%0\n"
11536+
11537+#ifdef CONFIG_PAX_REFCOUNT
11538+ "jno 0f\n"
11539+ _ASM_INC "%0\n"
11540+ "int $4\n0:\n"
11541+ _ASM_EXTABLE(0b, 0b)
11542+#endif
11543+
11544+ "sete %1\n"
11545 : "+m" (l->a.counter), "=qm" (c)
11546 : : "memory");
11547 return c != 0;
11548@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
11549 {
11550 unsigned char c;
11551
11552- asm volatile(_ASM_INC "%0; sete %1"
11553+ asm volatile(_ASM_INC "%0\n"
11554+
11555+#ifdef CONFIG_PAX_REFCOUNT
11556+ "jno 0f\n"
11557+ _ASM_DEC "%0\n"
11558+ "int $4\n0:\n"
11559+ _ASM_EXTABLE(0b, 0b)
11560+#endif
11561+
11562+ "sete %1\n"
11563 : "+m" (l->a.counter), "=qm" (c)
11564 : : "memory");
11565 return c != 0;
11566@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
11567 {
11568 unsigned char c;
11569
11570- asm volatile(_ASM_ADD "%2,%0; sets %1"
11571+ asm volatile(_ASM_ADD "%2,%0\n"
11572+
11573+#ifdef CONFIG_PAX_REFCOUNT
11574+ "jno 0f\n"
11575+ _ASM_SUB "%2,%0\n"
11576+ "int $4\n0:\n"
11577+ _ASM_EXTABLE(0b, 0b)
11578+#endif
11579+
11580+ "sets %1\n"
11581 : "+m" (l->a.counter), "=qm" (c)
11582 : "ir" (i) : "memory");
11583 return c;
11584@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
11585 #endif
11586 /* Modern 486+ processor */
11587 __i = i;
11588- asm volatile(_ASM_XADD "%0, %1;"
11589+ asm volatile(_ASM_XADD "%0, %1\n"
11590+
11591+#ifdef CONFIG_PAX_REFCOUNT
11592+ "jno 0f\n"
11593+ _ASM_MOV "%0,%1\n"
11594+ "int $4\n0:\n"
11595+ _ASM_EXTABLE(0b, 0b)
11596+#endif
11597+
11598 : "+r" (i), "+m" (l->a.counter)
11599 : : "memory");
11600 return i + __i;
11601diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
11602index ef51b50..514ba37 100644
11603--- a/arch/x86/include/asm/microcode.h
11604+++ b/arch/x86/include/asm/microcode.h
11605@@ -12,13 +12,13 @@ struct device;
11606 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
11607
11608 struct microcode_ops {
11609- enum ucode_state (*request_microcode_user) (int cpu,
11610+ enum ucode_state (* const request_microcode_user) (int cpu,
11611 const void __user *buf, size_t size);
11612
11613- enum ucode_state (*request_microcode_fw) (int cpu,
11614+ enum ucode_state (* const request_microcode_fw) (int cpu,
11615 struct device *device);
11616
11617- void (*microcode_fini_cpu) (int cpu);
11618+ void (* const microcode_fini_cpu) (int cpu);
11619
11620 /*
11621 * The generic 'microcode_core' part guarantees that
11622@@ -38,18 +38,18 @@ struct ucode_cpu_info {
11623 extern struct ucode_cpu_info ucode_cpu_info[];
11624
11625 #ifdef CONFIG_MICROCODE_INTEL
11626-extern struct microcode_ops * __init init_intel_microcode(void);
11627+extern const struct microcode_ops * __init init_intel_microcode(void);
11628 #else
11629-static inline struct microcode_ops * __init init_intel_microcode(void)
11630+static inline const struct microcode_ops * __init init_intel_microcode(void)
11631 {
11632 return NULL;
11633 }
11634 #endif /* CONFIG_MICROCODE_INTEL */
11635
11636 #ifdef CONFIG_MICROCODE_AMD
11637-extern struct microcode_ops * __init init_amd_microcode(void);
11638+extern const struct microcode_ops * __init init_amd_microcode(void);
11639 #else
11640-static inline struct microcode_ops * __init init_amd_microcode(void)
11641+static inline const struct microcode_ops * __init init_amd_microcode(void)
11642 {
11643 return NULL;
11644 }
11645diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11646index 593e51d..fa69c9a 100644
11647--- a/arch/x86/include/asm/mman.h
11648+++ b/arch/x86/include/asm/mman.h
11649@@ -5,4 +5,14 @@
11650
11651 #include <asm-generic/mman.h>
11652
11653+#ifdef __KERNEL__
11654+#ifndef __ASSEMBLY__
11655+#ifdef CONFIG_X86_32
11656+#define arch_mmap_check i386_mmap_check
11657+int i386_mmap_check(unsigned long addr, unsigned long len,
11658+ unsigned long flags);
11659+#endif
11660+#endif
11661+#endif
11662+
11663 #endif /* _ASM_X86_MMAN_H */
11664diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11665index 80a1dee..239c67d 100644
11666--- a/arch/x86/include/asm/mmu.h
11667+++ b/arch/x86/include/asm/mmu.h
11668@@ -9,10 +9,23 @@
11669 * we put the segment information here.
11670 */
11671 typedef struct {
11672- void *ldt;
11673+ struct desc_struct *ldt;
11674 int size;
11675 struct mutex lock;
11676- void *vdso;
11677+ unsigned long vdso;
11678+
11679+#ifdef CONFIG_X86_32
11680+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11681+ unsigned long user_cs_base;
11682+ unsigned long user_cs_limit;
11683+
11684+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11685+ cpumask_t cpu_user_cs_mask;
11686+#endif
11687+
11688+#endif
11689+#endif
11690+
11691 } mm_context_t;
11692
11693 #ifdef CONFIG_SMP
11694diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11695index 8b5393e..8143173 100644
11696--- a/arch/x86/include/asm/mmu_context.h
11697+++ b/arch/x86/include/asm/mmu_context.h
11698@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11699
11700 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11701 {
11702+
11703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11704+ unsigned int i;
11705+ pgd_t *pgd;
11706+
11707+ pax_open_kernel();
11708+ pgd = get_cpu_pgd(smp_processor_id());
11709+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11710+ set_pgd_batched(pgd+i, native_make_pgd(0));
11711+ pax_close_kernel();
11712+#endif
11713+
11714 #ifdef CONFIG_SMP
11715 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11716 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11717@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11718 struct task_struct *tsk)
11719 {
11720 unsigned cpu = smp_processor_id();
11721+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
11722+ int tlbstate = TLBSTATE_OK;
11723+#endif
11724
11725 if (likely(prev != next)) {
11726 #ifdef CONFIG_SMP
11727+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11728+ tlbstate = percpu_read(cpu_tlbstate.state);
11729+#endif
11730 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11731 percpu_write(cpu_tlbstate.active_mm, next);
11732 #endif
11733 cpumask_set_cpu(cpu, mm_cpumask(next));
11734
11735 /* Re-load page tables */
11736+#ifdef CONFIG_PAX_PER_CPU_PGD
11737+ pax_open_kernel();
11738+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11739+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11740+ pax_close_kernel();
11741+ load_cr3(get_cpu_pgd(cpu));
11742+#else
11743 load_cr3(next->pgd);
11744+#endif
11745
11746 /* stop flush ipis for the previous mm */
11747 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11748@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11749 */
11750 if (unlikely(prev->context.ldt != next->context.ldt))
11751 load_LDT_nolock(&next->context);
11752- }
11753+
11754+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11755+ if (!nx_enabled) {
11756+ smp_mb__before_clear_bit();
11757+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11758+ smp_mb__after_clear_bit();
11759+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11760+ }
11761+#endif
11762+
11763+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11764+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11765+ prev->context.user_cs_limit != next->context.user_cs_limit))
11766+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11767 #ifdef CONFIG_SMP
11768+ else if (unlikely(tlbstate != TLBSTATE_OK))
11769+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11770+#endif
11771+#endif
11772+
11773+ }
11774 else {
11775+
11776+#ifdef CONFIG_PAX_PER_CPU_PGD
11777+ pax_open_kernel();
11778+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11779+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11780+ pax_close_kernel();
11781+ load_cr3(get_cpu_pgd(cpu));
11782+#endif
11783+
11784+#ifdef CONFIG_SMP
11785 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11786 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11787
11788@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11789 * tlb flush IPI delivery. We must reload CR3
11790 * to make sure to use no freed page tables.
11791 */
11792+
11793+#ifndef CONFIG_PAX_PER_CPU_PGD
11794 load_cr3(next->pgd);
11795+#endif
11796+
11797 load_LDT_nolock(&next->context);
11798+
11799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11800+ if (!nx_enabled)
11801+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11802+#endif
11803+
11804+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11805+#ifdef CONFIG_PAX_PAGEEXEC
11806+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
11807+#endif
11808+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11809+#endif
11810+
11811 }
11812+#endif
11813 }
11814-#endif
11815 }
11816
11817 #define activate_mm(prev, next) \
11818diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11819index 3e2ce58..caaf478 100644
11820--- a/arch/x86/include/asm/module.h
11821+++ b/arch/x86/include/asm/module.h
11822@@ -5,6 +5,7 @@
11823
11824 #ifdef CONFIG_X86_64
11825 /* X86_64 does not define MODULE_PROC_FAMILY */
11826+#define MODULE_PROC_FAMILY ""
11827 #elif defined CONFIG_M386
11828 #define MODULE_PROC_FAMILY "386 "
11829 #elif defined CONFIG_M486
11830@@ -59,13 +60,26 @@
11831 #error unknown processor family
11832 #endif
11833
11834-#ifdef CONFIG_X86_32
11835-# ifdef CONFIG_4KSTACKS
11836-# define MODULE_STACKSIZE "4KSTACKS "
11837-# else
11838-# define MODULE_STACKSIZE ""
11839-# endif
11840-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
11841+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
11842+#define MODULE_STACKSIZE "4KSTACKS "
11843+#else
11844+#define MODULE_STACKSIZE ""
11845 #endif
11846
11847+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11848+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11849+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11850+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11851+#else
11852+#define MODULE_PAX_KERNEXEC ""
11853+#endif
11854+
11855+#ifdef CONFIG_PAX_MEMORY_UDEREF
11856+#define MODULE_PAX_UDEREF "UDEREF "
11857+#else
11858+#define MODULE_PAX_UDEREF ""
11859+#endif
11860+
11861+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11862+
11863 #endif /* _ASM_X86_MODULE_H */
11864diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11865index 7639dbf..e08a58c 100644
11866--- a/arch/x86/include/asm/page_64_types.h
11867+++ b/arch/x86/include/asm/page_64_types.h
11868@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11869
11870 /* duplicated to the one in bootmem.h */
11871 extern unsigned long max_pfn;
11872-extern unsigned long phys_base;
11873+extern const unsigned long phys_base;
11874
11875 extern unsigned long __phys_addr(unsigned long);
11876 #define __phys_reloc_hide(x) (x)
11877diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11878index efb3899..ef30687 100644
11879--- a/arch/x86/include/asm/paravirt.h
11880+++ b/arch/x86/include/asm/paravirt.h
11881@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11882 val);
11883 }
11884
11885+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11886+{
11887+ pgdval_t val = native_pgd_val(pgd);
11888+
11889+ if (sizeof(pgdval_t) > sizeof(long))
11890+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11891+ val, (u64)val >> 32);
11892+ else
11893+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11894+ val);
11895+}
11896+
11897 static inline void pgd_clear(pgd_t *pgdp)
11898 {
11899 set_pgd(pgdp, __pgd(0));
11900@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11901 pv_mmu_ops.set_fixmap(idx, phys, flags);
11902 }
11903
11904+#ifdef CONFIG_PAX_KERNEXEC
11905+static inline unsigned long pax_open_kernel(void)
11906+{
11907+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11908+}
11909+
11910+static inline unsigned long pax_close_kernel(void)
11911+{
11912+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11913+}
11914+#else
11915+static inline unsigned long pax_open_kernel(void) { return 0; }
11916+static inline unsigned long pax_close_kernel(void) { return 0; }
11917+#endif
11918+
11919 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11920
11921 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11922@@ -945,7 +972,7 @@ extern void default_banner(void);
11923
11924 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11925 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11926-#define PARA_INDIRECT(addr) *%cs:addr
11927+#define PARA_INDIRECT(addr) *%ss:addr
11928 #endif
11929
11930 #define INTERRUPT_RETURN \
11931@@ -1022,6 +1049,21 @@ extern void default_banner(void);
11932 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11933 CLBR_NONE, \
11934 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11935+
11936+#define GET_CR0_INTO_RDI \
11937+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11938+ mov %rax,%rdi
11939+
11940+#define SET_RDI_INTO_CR0 \
11941+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11942+
11943+#define GET_CR3_INTO_RDI \
11944+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11945+ mov %rax,%rdi
11946+
11947+#define SET_RDI_INTO_CR3 \
11948+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11949+
11950 #endif /* CONFIG_X86_32 */
11951
11952 #endif /* __ASSEMBLY__ */
11953diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11954index 9357473..aeb2de5 100644
11955--- a/arch/x86/include/asm/paravirt_types.h
11956+++ b/arch/x86/include/asm/paravirt_types.h
11957@@ -78,19 +78,19 @@ struct pv_init_ops {
11958 */
11959 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11960 unsigned long addr, unsigned len);
11961-};
11962+} __no_const;
11963
11964
11965 struct pv_lazy_ops {
11966 /* Set deferred update mode, used for batching operations. */
11967 void (*enter)(void);
11968 void (*leave)(void);
11969-};
11970+} __no_const;
11971
11972 struct pv_time_ops {
11973 unsigned long long (*sched_clock)(void);
11974 unsigned long (*get_tsc_khz)(void);
11975-};
11976+} __no_const;
11977
11978 struct pv_cpu_ops {
11979 /* hooks for various privileged instructions */
11980@@ -186,7 +186,7 @@ struct pv_cpu_ops {
11981
11982 void (*start_context_switch)(struct task_struct *prev);
11983 void (*end_context_switch)(struct task_struct *next);
11984-};
11985+} __no_const;
11986
11987 struct pv_irq_ops {
11988 /*
11989@@ -217,7 +217,7 @@ struct pv_apic_ops {
11990 unsigned long start_eip,
11991 unsigned long start_esp);
11992 #endif
11993-};
11994+} __no_const;
11995
11996 struct pv_mmu_ops {
11997 unsigned long (*read_cr2)(void);
11998@@ -301,6 +301,7 @@ struct pv_mmu_ops {
11999 struct paravirt_callee_save make_pud;
12000
12001 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
12002+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
12003 #endif /* PAGETABLE_LEVELS == 4 */
12004 #endif /* PAGETABLE_LEVELS >= 3 */
12005
12006@@ -316,6 +317,12 @@ struct pv_mmu_ops {
12007 an mfn. We can tell which is which from the index. */
12008 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
12009 phys_addr_t phys, pgprot_t flags);
12010+
12011+#ifdef CONFIG_PAX_KERNEXEC
12012+ unsigned long (*pax_open_kernel)(void);
12013+ unsigned long (*pax_close_kernel)(void);
12014+#endif
12015+
12016 };
12017
12018 struct raw_spinlock;
12019@@ -326,7 +333,7 @@ struct pv_lock_ops {
12020 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
12021 int (*spin_trylock)(struct raw_spinlock *lock);
12022 void (*spin_unlock)(struct raw_spinlock *lock);
12023-};
12024+} __no_const;
12025
12026 /* This contains all the paravirt structures: we get a convenient
12027 * number for each function using the offset which we use to indicate
12028diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
12029index b399988..3f47c38 100644
12030--- a/arch/x86/include/asm/pci_x86.h
12031+++ b/arch/x86/include/asm/pci_x86.h
12032@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
12033 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
12034
12035 struct pci_raw_ops {
12036- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12037+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
12038 int reg, int len, u32 *val);
12039- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12040+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
12041 int reg, int len, u32 val);
12042 };
12043
12044-extern struct pci_raw_ops *raw_pci_ops;
12045-extern struct pci_raw_ops *raw_pci_ext_ops;
12046+extern const struct pci_raw_ops *raw_pci_ops;
12047+extern const struct pci_raw_ops *raw_pci_ext_ops;
12048
12049-extern struct pci_raw_ops pci_direct_conf1;
12050+extern const struct pci_raw_ops pci_direct_conf1;
12051 extern bool port_cf9_safe;
12052
12053 /* arch_initcall level */
12054diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
12055index b65a36d..50345a4 100644
12056--- a/arch/x86/include/asm/percpu.h
12057+++ b/arch/x86/include/asm/percpu.h
12058@@ -78,6 +78,7 @@ do { \
12059 if (0) { \
12060 T__ tmp__; \
12061 tmp__ = (val); \
12062+ (void)tmp__; \
12063 } \
12064 switch (sizeof(var)) { \
12065 case 1: \
12066diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12067index 271de94..ef944d6 100644
12068--- a/arch/x86/include/asm/pgalloc.h
12069+++ b/arch/x86/include/asm/pgalloc.h
12070@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12071 pmd_t *pmd, pte_t *pte)
12072 {
12073 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12074+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12075+}
12076+
12077+static inline void pmd_populate_user(struct mm_struct *mm,
12078+ pmd_t *pmd, pte_t *pte)
12079+{
12080+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12081 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12082 }
12083
12084diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12085index 2334982..70bc412 100644
12086--- a/arch/x86/include/asm/pgtable-2level.h
12087+++ b/arch/x86/include/asm/pgtable-2level.h
12088@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12089
12090 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12091 {
12092+ pax_open_kernel();
12093 *pmdp = pmd;
12094+ pax_close_kernel();
12095 }
12096
12097 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12098diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12099index 33927d2..ccde329 100644
12100--- a/arch/x86/include/asm/pgtable-3level.h
12101+++ b/arch/x86/include/asm/pgtable-3level.h
12102@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12103
12104 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12105 {
12106+ pax_open_kernel();
12107 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12108+ pax_close_kernel();
12109 }
12110
12111 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12112 {
12113+ pax_open_kernel();
12114 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12115+ pax_close_kernel();
12116 }
12117
12118 /*
12119diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12120index af6fd36..867ff74 100644
12121--- a/arch/x86/include/asm/pgtable.h
12122+++ b/arch/x86/include/asm/pgtable.h
12123@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
12124
12125 #ifndef __PAGETABLE_PUD_FOLDED
12126 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12127+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12128 #define pgd_clear(pgd) native_pgd_clear(pgd)
12129 #endif
12130
12131@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
12132
12133 #define arch_end_context_switch(prev) do {} while(0)
12134
12135+#define pax_open_kernel() native_pax_open_kernel()
12136+#define pax_close_kernel() native_pax_close_kernel()
12137 #endif /* CONFIG_PARAVIRT */
12138
12139+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12140+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12141+
12142+#ifdef CONFIG_PAX_KERNEXEC
12143+static inline unsigned long native_pax_open_kernel(void)
12144+{
12145+ unsigned long cr0;
12146+
12147+ preempt_disable();
12148+ barrier();
12149+ cr0 = read_cr0() ^ X86_CR0_WP;
12150+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
12151+ write_cr0(cr0);
12152+ return cr0 ^ X86_CR0_WP;
12153+}
12154+
12155+static inline unsigned long native_pax_close_kernel(void)
12156+{
12157+ unsigned long cr0;
12158+
12159+ cr0 = read_cr0() ^ X86_CR0_WP;
12160+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12161+ write_cr0(cr0);
12162+ barrier();
12163+ preempt_enable_no_resched();
12164+ return cr0 ^ X86_CR0_WP;
12165+}
12166+#else
12167+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12168+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12169+#endif
12170+
12171 /*
12172 * The following only work if pte_present() is true.
12173 * Undefined behaviour if not..
12174 */
12175+static inline int pte_user(pte_t pte)
12176+{
12177+ return pte_val(pte) & _PAGE_USER;
12178+}
12179+
12180 static inline int pte_dirty(pte_t pte)
12181 {
12182 return pte_flags(pte) & _PAGE_DIRTY;
12183@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12184 return pte_clear_flags(pte, _PAGE_RW);
12185 }
12186
12187+static inline pte_t pte_mkread(pte_t pte)
12188+{
12189+ return __pte(pte_val(pte) | _PAGE_USER);
12190+}
12191+
12192 static inline pte_t pte_mkexec(pte_t pte)
12193 {
12194- return pte_clear_flags(pte, _PAGE_NX);
12195+#ifdef CONFIG_X86_PAE
12196+ if (__supported_pte_mask & _PAGE_NX)
12197+ return pte_clear_flags(pte, _PAGE_NX);
12198+ else
12199+#endif
12200+ return pte_set_flags(pte, _PAGE_USER);
12201+}
12202+
12203+static inline pte_t pte_exprotect(pte_t pte)
12204+{
12205+#ifdef CONFIG_X86_PAE
12206+ if (__supported_pte_mask & _PAGE_NX)
12207+ return pte_set_flags(pte, _PAGE_NX);
12208+ else
12209+#endif
12210+ return pte_clear_flags(pte, _PAGE_USER);
12211 }
12212
12213 static inline pte_t pte_mkdirty(pte_t pte)
12214@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12215 #endif
12216
12217 #ifndef __ASSEMBLY__
12218+
12219+#ifdef CONFIG_PAX_PER_CPU_PGD
12220+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12221+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12222+{
12223+ return cpu_pgd[cpu];
12224+}
12225+#endif
12226+
12227 #include <linux/mm_types.h>
12228
12229 static inline int pte_none(pte_t pte)
12230@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12231
12232 static inline int pgd_bad(pgd_t pgd)
12233 {
12234- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12235+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12236 }
12237
12238 static inline int pgd_none(pgd_t pgd)
12239@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
12240 * pgd_offset() returns a (pgd_t *)
12241 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12242 */
12243-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12244+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12245+
12246+#ifdef CONFIG_PAX_PER_CPU_PGD
12247+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12248+#endif
12249+
12250 /*
12251 * a shortcut which implies the use of the kernel's pgd, instead
12252 * of a process's
12253@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
12254 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12255 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12256
12257+#ifdef CONFIG_X86_32
12258+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12259+#else
12260+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12261+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12262+
12263+#ifdef CONFIG_PAX_MEMORY_UDEREF
12264+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12265+#else
12266+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12267+#endif
12268+
12269+#endif
12270+
12271 #ifndef __ASSEMBLY__
12272
12273 extern int direct_gbpages;
12274@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
12275 * dst and src can be on the same page, but the range must not overlap,
12276 * and must not cross a page boundary.
12277 */
12278-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12279+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12280 {
12281- memcpy(dst, src, count * sizeof(pgd_t));
12282+ pax_open_kernel();
12283+ while (count--)
12284+ *dst++ = *src++;
12285+ pax_close_kernel();
12286 }
12287
12288+#ifdef CONFIG_PAX_PER_CPU_PGD
12289+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12290+#endif
12291+
12292+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12293+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12294+#else
12295+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
12296+#endif
12297
12298 #include <asm-generic/pgtable.h>
12299 #endif /* __ASSEMBLY__ */
12300diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12301index 750f1bf..971e8394 100644
12302--- a/arch/x86/include/asm/pgtable_32.h
12303+++ b/arch/x86/include/asm/pgtable_32.h
12304@@ -26,9 +26,6 @@
12305 struct mm_struct;
12306 struct vm_area_struct;
12307
12308-extern pgd_t swapper_pg_dir[1024];
12309-extern pgd_t trampoline_pg_dir[1024];
12310-
12311 static inline void pgtable_cache_init(void) { }
12312 static inline void check_pgt_cache(void) { }
12313 void paging_init(void);
12314@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12315 # include <asm/pgtable-2level.h>
12316 #endif
12317
12318+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12319+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
12320+#ifdef CONFIG_X86_PAE
12321+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12322+#endif
12323+
12324 #if defined(CONFIG_HIGHPTE)
12325 #define __KM_PTE \
12326 (in_nmi() ? KM_NMI_PTE : \
12327@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12328 /* Clear a kernel PTE and flush it from the TLB */
12329 #define kpte_clear_flush(ptep, vaddr) \
12330 do { \
12331+ pax_open_kernel(); \
12332 pte_clear(&init_mm, (vaddr), (ptep)); \
12333+ pax_close_kernel(); \
12334 __flush_tlb_one((vaddr)); \
12335 } while (0)
12336
12337@@ -85,6 +90,9 @@ do { \
12338
12339 #endif /* !__ASSEMBLY__ */
12340
12341+#define HAVE_ARCH_UNMAPPED_AREA
12342+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12343+
12344 /*
12345 * kern_addr_valid() is (1) for FLATMEM and (0) for
12346 * SPARSEMEM and DISCONTIGMEM
12347diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12348index 5e67c15..12d5c47 100644
12349--- a/arch/x86/include/asm/pgtable_32_types.h
12350+++ b/arch/x86/include/asm/pgtable_32_types.h
12351@@ -8,7 +8,7 @@
12352 */
12353 #ifdef CONFIG_X86_PAE
12354 # include <asm/pgtable-3level_types.h>
12355-# define PMD_SIZE (1UL << PMD_SHIFT)
12356+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12357 # define PMD_MASK (~(PMD_SIZE - 1))
12358 #else
12359 # include <asm/pgtable-2level_types.h>
12360@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12361 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12362 #endif
12363
12364+#ifdef CONFIG_PAX_KERNEXEC
12365+#ifndef __ASSEMBLY__
12366+extern unsigned char MODULES_EXEC_VADDR[];
12367+extern unsigned char MODULES_EXEC_END[];
12368+#endif
12369+#include <asm/boot.h>
12370+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12371+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12372+#else
12373+#define ktla_ktva(addr) (addr)
12374+#define ktva_ktla(addr) (addr)
12375+#endif
12376+
12377 #define MODULES_VADDR VMALLOC_START
12378 #define MODULES_END VMALLOC_END
12379 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12380diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12381index c57a301..6b414ff 100644
12382--- a/arch/x86/include/asm/pgtable_64.h
12383+++ b/arch/x86/include/asm/pgtable_64.h
12384@@ -16,10 +16,14 @@
12385
12386 extern pud_t level3_kernel_pgt[512];
12387 extern pud_t level3_ident_pgt[512];
12388+extern pud_t level3_vmalloc_start_pgt[512];
12389+extern pud_t level3_vmalloc_end_pgt[512];
12390+extern pud_t level3_vmemmap_pgt[512];
12391+extern pud_t level2_vmemmap_pgt[512];
12392 extern pmd_t level2_kernel_pgt[512];
12393 extern pmd_t level2_fixmap_pgt[512];
12394-extern pmd_t level2_ident_pgt[512];
12395-extern pgd_t init_level4_pgt[];
12396+extern pmd_t level2_ident_pgt[512*2];
12397+extern pgd_t init_level4_pgt[512];
12398
12399 #define swapper_pg_dir init_level4_pgt
12400
12401@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
12402
12403 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12404 {
12405+ pax_open_kernel();
12406 *pmdp = pmd;
12407+ pax_close_kernel();
12408 }
12409
12410 static inline void native_pmd_clear(pmd_t *pmd)
12411@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
12412
12413 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12414 {
12415+ pax_open_kernel();
12416+ *pgdp = pgd;
12417+ pax_close_kernel();
12418+}
12419+
12420+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12421+{
12422 *pgdp = pgd;
12423 }
12424
12425diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12426index 766ea16..5b96cb3 100644
12427--- a/arch/x86/include/asm/pgtable_64_types.h
12428+++ b/arch/x86/include/asm/pgtable_64_types.h
12429@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12430 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12431 #define MODULES_END _AC(0xffffffffff000000, UL)
12432 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12433+#define MODULES_EXEC_VADDR MODULES_VADDR
12434+#define MODULES_EXEC_END MODULES_END
12435+
12436+#define ktla_ktva(addr) (addr)
12437+#define ktva_ktla(addr) (addr)
12438
12439 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12440diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12441index d1f4a76..2f46ba1 100644
12442--- a/arch/x86/include/asm/pgtable_types.h
12443+++ b/arch/x86/include/asm/pgtable_types.h
12444@@ -16,12 +16,11 @@
12445 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12446 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12447 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12448-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12449+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12450 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12451 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12452 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12453-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12454-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12455+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12456 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12457
12458 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12459@@ -39,7 +38,6 @@
12460 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12461 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12462 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12463-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12464 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12465 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12466 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12467@@ -55,8 +53,10 @@
12468
12469 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12470 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12471-#else
12472+#elif defined(CONFIG_KMEMCHECK)
12473 #define _PAGE_NX (_AT(pteval_t, 0))
12474+#else
12475+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12476 #endif
12477
12478 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12479@@ -93,6 +93,9 @@
12480 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12481 _PAGE_ACCESSED)
12482
12483+#define PAGE_READONLY_NOEXEC PAGE_READONLY
12484+#define PAGE_SHARED_NOEXEC PAGE_SHARED
12485+
12486 #define __PAGE_KERNEL_EXEC \
12487 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12488 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12489@@ -103,8 +106,8 @@
12490 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12491 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12492 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12493-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12494-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
12495+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12496+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
12497 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12498 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
12499 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
12500@@ -163,8 +166,8 @@
12501 * bits are combined, this will alow user to access the high address mapped
12502 * VDSO in the presence of CONFIG_COMPAT_VDSO
12503 */
12504-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12505-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12506+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12507+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12508 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12509 #endif
12510
12511@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12512 {
12513 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12514 }
12515+#endif
12516
12517+#if PAGETABLE_LEVELS == 3
12518+#include <asm-generic/pgtable-nopud.h>
12519+#endif
12520+
12521+#if PAGETABLE_LEVELS == 2
12522+#include <asm-generic/pgtable-nopmd.h>
12523+#endif
12524+
12525+#ifndef __ASSEMBLY__
12526 #if PAGETABLE_LEVELS > 3
12527 typedef struct { pudval_t pud; } pud_t;
12528
12529@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12530 return pud.pud;
12531 }
12532 #else
12533-#include <asm-generic/pgtable-nopud.h>
12534-
12535 static inline pudval_t native_pud_val(pud_t pud)
12536 {
12537 return native_pgd_val(pud.pgd);
12538@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12539 return pmd.pmd;
12540 }
12541 #else
12542-#include <asm-generic/pgtable-nopmd.h>
12543-
12544 static inline pmdval_t native_pmd_val(pmd_t pmd)
12545 {
12546 return native_pgd_val(pmd.pud.pgd);
12547@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
12548
12549 extern pteval_t __supported_pte_mask;
12550 extern void set_nx(void);
12551+
12552+#ifdef CONFIG_X86_32
12553+#ifdef CONFIG_X86_PAE
12554 extern int nx_enabled;
12555+#else
12556+#define nx_enabled (0)
12557+#endif
12558+#else
12559+#define nx_enabled (1)
12560+#endif
12561
12562 #define pgprot_writecombine pgprot_writecombine
12563 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12564diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12565index fa04dea..5f823fc 100644
12566--- a/arch/x86/include/asm/processor.h
12567+++ b/arch/x86/include/asm/processor.h
12568@@ -272,7 +272,7 @@ struct tss_struct {
12569
12570 } ____cacheline_aligned;
12571
12572-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12573+extern struct tss_struct init_tss[NR_CPUS];
12574
12575 /*
12576 * Save the original ist values for checking stack pointers during debugging
12577@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
12578 */
12579 #define TASK_SIZE PAGE_OFFSET
12580 #define TASK_SIZE_MAX TASK_SIZE
12581+
12582+#ifdef CONFIG_PAX_SEGMEXEC
12583+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12584+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12585+#else
12586 #define STACK_TOP TASK_SIZE
12587-#define STACK_TOP_MAX STACK_TOP
12588+#endif
12589+
12590+#define STACK_TOP_MAX TASK_SIZE
12591
12592 #define INIT_THREAD { \
12593- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12594+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12595 .vm86_info = NULL, \
12596 .sysenter_cs = __KERNEL_CS, \
12597 .io_bitmap_ptr = NULL, \
12598@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
12599 */
12600 #define INIT_TSS { \
12601 .x86_tss = { \
12602- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12603+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12604 .ss0 = __KERNEL_DS, \
12605 .ss1 = __KERNEL_CS, \
12606 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12607@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
12608 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12609
12610 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12611-#define KSTK_TOP(info) \
12612-({ \
12613- unsigned long *__ptr = (unsigned long *)(info); \
12614- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12615-})
12616+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12617
12618 /*
12619 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12620@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12621 #define task_pt_regs(task) \
12622 ({ \
12623 struct pt_regs *__regs__; \
12624- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12625+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12626 __regs__ - 1; \
12627 })
12628
12629@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12630 /*
12631 * User space process size. 47bits minus one guard page.
12632 */
12633-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12634+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12635
12636 /* This decides where the kernel will search for a free chunk of vm
12637 * space during mmap's.
12638 */
12639 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12640- 0xc0000000 : 0xFFFFe000)
12641+ 0xc0000000 : 0xFFFFf000)
12642
12643 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
12644 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12645@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12646 #define STACK_TOP_MAX TASK_SIZE_MAX
12647
12648 #define INIT_THREAD { \
12649- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12650+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12651 }
12652
12653 #define INIT_TSS { \
12654- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12655+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12656 }
12657
12658 /*
12659@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12660 */
12661 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12662
12663+#ifdef CONFIG_PAX_SEGMEXEC
12664+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12665+#endif
12666+
12667 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12668
12669 /* Get/set a process' ability to use the timestamp counter instruction */
12670diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12671index 0f0d908..f2e3da2 100644
12672--- a/arch/x86/include/asm/ptrace.h
12673+++ b/arch/x86/include/asm/ptrace.h
12674@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12675 }
12676
12677 /*
12678- * user_mode_vm(regs) determines whether a register set came from user mode.
12679+ * user_mode(regs) determines whether a register set came from user mode.
12680 * This is true if V8086 mode was enabled OR if the register set was from
12681 * protected mode with RPL-3 CS value. This tricky test checks that with
12682 * one comparison. Many places in the kernel can bypass this full check
12683- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12684+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12685+ * be used.
12686 */
12687-static inline int user_mode(struct pt_regs *regs)
12688+static inline int user_mode_novm(struct pt_regs *regs)
12689 {
12690 #ifdef CONFIG_X86_32
12691 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12692 #else
12693- return !!(regs->cs & 3);
12694+ return !!(regs->cs & SEGMENT_RPL_MASK);
12695 #endif
12696 }
12697
12698-static inline int user_mode_vm(struct pt_regs *regs)
12699+static inline int user_mode(struct pt_regs *regs)
12700 {
12701 #ifdef CONFIG_X86_32
12702 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12703 USER_RPL;
12704 #else
12705- return user_mode(regs);
12706+ return user_mode_novm(regs);
12707 #endif
12708 }
12709
12710diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12711index 562d4fd..6e39df1 100644
12712--- a/arch/x86/include/asm/reboot.h
12713+++ b/arch/x86/include/asm/reboot.h
12714@@ -6,19 +6,19 @@
12715 struct pt_regs;
12716
12717 struct machine_ops {
12718- void (*restart)(char *cmd);
12719- void (*halt)(void);
12720- void (*power_off)(void);
12721+ void (* __noreturn restart)(char *cmd);
12722+ void (* __noreturn halt)(void);
12723+ void (* __noreturn power_off)(void);
12724 void (*shutdown)(void);
12725 void (*crash_shutdown)(struct pt_regs *);
12726- void (*emergency_restart)(void);
12727-};
12728+ void (* __noreturn emergency_restart)(void);
12729+} __no_const;
12730
12731 extern struct machine_ops machine_ops;
12732
12733 void native_machine_crash_shutdown(struct pt_regs *regs);
12734 void native_machine_shutdown(void);
12735-void machine_real_restart(const unsigned char *code, int length);
12736+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
12737
12738 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
12739 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
12740diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12741index 606ede1..dbfff37 100644
12742--- a/arch/x86/include/asm/rwsem.h
12743+++ b/arch/x86/include/asm/rwsem.h
12744@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12745 {
12746 asm volatile("# beginning down_read\n\t"
12747 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12748+
12749+#ifdef CONFIG_PAX_REFCOUNT
12750+ "jno 0f\n"
12751+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
12752+ "int $4\n0:\n"
12753+ _ASM_EXTABLE(0b, 0b)
12754+#endif
12755+
12756 /* adds 0x00000001, returns the old value */
12757 " jns 1f\n"
12758 " call call_rwsem_down_read_failed\n"
12759@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12760 "1:\n\t"
12761 " mov %1,%2\n\t"
12762 " add %3,%2\n\t"
12763+
12764+#ifdef CONFIG_PAX_REFCOUNT
12765+ "jno 0f\n"
12766+ "sub %3,%2\n"
12767+ "int $4\n0:\n"
12768+ _ASM_EXTABLE(0b, 0b)
12769+#endif
12770+
12771 " jle 2f\n\t"
12772 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12773 " jnz 1b\n\t"
12774@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12775 tmp = RWSEM_ACTIVE_WRITE_BIAS;
12776 asm volatile("# beginning down_write\n\t"
12777 LOCK_PREFIX " xadd %1,(%2)\n\t"
12778+
12779+#ifdef CONFIG_PAX_REFCOUNT
12780+ "jno 0f\n"
12781+ "mov %1,(%2)\n"
12782+ "int $4\n0:\n"
12783+ _ASM_EXTABLE(0b, 0b)
12784+#endif
12785+
12786 /* subtract 0x0000ffff, returns the old value */
12787 " test %1,%1\n\t"
12788 /* was the count 0 before? */
12789@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12790 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
12791 asm volatile("# beginning __up_read\n\t"
12792 LOCK_PREFIX " xadd %1,(%2)\n\t"
12793+
12794+#ifdef CONFIG_PAX_REFCOUNT
12795+ "jno 0f\n"
12796+ "mov %1,(%2)\n"
12797+ "int $4\n0:\n"
12798+ _ASM_EXTABLE(0b, 0b)
12799+#endif
12800+
12801 /* subtracts 1, returns the old value */
12802 " jns 1f\n\t"
12803 " call call_rwsem_wake\n"
12804@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12805 rwsem_count_t tmp;
12806 asm volatile("# beginning __up_write\n\t"
12807 LOCK_PREFIX " xadd %1,(%2)\n\t"
12808+
12809+#ifdef CONFIG_PAX_REFCOUNT
12810+ "jno 0f\n"
12811+ "mov %1,(%2)\n"
12812+ "int $4\n0:\n"
12813+ _ASM_EXTABLE(0b, 0b)
12814+#endif
12815+
12816 /* tries to transition
12817 0xffff0001 -> 0x00000000 */
12818 " jz 1f\n"
12819@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12820 {
12821 asm volatile("# beginning __downgrade_write\n\t"
12822 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12823+
12824+#ifdef CONFIG_PAX_REFCOUNT
12825+ "jno 0f\n"
12826+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12827+ "int $4\n0:\n"
12828+ _ASM_EXTABLE(0b, 0b)
12829+#endif
12830+
12831 /*
12832 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12833 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12834@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12835 static inline void rwsem_atomic_add(rwsem_count_t delta,
12836 struct rw_semaphore *sem)
12837 {
12838- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12839+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12840+
12841+#ifdef CONFIG_PAX_REFCOUNT
12842+ "jno 0f\n"
12843+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12844+ "int $4\n0:\n"
12845+ _ASM_EXTABLE(0b, 0b)
12846+#endif
12847+
12848 : "+m" (sem->count)
12849 : "er" (delta));
12850 }
12851@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
12852 {
12853 rwsem_count_t tmp = delta;
12854
12855- asm volatile(LOCK_PREFIX "xadd %0,%1"
12856+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
12857+
12858+#ifdef CONFIG_PAX_REFCOUNT
12859+ "jno 0f\n"
12860+ "mov %0,%1\n"
12861+ "int $4\n0:\n"
12862+ _ASM_EXTABLE(0b, 0b)
12863+#endif
12864+
12865 : "+r" (tmp), "+m" (sem->count)
12866 : : "memory");
12867
12868diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12869index 14e0ed8..7f7dd5e 100644
12870--- a/arch/x86/include/asm/segment.h
12871+++ b/arch/x86/include/asm/segment.h
12872@@ -62,10 +62,15 @@
12873 * 26 - ESPFIX small SS
12874 * 27 - per-cpu [ offset to per-cpu data area ]
12875 * 28 - stack_canary-20 [ for stack protector ]
12876- * 29 - unused
12877- * 30 - unused
12878+ * 29 - PCI BIOS CS
12879+ * 30 - PCI BIOS DS
12880 * 31 - TSS for double fault handler
12881 */
12882+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12883+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12884+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12885+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12886+
12887 #define GDT_ENTRY_TLS_MIN 6
12888 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12889
12890@@ -77,6 +82,8 @@
12891
12892 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
12893
12894+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12895+
12896 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12897
12898 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12899@@ -88,7 +95,7 @@
12900 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12901 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12902
12903-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12904+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12905 #ifdef CONFIG_SMP
12906 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12907 #else
12908@@ -102,6 +109,12 @@
12909 #define __KERNEL_STACK_CANARY 0
12910 #endif
12911
12912+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12913+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12914+
12915+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12916+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12917+
12918 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12919
12920 /*
12921@@ -139,7 +152,7 @@
12922 */
12923
12924 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12925-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12926+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12927
12928
12929 #else
12930@@ -163,6 +176,8 @@
12931 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12932 #define __USER32_DS __USER_DS
12933
12934+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12935+
12936 #define GDT_ENTRY_TSS 8 /* needs two entries */
12937 #define GDT_ENTRY_LDT 10 /* needs two entries */
12938 #define GDT_ENTRY_TLS_MIN 12
12939@@ -183,6 +198,7 @@
12940 #endif
12941
12942 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12943+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12944 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12945 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12946 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12947diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12948index 4c2f63c..5685db2 100644
12949--- a/arch/x86/include/asm/smp.h
12950+++ b/arch/x86/include/asm/smp.h
12951@@ -24,7 +24,7 @@ extern unsigned int num_processors;
12952 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12953 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12954 DECLARE_PER_CPU(u16, cpu_llc_id);
12955-DECLARE_PER_CPU(int, cpu_number);
12956+DECLARE_PER_CPU(unsigned int, cpu_number);
12957
12958 static inline struct cpumask *cpu_sibling_mask(int cpu)
12959 {
12960@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12961 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12962
12963 /* Static state in head.S used to set up a CPU */
12964-extern struct {
12965- void *sp;
12966- unsigned short ss;
12967-} stack_start;
12968+extern unsigned long stack_start; /* Initial stack pointer address */
12969
12970 struct smp_ops {
12971 void (*smp_prepare_boot_cpu)(void);
12972@@ -60,7 +57,7 @@ struct smp_ops {
12973
12974 void (*send_call_func_ipi)(const struct cpumask *mask);
12975 void (*send_call_func_single_ipi)(int cpu);
12976-};
12977+} __no_const;
12978
12979 /* Globals due to paravirt */
12980 extern void set_cpu_sibling_map(int cpu);
12981@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12982 extern int safe_smp_processor_id(void);
12983
12984 #elif defined(CONFIG_X86_64_SMP)
12985-#define raw_smp_processor_id() (percpu_read(cpu_number))
12986-
12987-#define stack_smp_processor_id() \
12988-({ \
12989- struct thread_info *ti; \
12990- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12991- ti->cpu; \
12992-})
12993+#define raw_smp_processor_id() (percpu_read(cpu_number))
12994+#define stack_smp_processor_id() raw_smp_processor_id()
12995 #define safe_smp_processor_id() smp_processor_id()
12996
12997 #endif
12998diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12999index 4e77853..4359783 100644
13000--- a/arch/x86/include/asm/spinlock.h
13001+++ b/arch/x86/include/asm/spinlock.h
13002@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
13003 static inline void __raw_read_lock(raw_rwlock_t *rw)
13004 {
13005 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
13006+
13007+#ifdef CONFIG_PAX_REFCOUNT
13008+ "jno 0f\n"
13009+ LOCK_PREFIX " addl $1,(%0)\n"
13010+ "int $4\n0:\n"
13011+ _ASM_EXTABLE(0b, 0b)
13012+#endif
13013+
13014 "jns 1f\n"
13015 "call __read_lock_failed\n\t"
13016 "1:\n"
13017@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
13018 static inline void __raw_write_lock(raw_rwlock_t *rw)
13019 {
13020 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
13021+
13022+#ifdef CONFIG_PAX_REFCOUNT
13023+ "jno 0f\n"
13024+ LOCK_PREFIX " addl %1,(%0)\n"
13025+ "int $4\n0:\n"
13026+ _ASM_EXTABLE(0b, 0b)
13027+#endif
13028+
13029 "jz 1f\n"
13030 "call __write_lock_failed\n\t"
13031 "1:\n"
13032@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
13033
13034 static inline void __raw_read_unlock(raw_rwlock_t *rw)
13035 {
13036- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
13037+ asm volatile(LOCK_PREFIX "incl %0\n"
13038+
13039+#ifdef CONFIG_PAX_REFCOUNT
13040+ "jno 0f\n"
13041+ LOCK_PREFIX "decl %0\n"
13042+ "int $4\n0:\n"
13043+ _ASM_EXTABLE(0b, 0b)
13044+#endif
13045+
13046+ :"+m" (rw->lock) : : "memory");
13047 }
13048
13049 static inline void __raw_write_unlock(raw_rwlock_t *rw)
13050 {
13051- asm volatile(LOCK_PREFIX "addl %1, %0"
13052+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
13053+
13054+#ifdef CONFIG_PAX_REFCOUNT
13055+ "jno 0f\n"
13056+ LOCK_PREFIX "subl %1, %0\n"
13057+ "int $4\n0:\n"
13058+ _ASM_EXTABLE(0b, 0b)
13059+#endif
13060+
13061 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
13062 }
13063
13064diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13065index 1575177..cb23f52 100644
13066--- a/arch/x86/include/asm/stackprotector.h
13067+++ b/arch/x86/include/asm/stackprotector.h
13068@@ -48,7 +48,7 @@
13069 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13070 */
13071 #define GDT_STACK_CANARY_INIT \
13072- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13073+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13074
13075 /*
13076 * Initialize the stackprotector canary value.
13077@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
13078
13079 static inline void load_stack_canary_segment(void)
13080 {
13081-#ifdef CONFIG_X86_32
13082+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13083 asm volatile ("mov %0, %%gs" : : "r" (0));
13084 #endif
13085 }
13086diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
13087index 1bb6e39..234246f 100644
13088--- a/arch/x86/include/asm/syscalls.h
13089+++ b/arch/x86/include/asm/syscalls.h
13090@@ -24,7 +24,7 @@ int sys_fork(struct pt_regs *);
13091 int sys_vfork(struct pt_regs *);
13092
13093 /* kernel/ldt.c */
13094-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
13095+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
13096
13097 /* kernel/signal.c */
13098 long sys_rt_sigreturn(struct pt_regs *);
13099diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
13100index e0fbf29..858ef4a 100644
13101--- a/arch/x86/include/asm/system.h
13102+++ b/arch/x86/include/asm/system.h
13103@@ -132,7 +132,7 @@ do { \
13104 "thread_return:\n\t" \
13105 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13106 __switch_canary \
13107- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13108+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13109 "movq %%rax,%%rdi\n\t" \
13110 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13111 "jnz ret_from_fork\n\t" \
13112@@ -143,7 +143,7 @@ do { \
13113 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13114 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13115 [_tif_fork] "i" (_TIF_FORK), \
13116- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13117+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
13118 [current_task] "m" (per_cpu_var(current_task)) \
13119 __switch_canary_iparam \
13120 : "memory", "cc" __EXTRA_CLOBBER)
13121@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
13122 {
13123 unsigned long __limit;
13124 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13125- return __limit + 1;
13126+ return __limit;
13127 }
13128
13129 static inline void native_clts(void)
13130@@ -340,12 +340,12 @@ void enable_hlt(void);
13131
13132 void cpu_idle_wait(void);
13133
13134-extern unsigned long arch_align_stack(unsigned long sp);
13135+#define arch_align_stack(x) ((x) & ~0xfUL)
13136 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13137
13138 void default_idle(void);
13139
13140-void stop_this_cpu(void *dummy);
13141+void stop_this_cpu(void *dummy) __noreturn;
13142
13143 /*
13144 * Force strict CPU ordering.
13145diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13146index 19c3ce4..8962535 100644
13147--- a/arch/x86/include/asm/thread_info.h
13148+++ b/arch/x86/include/asm/thread_info.h
13149@@ -10,6 +10,7 @@
13150 #include <linux/compiler.h>
13151 #include <asm/page.h>
13152 #include <asm/types.h>
13153+#include <asm/percpu.h>
13154
13155 /*
13156 * low level task data that entry.S needs immediate access to
13157@@ -24,7 +25,6 @@ struct exec_domain;
13158 #include <asm/atomic.h>
13159
13160 struct thread_info {
13161- struct task_struct *task; /* main task structure */
13162 struct exec_domain *exec_domain; /* execution domain */
13163 __u32 flags; /* low level flags */
13164 __u32 status; /* thread synchronous flags */
13165@@ -34,18 +34,12 @@ struct thread_info {
13166 mm_segment_t addr_limit;
13167 struct restart_block restart_block;
13168 void __user *sysenter_return;
13169-#ifdef CONFIG_X86_32
13170- unsigned long previous_esp; /* ESP of the previous stack in
13171- case of nested (IRQ) stacks
13172- */
13173- __u8 supervisor_stack[0];
13174-#endif
13175+ unsigned long lowest_stack;
13176 int uaccess_err;
13177 };
13178
13179-#define INIT_THREAD_INFO(tsk) \
13180+#define INIT_THREAD_INFO \
13181 { \
13182- .task = &tsk, \
13183 .exec_domain = &default_exec_domain, \
13184 .flags = 0, \
13185 .cpu = 0, \
13186@@ -56,7 +50,7 @@ struct thread_info {
13187 }, \
13188 }
13189
13190-#define init_thread_info (init_thread_union.thread_info)
13191+#define init_thread_info (init_thread_union.stack)
13192 #define init_stack (init_thread_union.stack)
13193
13194 #else /* !__ASSEMBLY__ */
13195@@ -163,45 +157,40 @@ struct thread_info {
13196 #define alloc_thread_info(tsk) \
13197 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
13198
13199-#ifdef CONFIG_X86_32
13200-
13201-#define STACK_WARN (THREAD_SIZE/8)
13202-/*
13203- * macros/functions for gaining access to the thread information structure
13204- *
13205- * preempt_count needs to be 1 initially, until the scheduler is functional.
13206- */
13207-#ifndef __ASSEMBLY__
13208-
13209-
13210-/* how to get the current stack pointer from C */
13211-register unsigned long current_stack_pointer asm("esp") __used;
13212-
13213-/* how to get the thread information struct from C */
13214-static inline struct thread_info *current_thread_info(void)
13215-{
13216- return (struct thread_info *)
13217- (current_stack_pointer & ~(THREAD_SIZE - 1));
13218-}
13219-
13220-#else /* !__ASSEMBLY__ */
13221-
13222+#ifdef __ASSEMBLY__
13223 /* how to get the thread information struct from ASM */
13224 #define GET_THREAD_INFO(reg) \
13225- movl $-THREAD_SIZE, reg; \
13226- andl %esp, reg
13227+ mov PER_CPU_VAR(current_tinfo), reg
13228
13229 /* use this one if reg already contains %esp */
13230-#define GET_THREAD_INFO_WITH_ESP(reg) \
13231- andl $-THREAD_SIZE, reg
13232+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13233+#else
13234+/* how to get the thread information struct from C */
13235+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13236+
13237+static __always_inline struct thread_info *current_thread_info(void)
13238+{
13239+ return percpu_read_stable(current_tinfo);
13240+}
13241+#endif
13242+
13243+#ifdef CONFIG_X86_32
13244+
13245+#define STACK_WARN (THREAD_SIZE/8)
13246+/*
13247+ * macros/functions for gaining access to the thread information structure
13248+ *
13249+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13250+ */
13251+#ifndef __ASSEMBLY__
13252+
13253+/* how to get the current stack pointer from C */
13254+register unsigned long current_stack_pointer asm("esp") __used;
13255
13256 #endif
13257
13258 #else /* X86_32 */
13259
13260-#include <asm/percpu.h>
13261-#define KERNEL_STACK_OFFSET (5*8)
13262-
13263 /*
13264 * macros/functions for gaining access to the thread information structure
13265 * preempt_count needs to be 1 initially, until the scheduler is functional.
13266@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
13267 #ifndef __ASSEMBLY__
13268 DECLARE_PER_CPU(unsigned long, kernel_stack);
13269
13270-static inline struct thread_info *current_thread_info(void)
13271-{
13272- struct thread_info *ti;
13273- ti = (void *)(percpu_read_stable(kernel_stack) +
13274- KERNEL_STACK_OFFSET - THREAD_SIZE);
13275- return ti;
13276-}
13277-
13278-#else /* !__ASSEMBLY__ */
13279-
13280-/* how to get the thread information struct from ASM */
13281-#define GET_THREAD_INFO(reg) \
13282- movq PER_CPU_VAR(kernel_stack),reg ; \
13283- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13284-
13285+/* how to get the current stack pointer from C */
13286+register unsigned long current_stack_pointer asm("rsp") __used;
13287 #endif
13288
13289 #endif /* !X86_32 */
13290@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
13291 extern void free_thread_info(struct thread_info *ti);
13292 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13293 #define arch_task_cache_init arch_task_cache_init
13294+
13295+#define __HAVE_THREAD_FUNCTIONS
13296+#define task_thread_info(task) (&(task)->tinfo)
13297+#define task_stack_page(task) ((task)->stack)
13298+#define setup_thread_stack(p, org) do {} while (0)
13299+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13300+
13301+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
13302+extern struct task_struct *alloc_task_struct(void);
13303+extern void free_task_struct(struct task_struct *);
13304+
13305 #endif
13306 #endif /* _ASM_X86_THREAD_INFO_H */
13307diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13308index 61c5874..8a046e9 100644
13309--- a/arch/x86/include/asm/uaccess.h
13310+++ b/arch/x86/include/asm/uaccess.h
13311@@ -8,12 +8,15 @@
13312 #include <linux/thread_info.h>
13313 #include <linux/prefetch.h>
13314 #include <linux/string.h>
13315+#include <linux/sched.h>
13316 #include <asm/asm.h>
13317 #include <asm/page.h>
13318
13319 #define VERIFY_READ 0
13320 #define VERIFY_WRITE 1
13321
13322+extern void check_object_size(const void *ptr, unsigned long n, bool to);
13323+
13324 /*
13325 * The fs value determines whether argument validity checking should be
13326 * performed or not. If get_fs() == USER_DS, checking is performed, with
13327@@ -29,7 +32,12 @@
13328
13329 #define get_ds() (KERNEL_DS)
13330 #define get_fs() (current_thread_info()->addr_limit)
13331+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13332+void __set_fs(mm_segment_t x);
13333+void set_fs(mm_segment_t x);
13334+#else
13335 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13336+#endif
13337
13338 #define segment_eq(a, b) ((a).seg == (b).seg)
13339
13340@@ -77,7 +85,33 @@
13341 * checks that the pointer is in the user space range - after calling
13342 * this function, memory access functions may still return -EFAULT.
13343 */
13344-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13345+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13346+#define access_ok(type, addr, size) \
13347+({ \
13348+ long __size = size; \
13349+ unsigned long __addr = (unsigned long)addr; \
13350+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13351+ unsigned long __end_ao = __addr + __size - 1; \
13352+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
13353+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13354+ while(__addr_ao <= __end_ao) { \
13355+ char __c_ao; \
13356+ __addr_ao += PAGE_SIZE; \
13357+ if (__size > PAGE_SIZE) \
13358+ cond_resched(); \
13359+ if (__get_user(__c_ao, (char __user *)__addr)) \
13360+ break; \
13361+ if (type != VERIFY_WRITE) { \
13362+ __addr = __addr_ao; \
13363+ continue; \
13364+ } \
13365+ if (__put_user(__c_ao, (char __user *)__addr)) \
13366+ break; \
13367+ __addr = __addr_ao; \
13368+ } \
13369+ } \
13370+ __ret_ao; \
13371+})
13372
13373 /*
13374 * The exception table consists of pairs of addresses: the first is the
13375@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
13376 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13377 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13378
13379-
13380+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13381+#define __copyuser_seg "gs;"
13382+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13383+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13384+#else
13385+#define __copyuser_seg
13386+#define __COPYUSER_SET_ES
13387+#define __COPYUSER_RESTORE_ES
13388+#endif
13389
13390 #ifdef CONFIG_X86_32
13391 #define __put_user_asm_u64(x, addr, err, errret) \
13392- asm volatile("1: movl %%eax,0(%2)\n" \
13393- "2: movl %%edx,4(%2)\n" \
13394+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13395+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13396 "3:\n" \
13397 ".section .fixup,\"ax\"\n" \
13398 "4: movl %3,%0\n" \
13399@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
13400 : "A" (x), "r" (addr), "i" (errret), "0" (err))
13401
13402 #define __put_user_asm_ex_u64(x, addr) \
13403- asm volatile("1: movl %%eax,0(%1)\n" \
13404- "2: movl %%edx,4(%1)\n" \
13405+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13406+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13407 "3:\n" \
13408 _ASM_EXTABLE(1b, 2b - 1b) \
13409 _ASM_EXTABLE(2b, 3b - 2b) \
13410@@ -253,7 +295,7 @@ extern void __put_user_8(void);
13411 __typeof__(*(ptr)) __pu_val; \
13412 __chk_user_ptr(ptr); \
13413 might_fault(); \
13414- __pu_val = x; \
13415+ __pu_val = (x); \
13416 switch (sizeof(*(ptr))) { \
13417 case 1: \
13418 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13419@@ -374,7 +416,7 @@ do { \
13420 } while (0)
13421
13422 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13423- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
13424+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13425 "2:\n" \
13426 ".section .fixup,\"ax\"\n" \
13427 "3: mov %3,%0\n" \
13428@@ -382,7 +424,7 @@ do { \
13429 " jmp 2b\n" \
13430 ".previous\n" \
13431 _ASM_EXTABLE(1b, 3b) \
13432- : "=r" (err), ltype(x) \
13433+ : "=r" (err), ltype (x) \
13434 : "m" (__m(addr)), "i" (errret), "0" (err))
13435
13436 #define __get_user_size_ex(x, ptr, size) \
13437@@ -407,7 +449,7 @@ do { \
13438 } while (0)
13439
13440 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13441- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13442+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13443 "2:\n" \
13444 _ASM_EXTABLE(1b, 2b - 1b) \
13445 : ltype(x) : "m" (__m(addr)))
13446@@ -424,13 +466,24 @@ do { \
13447 int __gu_err; \
13448 unsigned long __gu_val; \
13449 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13450- (x) = (__force __typeof__(*(ptr)))__gu_val; \
13451+ (x) = (__typeof__(*(ptr)))__gu_val; \
13452 __gu_err; \
13453 })
13454
13455 /* FIXME: this hack is definitely wrong -AK */
13456 struct __large_struct { unsigned long buf[100]; };
13457-#define __m(x) (*(struct __large_struct __user *)(x))
13458+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13459+#define ____m(x) \
13460+({ \
13461+ unsigned long ____x = (unsigned long)(x); \
13462+ if (____x < PAX_USER_SHADOW_BASE) \
13463+ ____x += PAX_USER_SHADOW_BASE; \
13464+ (void __user *)____x; \
13465+})
13466+#else
13467+#define ____m(x) (x)
13468+#endif
13469+#define __m(x) (*(struct __large_struct __user *)____m(x))
13470
13471 /*
13472 * Tell gcc we read from memory instead of writing: this is because
13473@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
13474 * aliasing issues.
13475 */
13476 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13477- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
13478+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13479 "2:\n" \
13480 ".section .fixup,\"ax\"\n" \
13481 "3: mov %3,%0\n" \
13482@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
13483 ".previous\n" \
13484 _ASM_EXTABLE(1b, 3b) \
13485 : "=r"(err) \
13486- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13487+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13488
13489 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13490- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13491+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13492 "2:\n" \
13493 _ASM_EXTABLE(1b, 2b - 1b) \
13494 : : ltype(x), "m" (__m(addr)))
13495@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
13496 * On error, the variable @x is set to zero.
13497 */
13498
13499+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13500+#define __get_user(x, ptr) get_user((x), (ptr))
13501+#else
13502 #define __get_user(x, ptr) \
13503 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13504+#endif
13505
13506 /**
13507 * __put_user: - Write a simple value into user space, with less checking.
13508@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
13509 * Returns zero on success, or -EFAULT on error.
13510 */
13511
13512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13513+#define __put_user(x, ptr) put_user((x), (ptr))
13514+#else
13515 #define __put_user(x, ptr) \
13516 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13517+#endif
13518
13519 #define __get_user_unaligned __get_user
13520 #define __put_user_unaligned __put_user
13521@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
13522 #define get_user_ex(x, ptr) do { \
13523 unsigned long __gue_val; \
13524 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13525- (x) = (__force __typeof__(*(ptr)))__gue_val; \
13526+ (x) = (__typeof__(*(ptr)))__gue_val; \
13527 } while (0)
13528
13529 #ifdef CONFIG_X86_WP_WORKS_OK
13530@@ -567,6 +628,7 @@ extern struct movsl_mask {
13531
13532 #define ARCH_HAS_NOCACHE_UACCESS 1
13533
13534+#define ARCH_HAS_SORT_EXTABLE
13535 #ifdef CONFIG_X86_32
13536 # include "uaccess_32.h"
13537 #else
13538diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13539index 632fb44..12702d6 100644
13540--- a/arch/x86/include/asm/uaccess_32.h
13541+++ b/arch/x86/include/asm/uaccess_32.h
13542@@ -12,15 +12,15 @@
13543 #include <asm/page.h>
13544
13545 unsigned long __must_check __copy_to_user_ll
13546- (void __user *to, const void *from, unsigned long n);
13547+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
13548 unsigned long __must_check __copy_from_user_ll
13549- (void *to, const void __user *from, unsigned long n);
13550+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13551 unsigned long __must_check __copy_from_user_ll_nozero
13552- (void *to, const void __user *from, unsigned long n);
13553+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13554 unsigned long __must_check __copy_from_user_ll_nocache
13555- (void *to, const void __user *from, unsigned long n);
13556+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13557 unsigned long __must_check __copy_from_user_ll_nocache_nozero
13558- (void *to, const void __user *from, unsigned long n);
13559+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
13560
13561 /**
13562 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
13563@@ -42,8 +42,15 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13564 */
13565
13566 static __always_inline unsigned long __must_check
13567+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13568+static __always_inline unsigned long __must_check
13569 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13570 {
13571+ pax_track_stack();
13572+
13573+ if ((long)n < 0)
13574+ return n;
13575+
13576 if (__builtin_constant_p(n)) {
13577 unsigned long ret;
13578
13579@@ -62,6 +69,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13580 return ret;
13581 }
13582 }
13583+ if (!__builtin_constant_p(n))
13584+ check_object_size(from, n, true);
13585 return __copy_to_user_ll(to, from, n);
13586 }
13587
13588@@ -80,15 +89,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13589 * On success, this will be zero.
13590 */
13591 static __always_inline unsigned long __must_check
13592+__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13593+static __always_inline unsigned long __must_check
13594 __copy_to_user(void __user *to, const void *from, unsigned long n)
13595 {
13596 might_fault();
13597+
13598 return __copy_to_user_inatomic(to, from, n);
13599 }
13600
13601 static __always_inline unsigned long
13602+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13603+static __always_inline unsigned long
13604 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13605 {
13606+ if ((long)n < 0)
13607+ return n;
13608+
13609 /* Avoid zeroing the tail if the copy fails..
13610 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13611 * but as the zeroing behaviour is only significant when n is not
13612@@ -135,9 +152,17 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13613 * for explanation of why this is needed.
13614 */
13615 static __always_inline unsigned long
13616+__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13617+static __always_inline unsigned long
13618 __copy_from_user(void *to, const void __user *from, unsigned long n)
13619 {
13620 might_fault();
13621+
13622+ pax_track_stack();
13623+
13624+ if ((long)n < 0)
13625+ return n;
13626+
13627 if (__builtin_constant_p(n)) {
13628 unsigned long ret;
13629
13630@@ -153,13 +178,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13631 return ret;
13632 }
13633 }
13634+ if (!__builtin_constant_p(n))
13635+ check_object_size(to, n, false);
13636 return __copy_from_user_ll(to, from, n);
13637 }
13638
13639 static __always_inline unsigned long __copy_from_user_nocache(void *to,
13640+ const void __user *from, unsigned long n) __size_overflow(3);
13641+static __always_inline unsigned long __copy_from_user_nocache(void *to,
13642 const void __user *from, unsigned long n)
13643 {
13644 might_fault();
13645+
13646+ if ((long)n < 0)
13647+ return n;
13648+
13649 if (__builtin_constant_p(n)) {
13650 unsigned long ret;
13651
13652@@ -180,16 +213,71 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13653
13654 static __always_inline unsigned long
13655 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13656+ unsigned long n) __size_overflow(3);
13657+static __always_inline unsigned long
13658+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
13659 unsigned long n)
13660 {
13661- return __copy_from_user_ll_nocache_nozero(to, from, n);
13662+ if ((long)n < 0)
13663+ return n;
13664+
13665+ return __copy_from_user_ll_nocache_nozero(to, from, n);
13666+}
13667+
13668+/**
13669+ * copy_to_user: - Copy a block of data into user space.
13670+ * @to: Destination address, in user space.
13671+ * @from: Source address, in kernel space.
13672+ * @n: Number of bytes to copy.
13673+ *
13674+ * Context: User context only. This function may sleep.
13675+ *
13676+ * Copy data from kernel space to user space.
13677+ *
13678+ * Returns number of bytes that could not be copied.
13679+ * On success, this will be zero.
13680+ */
13681+static __always_inline unsigned long __must_check
13682+copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
13683+static __always_inline unsigned long __must_check
13684+copy_to_user(void __user *to, const void *from, unsigned long n)
13685+{
13686+ if (access_ok(VERIFY_WRITE, to, n))
13687+ n = __copy_to_user(to, from, n);
13688+ return n;
13689+}
13690+
13691+/**
13692+ * copy_from_user: - Copy a block of data from user space.
13693+ * @to: Destination address, in kernel space.
13694+ * @from: Source address, in user space.
13695+ * @n: Number of bytes to copy.
13696+ *
13697+ * Context: User context only. This function may sleep.
13698+ *
13699+ * Copy data from user space to kernel space.
13700+ *
13701+ * Returns number of bytes that could not be copied.
13702+ * On success, this will be zero.
13703+ *
13704+ * If some data could not be copied, this function will pad the copied
13705+ * data to the requested size using zero bytes.
13706+ */
13707+static __always_inline unsigned long __must_check
13708+copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
13709+static __always_inline unsigned long __must_check
13710+copy_from_user(void *to, const void __user *from, unsigned long n)
13711+{
13712+ if (access_ok(VERIFY_READ, from, n))
13713+ n = __copy_from_user(to, from, n);
13714+ else if ((long)n > 0) {
13715+ if (!__builtin_constant_p(n))
13716+ check_object_size(to, n, false);
13717+ memset(to, 0, n);
13718+ }
13719+ return n;
13720 }
13721
13722-unsigned long __must_check copy_to_user(void __user *to,
13723- const void *from, unsigned long n);
13724-unsigned long __must_check copy_from_user(void *to,
13725- const void __user *from,
13726- unsigned long n);
13727 long __must_check strncpy_from_user(char *dst, const char __user *src,
13728 long count);
13729 long __must_check __strncpy_from_user(char *dst,
13730@@ -212,7 +300,7 @@ long __must_check __strncpy_from_user(char *dst,
13731 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13732
13733 long strnlen_user(const char __user *str, long n);
13734-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13735-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13736+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13737+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13738
13739 #endif /* _ASM_X86_UACCESS_32_H */
13740diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13741index db24b21..618b613 100644
13742--- a/arch/x86/include/asm/uaccess_64.h
13743+++ b/arch/x86/include/asm/uaccess_64.h
13744@@ -9,6 +9,9 @@
13745 #include <linux/prefetch.h>
13746 #include <linux/lockdep.h>
13747 #include <asm/page.h>
13748+#include <asm/pgtable.h>
13749+
13750+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13751
13752 /*
13753 * Copy To/From Userspace
13754@@ -16,116 +19,215 @@
13755
13756 /* Handles exceptions in both to and from, but doesn't do access_ok */
13757 __must_check unsigned long
13758-copy_user_generic(void *to, const void *from, unsigned len);
13759+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13760
13761 __must_check unsigned long
13762-copy_to_user(void __user *to, const void *from, unsigned len);
13763-__must_check unsigned long
13764-copy_from_user(void *to, const void __user *from, unsigned len);
13765-__must_check unsigned long
13766-copy_in_user(void __user *to, const void __user *from, unsigned len);
13767+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13768
13769 static __always_inline __must_check
13770-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13771+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13772+static __always_inline __must_check
13773+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13774 {
13775- int ret = 0;
13776+ unsigned ret = 0;
13777
13778 might_fault();
13779- if (!__builtin_constant_p(size))
13780- return copy_user_generic(dst, (__force void *)src, size);
13781+
13782+ if (size > INT_MAX)
13783+ return size;
13784+
13785+#ifdef CONFIG_PAX_MEMORY_UDEREF
13786+ if (!__access_ok(VERIFY_READ, src, size))
13787+ return size;
13788+#endif
13789+
13790+ if (!__builtin_constant_p(size)) {
13791+ check_object_size(dst, size, false);
13792+
13793+#ifdef CONFIG_PAX_MEMORY_UDEREF
13794+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13795+ src += PAX_USER_SHADOW_BASE;
13796+#endif
13797+
13798+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13799+ }
13800 switch (size) {
13801- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13802+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13803 ret, "b", "b", "=q", 1);
13804 return ret;
13805- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13806+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13807 ret, "w", "w", "=r", 2);
13808 return ret;
13809- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13810+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13811 ret, "l", "k", "=r", 4);
13812 return ret;
13813- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13814+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13815 ret, "q", "", "=r", 8);
13816 return ret;
13817 case 10:
13818- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13819+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13820 ret, "q", "", "=r", 10);
13821 if (unlikely(ret))
13822 return ret;
13823 __get_user_asm(*(u16 *)(8 + (char *)dst),
13824- (u16 __user *)(8 + (char __user *)src),
13825+ (const u16 __user *)(8 + (const char __user *)src),
13826 ret, "w", "w", "=r", 2);
13827 return ret;
13828 case 16:
13829- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13830+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13831 ret, "q", "", "=r", 16);
13832 if (unlikely(ret))
13833 return ret;
13834 __get_user_asm(*(u64 *)(8 + (char *)dst),
13835- (u64 __user *)(8 + (char __user *)src),
13836+ (const u64 __user *)(8 + (const char __user *)src),
13837 ret, "q", "", "=r", 8);
13838 return ret;
13839 default:
13840- return copy_user_generic(dst, (__force void *)src, size);
13841+
13842+#ifdef CONFIG_PAX_MEMORY_UDEREF
13843+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13844+ src += PAX_USER_SHADOW_BASE;
13845+#endif
13846+
13847+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13848 }
13849 }
13850
13851 static __always_inline __must_check
13852-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13853+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13854+static __always_inline __must_check
13855+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13856 {
13857- int ret = 0;
13858+ unsigned ret = 0;
13859
13860 might_fault();
13861- if (!__builtin_constant_p(size))
13862- return copy_user_generic((__force void *)dst, src, size);
13863+
13864+ pax_track_stack();
13865+
13866+ if (size > INT_MAX)
13867+ return size;
13868+
13869+#ifdef CONFIG_PAX_MEMORY_UDEREF
13870+ if (!__access_ok(VERIFY_WRITE, dst, size))
13871+ return size;
13872+#endif
13873+
13874+ if (!__builtin_constant_p(size)) {
13875+ check_object_size(src, size, true);
13876+
13877+#ifdef CONFIG_PAX_MEMORY_UDEREF
13878+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13879+ dst += PAX_USER_SHADOW_BASE;
13880+#endif
13881+
13882+ return copy_user_generic((__force_kernel void *)dst, src, size);
13883+ }
13884 switch (size) {
13885- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13886+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13887 ret, "b", "b", "iq", 1);
13888 return ret;
13889- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13890+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13891 ret, "w", "w", "ir", 2);
13892 return ret;
13893- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13894+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13895 ret, "l", "k", "ir", 4);
13896 return ret;
13897- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13898+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13899 ret, "q", "", "er", 8);
13900 return ret;
13901 case 10:
13902- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13903+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13904 ret, "q", "", "er", 10);
13905 if (unlikely(ret))
13906 return ret;
13907 asm("":::"memory");
13908- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13909+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13910 ret, "w", "w", "ir", 2);
13911 return ret;
13912 case 16:
13913- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13914+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13915 ret, "q", "", "er", 16);
13916 if (unlikely(ret))
13917 return ret;
13918 asm("":::"memory");
13919- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13920+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13921 ret, "q", "", "er", 8);
13922 return ret;
13923 default:
13924- return copy_user_generic((__force void *)dst, src, size);
13925+
13926+#ifdef CONFIG_PAX_MEMORY_UDEREF
13927+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13928+ dst += PAX_USER_SHADOW_BASE;
13929+#endif
13930+
13931+ return copy_user_generic((__force_kernel void *)dst, src, size);
13932 }
13933 }
13934
13935 static __always_inline __must_check
13936-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13937+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13938+static __always_inline __must_check
13939+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
13940 {
13941- int ret = 0;
13942+ if (access_ok(VERIFY_WRITE, to, len))
13943+ len = __copy_to_user(to, from, len);
13944+ return len;
13945+}
13946
13947+static __always_inline __must_check
13948+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13949+static __always_inline __must_check
13950+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
13951+{
13952 might_fault();
13953- if (!__builtin_constant_p(size))
13954- return copy_user_generic((__force void *)dst,
13955- (__force void *)src, size);
13956+
13957+ if (access_ok(VERIFY_READ, from, len))
13958+ len = __copy_from_user(to, from, len);
13959+ else if (len < INT_MAX) {
13960+ if (!__builtin_constant_p(len))
13961+ check_object_size(to, len, false);
13962+ memset(to, 0, len);
13963+ }
13964+ return len;
13965+}
13966+
13967+static __always_inline __must_check
13968+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13969+static __always_inline __must_check
13970+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13971+{
13972+ unsigned ret = 0;
13973+
13974+ might_fault();
13975+
13976+ pax_track_stack();
13977+
13978+ if (size > INT_MAX)
13979+ return size;
13980+
13981+#ifdef CONFIG_PAX_MEMORY_UDEREF
13982+ if (!__access_ok(VERIFY_READ, src, size))
13983+ return size;
13984+ if (!__access_ok(VERIFY_WRITE, dst, size))
13985+ return size;
13986+#endif
13987+
13988+ if (!__builtin_constant_p(size)) {
13989+
13990+#ifdef CONFIG_PAX_MEMORY_UDEREF
13991+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13992+ src += PAX_USER_SHADOW_BASE;
13993+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13994+ dst += PAX_USER_SHADOW_BASE;
13995+#endif
13996+
13997+ return copy_user_generic((__force_kernel void *)dst,
13998+ (__force_kernel const void *)src, size);
13999+ }
14000 switch (size) {
14001 case 1: {
14002 u8 tmp;
14003- __get_user_asm(tmp, (u8 __user *)src,
14004+ __get_user_asm(tmp, (const u8 __user *)src,
14005 ret, "b", "b", "=q", 1);
14006 if (likely(!ret))
14007 __put_user_asm(tmp, (u8 __user *)dst,
14008@@ -134,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14009 }
14010 case 2: {
14011 u16 tmp;
14012- __get_user_asm(tmp, (u16 __user *)src,
14013+ __get_user_asm(tmp, (const u16 __user *)src,
14014 ret, "w", "w", "=r", 2);
14015 if (likely(!ret))
14016 __put_user_asm(tmp, (u16 __user *)dst,
14017@@ -144,7 +246,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14018
14019 case 4: {
14020 u32 tmp;
14021- __get_user_asm(tmp, (u32 __user *)src,
14022+ __get_user_asm(tmp, (const u32 __user *)src,
14023 ret, "l", "k", "=r", 4);
14024 if (likely(!ret))
14025 __put_user_asm(tmp, (u32 __user *)dst,
14026@@ -153,7 +255,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14027 }
14028 case 8: {
14029 u64 tmp;
14030- __get_user_asm(tmp, (u64 __user *)src,
14031+ __get_user_asm(tmp, (const u64 __user *)src,
14032 ret, "q", "", "=r", 8);
14033 if (likely(!ret))
14034 __put_user_asm(tmp, (u64 __user *)dst,
14035@@ -161,8 +263,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
14036 return ret;
14037 }
14038 default:
14039- return copy_user_generic((__force void *)dst,
14040- (__force void *)src, size);
14041+
14042+#ifdef CONFIG_PAX_MEMORY_UDEREF
14043+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14044+ src += PAX_USER_SHADOW_BASE;
14045+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14046+ dst += PAX_USER_SHADOW_BASE;
14047+#endif
14048+
14049+ return copy_user_generic((__force_kernel void *)dst,
14050+ (__force_kernel const void *)src, size);
14051 }
14052 }
14053
14054@@ -173,36 +283,85 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
14055 __must_check long strnlen_user(const char __user *str, long n);
14056 __must_check long __strnlen_user(const char __user *str, long n);
14057 __must_check long strlen_user(const char __user *str);
14058-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
14059-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
14060+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14061+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
14062
14063-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
14064- unsigned size);
14065+static __must_check __always_inline unsigned long
14066+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14067+static __must_check __always_inline unsigned long
14068+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
14069+{
14070+ pax_track_stack();
14071+
14072+ if (size > INT_MAX)
14073+ return size;
14074+
14075+#ifdef CONFIG_PAX_MEMORY_UDEREF
14076+ if (!__access_ok(VERIFY_READ, src, size))
14077+ return size;
14078
14079-static __must_check __always_inline int
14080-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
14081+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
14082+ src += PAX_USER_SHADOW_BASE;
14083+#endif
14084+
14085+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
14086+}
14087+
14088+static __must_check __always_inline unsigned long
14089+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
14090+static __must_check __always_inline unsigned long
14091+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
14092 {
14093- return copy_user_generic((__force void *)dst, src, size);
14094+ if (size > INT_MAX)
14095+ return size;
14096+
14097+#ifdef CONFIG_PAX_MEMORY_UDEREF
14098+ if (!__access_ok(VERIFY_WRITE, dst, size))
14099+ return size;
14100+
14101+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
14102+ dst += PAX_USER_SHADOW_BASE;
14103+#endif
14104+
14105+ return copy_user_generic((__force_kernel void *)dst, src, size);
14106 }
14107
14108-extern long __copy_user_nocache(void *dst, const void __user *src,
14109- unsigned size, int zerorest);
14110+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
14111+ unsigned long size, int zerorest) __size_overflow(3);
14112
14113-static inline int
14114-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
14115+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
14116+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
14117 {
14118 might_sleep();
14119+
14120+ if (size > INT_MAX)
14121+ return size;
14122+
14123+#ifdef CONFIG_PAX_MEMORY_UDEREF
14124+ if (!__access_ok(VERIFY_READ, src, size))
14125+ return size;
14126+#endif
14127+
14128 return __copy_user_nocache(dst, src, size, 1);
14129 }
14130
14131-static inline int
14132-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14133- unsigned size)
14134+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14135+ unsigned long size) __size_overflow(3);
14136+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
14137+ unsigned long size)
14138 {
14139+ if (size > INT_MAX)
14140+ return size;
14141+
14142+#ifdef CONFIG_PAX_MEMORY_UDEREF
14143+ if (!__access_ok(VERIFY_READ, src, size))
14144+ return size;
14145+#endif
14146+
14147 return __copy_user_nocache(dst, src, size, 0);
14148 }
14149
14150-unsigned long
14151-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14152+extern unsigned long
14153+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
14154
14155 #endif /* _ASM_X86_UACCESS_64_H */
14156diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
14157index 9064052..786cfbc 100644
14158--- a/arch/x86/include/asm/vdso.h
14159+++ b/arch/x86/include/asm/vdso.h
14160@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
14161 #define VDSO32_SYMBOL(base, name) \
14162 ({ \
14163 extern const char VDSO32_##name[]; \
14164- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14165+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14166 })
14167 #endif
14168
14169diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
14170index 3d61e20..9507180 100644
14171--- a/arch/x86/include/asm/vgtod.h
14172+++ b/arch/x86/include/asm/vgtod.h
14173@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
14174 int sysctl_enabled;
14175 struct timezone sys_tz;
14176 struct { /* extract of a clocksource struct */
14177+ char name[8];
14178 cycle_t (*vread)(void);
14179 cycle_t cycle_last;
14180 cycle_t mask;
14181diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
14182index 61e08c0..b0da582 100644
14183--- a/arch/x86/include/asm/vmi.h
14184+++ b/arch/x86/include/asm/vmi.h
14185@@ -191,6 +191,7 @@ struct vrom_header {
14186 u8 reserved[96]; /* Reserved for headers */
14187 char vmi_init[8]; /* VMI_Init jump point */
14188 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
14189+ char rom_data[8048]; /* rest of the option ROM */
14190 } __attribute__((packed));
14191
14192 struct pnp_header {
14193diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
14194index c6e0bee..fcb9f74 100644
14195--- a/arch/x86/include/asm/vmi_time.h
14196+++ b/arch/x86/include/asm/vmi_time.h
14197@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
14198 int (*wallclock_updated)(void);
14199 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
14200 void (*cancel_alarm)(u32 flags);
14201-} vmi_timer_ops;
14202+} __no_const vmi_timer_ops;
14203
14204 /* Prototypes */
14205 extern void __init vmi_time_init(void);
14206diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
14207index d0983d2..1f7c9e9 100644
14208--- a/arch/x86/include/asm/vsyscall.h
14209+++ b/arch/x86/include/asm/vsyscall.h
14210@@ -15,9 +15,10 @@ enum vsyscall_num {
14211
14212 #ifdef __KERNEL__
14213 #include <linux/seqlock.h>
14214+#include <linux/getcpu.h>
14215+#include <linux/time.h>
14216
14217 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
14218-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
14219
14220 /* Definitions for CONFIG_GENERIC_TIME definitions */
14221 #define __section_vsyscall_gtod_data __attribute__ \
14222@@ -31,7 +32,6 @@ enum vsyscall_num {
14223 #define VGETCPU_LSL 2
14224
14225 extern int __vgetcpu_mode;
14226-extern volatile unsigned long __jiffies;
14227
14228 /* kernel space (writeable) */
14229 extern int vgetcpu_mode;
14230@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
14231
14232 extern void map_vsyscall(void);
14233
14234+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
14235+extern time_t vtime(time_t *t);
14236+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
14237 #endif /* __KERNEL__ */
14238
14239 #endif /* _ASM_X86_VSYSCALL_H */
14240diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14241index 2c756fd..3377e37 100644
14242--- a/arch/x86/include/asm/x86_init.h
14243+++ b/arch/x86/include/asm/x86_init.h
14244@@ -28,7 +28,7 @@ struct x86_init_mpparse {
14245 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
14246 void (*find_smp_config)(unsigned int reserve);
14247 void (*get_smp_config)(unsigned int early);
14248-};
14249+} __no_const;
14250
14251 /**
14252 * struct x86_init_resources - platform specific resource related ops
14253@@ -42,7 +42,7 @@ struct x86_init_resources {
14254 void (*probe_roms)(void);
14255 void (*reserve_resources)(void);
14256 char *(*memory_setup)(void);
14257-};
14258+} __no_const;
14259
14260 /**
14261 * struct x86_init_irqs - platform specific interrupt setup
14262@@ -55,7 +55,7 @@ struct x86_init_irqs {
14263 void (*pre_vector_init)(void);
14264 void (*intr_init)(void);
14265 void (*trap_init)(void);
14266-};
14267+} __no_const;
14268
14269 /**
14270 * struct x86_init_oem - oem platform specific customizing functions
14271@@ -65,7 +65,7 @@ struct x86_init_irqs {
14272 struct x86_init_oem {
14273 void (*arch_setup)(void);
14274 void (*banner)(void);
14275-};
14276+} __no_const;
14277
14278 /**
14279 * struct x86_init_paging - platform specific paging functions
14280@@ -75,7 +75,7 @@ struct x86_init_oem {
14281 struct x86_init_paging {
14282 void (*pagetable_setup_start)(pgd_t *base);
14283 void (*pagetable_setup_done)(pgd_t *base);
14284-};
14285+} __no_const;
14286
14287 /**
14288 * struct x86_init_timers - platform specific timer setup
14289@@ -88,7 +88,7 @@ struct x86_init_timers {
14290 void (*setup_percpu_clockev)(void);
14291 void (*tsc_pre_init)(void);
14292 void (*timer_init)(void);
14293-};
14294+} __no_const;
14295
14296 /**
14297 * struct x86_init_ops - functions for platform specific setup
14298@@ -101,7 +101,7 @@ struct x86_init_ops {
14299 struct x86_init_oem oem;
14300 struct x86_init_paging paging;
14301 struct x86_init_timers timers;
14302-};
14303+} __no_const;
14304
14305 /**
14306 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14307@@ -109,7 +109,7 @@ struct x86_init_ops {
14308 */
14309 struct x86_cpuinit_ops {
14310 void (*setup_percpu_clockev)(void);
14311-};
14312+} __no_const;
14313
14314 /**
14315 * struct x86_platform_ops - platform specific runtime functions
14316@@ -121,7 +121,7 @@ struct x86_platform_ops {
14317 unsigned long (*calibrate_tsc)(void);
14318 unsigned long (*get_wallclock)(void);
14319 int (*set_wallclock)(unsigned long nowtime);
14320-};
14321+} __no_const;
14322
14323 extern struct x86_init_ops x86_init;
14324 extern struct x86_cpuinit_ops x86_cpuinit;
14325diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14326index 727acc1..554f3eb 100644
14327--- a/arch/x86/include/asm/xsave.h
14328+++ b/arch/x86/include/asm/xsave.h
14329@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
14330 static inline int xsave_user(struct xsave_struct __user *buf)
14331 {
14332 int err;
14333+
14334+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14335+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
14336+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
14337+#endif
14338+
14339 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14340 "2:\n"
14341 ".section .fixup,\"ax\"\n"
14342@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14343 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14344 {
14345 int err;
14346- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14347+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14348 u32 lmask = mask;
14349 u32 hmask = mask >> 32;
14350
14351+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14352+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
14353+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
14354+#endif
14355+
14356 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14357 "2:\n"
14358 ".section .fixup,\"ax\"\n"
14359diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
14360index 6a564ac..9b1340c 100644
14361--- a/arch/x86/kernel/acpi/realmode/Makefile
14362+++ b/arch/x86/kernel/acpi/realmode/Makefile
14363@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
14364 $(call cc-option, -fno-stack-protector) \
14365 $(call cc-option, -mpreferred-stack-boundary=2)
14366 KBUILD_CFLAGS += $(call cc-option, -m32)
14367+ifdef CONSTIFY_PLUGIN
14368+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
14369+endif
14370 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
14371 GCOV_PROFILE := n
14372
14373diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
14374index 580b4e2..d4129e4 100644
14375--- a/arch/x86/kernel/acpi/realmode/wakeup.S
14376+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
14377@@ -91,6 +91,9 @@ _start:
14378 /* Do any other stuff... */
14379
14380 #ifndef CONFIG_64BIT
14381+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
14382+ call verify_cpu
14383+
14384 /* This could also be done in C code... */
14385 movl pmode_cr3, %eax
14386 movl %eax, %cr3
14387@@ -104,7 +107,7 @@ _start:
14388 movl %eax, %ecx
14389 orl %edx, %ecx
14390 jz 1f
14391- movl $0xc0000080, %ecx
14392+ mov $MSR_EFER, %ecx
14393 wrmsr
14394 1:
14395
14396@@ -114,6 +117,7 @@ _start:
14397 movl pmode_cr0, %eax
14398 movl %eax, %cr0
14399 jmp pmode_return
14400+# include "../../verify_cpu.S"
14401 #else
14402 pushw $0
14403 pushw trampoline_segment
14404diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14405index ca93638..7042f24 100644
14406--- a/arch/x86/kernel/acpi/sleep.c
14407+++ b/arch/x86/kernel/acpi/sleep.c
14408@@ -11,11 +11,12 @@
14409 #include <linux/cpumask.h>
14410 #include <asm/segment.h>
14411 #include <asm/desc.h>
14412+#include <asm/e820.h>
14413
14414 #include "realmode/wakeup.h"
14415 #include "sleep.h"
14416
14417-unsigned long acpi_wakeup_address;
14418+unsigned long acpi_wakeup_address = 0x2000;
14419 unsigned long acpi_realmode_flags;
14420
14421 /* address in low memory of the wakeup routine. */
14422@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
14423 #else /* CONFIG_64BIT */
14424 header->trampoline_segment = setup_trampoline() >> 4;
14425 #ifdef CONFIG_SMP
14426- stack_start.sp = temp_stack + sizeof(temp_stack);
14427+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14428+
14429+ pax_open_kernel();
14430 early_gdt_descr.address =
14431 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14432+ pax_close_kernel();
14433+
14434 initial_gs = per_cpu_offset(smp_processor_id());
14435 #endif
14436 initial_code = (unsigned long)wakeup_long64;
14437@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
14438 return;
14439 }
14440
14441- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
14442-
14443- if (!acpi_realmode) {
14444- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
14445- return;
14446- }
14447-
14448- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
14449+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
14450+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
14451 }
14452
14453
14454diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14455index 8ded418..079961e 100644
14456--- a/arch/x86/kernel/acpi/wakeup_32.S
14457+++ b/arch/x86/kernel/acpi/wakeup_32.S
14458@@ -30,13 +30,11 @@ wakeup_pmode_return:
14459 # and restore the stack ... but you need gdt for this to work
14460 movl saved_context_esp, %esp
14461
14462- movl %cs:saved_magic, %eax
14463- cmpl $0x12345678, %eax
14464+ cmpl $0x12345678, saved_magic
14465 jne bogus_magic
14466
14467 # jump to place where we left off
14468- movl saved_eip, %eax
14469- jmp *%eax
14470+ jmp *(saved_eip)
14471
14472 bogus_magic:
14473 jmp bogus_magic
14474diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14475index de7353c..075da5f 100644
14476--- a/arch/x86/kernel/alternative.c
14477+++ b/arch/x86/kernel/alternative.c
14478@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14479
14480 BUG_ON(p->len > MAX_PATCH_LEN);
14481 /* prep the buffer with the original instructions */
14482- memcpy(insnbuf, p->instr, p->len);
14483+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14484 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14485 (unsigned long)p->instr, p->len);
14486
14487@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
14488 if (smp_alt_once)
14489 free_init_pages("SMP alternatives",
14490 (unsigned long)__smp_locks,
14491- (unsigned long)__smp_locks_end);
14492+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14493
14494 restart_nmi();
14495 }
14496@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
14497 * instructions. And on the local CPU you need to be protected again NMI or MCE
14498 * handlers seeing an inconsistent instruction while you patch.
14499 */
14500-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14501+static void *__kprobes text_poke_early(void *addr, const void *opcode,
14502 size_t len)
14503 {
14504 unsigned long flags;
14505 local_irq_save(flags);
14506- memcpy(addr, opcode, len);
14507+
14508+ pax_open_kernel();
14509+ memcpy(ktla_ktva(addr), opcode, len);
14510 sync_core();
14511+ pax_close_kernel();
14512+
14513 local_irq_restore(flags);
14514 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14515 that causes hangs on some VIA CPUs. */
14516@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14517 */
14518 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14519 {
14520- unsigned long flags;
14521- char *vaddr;
14522+ unsigned char *vaddr = ktla_ktva(addr);
14523 struct page *pages[2];
14524- int i;
14525+ size_t i;
14526
14527 if (!core_kernel_text((unsigned long)addr)) {
14528- pages[0] = vmalloc_to_page(addr);
14529- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14530+ pages[0] = vmalloc_to_page(vaddr);
14531+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14532 } else {
14533- pages[0] = virt_to_page(addr);
14534+ pages[0] = virt_to_page(vaddr);
14535 WARN_ON(!PageReserved(pages[0]));
14536- pages[1] = virt_to_page(addr + PAGE_SIZE);
14537+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14538 }
14539 BUG_ON(!pages[0]);
14540- local_irq_save(flags);
14541- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14542- if (pages[1])
14543- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14544- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14545- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14546- clear_fixmap(FIX_TEXT_POKE0);
14547- if (pages[1])
14548- clear_fixmap(FIX_TEXT_POKE1);
14549- local_flush_tlb();
14550- sync_core();
14551- /* Could also do a CLFLUSH here to speed up CPU recovery; but
14552- that causes hangs on some VIA CPUs. */
14553+ text_poke_early(addr, opcode, len);
14554 for (i = 0; i < len; i++)
14555- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14556- local_irq_restore(flags);
14557+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14558 return addr;
14559 }
14560diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
14561index 3a44b75..1601800 100644
14562--- a/arch/x86/kernel/amd_iommu.c
14563+++ b/arch/x86/kernel/amd_iommu.c
14564@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
14565 }
14566 }
14567
14568-static struct dma_map_ops amd_iommu_dma_ops = {
14569+static const struct dma_map_ops amd_iommu_dma_ops = {
14570 .alloc_coherent = alloc_coherent,
14571 .free_coherent = free_coherent,
14572 .map_page = map_page,
14573diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14574index 1d2d670..8e3f477 100644
14575--- a/arch/x86/kernel/apic/apic.c
14576+++ b/arch/x86/kernel/apic/apic.c
14577@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
14578 /*
14579 * Debug level, exported for io_apic.c
14580 */
14581-unsigned int apic_verbosity;
14582+int apic_verbosity;
14583
14584 int pic_mode;
14585
14586@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14587 apic_write(APIC_ESR, 0);
14588 v1 = apic_read(APIC_ESR);
14589 ack_APIC_irq();
14590- atomic_inc(&irq_err_count);
14591+ atomic_inc_unchecked(&irq_err_count);
14592
14593 /*
14594 * Here is what the APIC error bits mean:
14595@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
14596 u16 *bios_cpu_apicid;
14597 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
14598
14599+ pax_track_stack();
14600+
14601 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
14602 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
14603
14604diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14605index 8928d97..f799cea 100644
14606--- a/arch/x86/kernel/apic/io_apic.c
14607+++ b/arch/x86/kernel/apic/io_apic.c
14608@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
14609 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
14610 GFP_ATOMIC);
14611 if (!ioapic_entries)
14612- return 0;
14613+ return NULL;
14614
14615 for (apic = 0; apic < nr_ioapics; apic++) {
14616 ioapic_entries[apic] =
14617@@ -733,7 +733,7 @@ nomem:
14618 kfree(ioapic_entries[apic]);
14619 kfree(ioapic_entries);
14620
14621- return 0;
14622+ return NULL;
14623 }
14624
14625 /*
14626@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14627 }
14628 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14629
14630-void lock_vector_lock(void)
14631+void lock_vector_lock(void) __acquires(vector_lock)
14632 {
14633 /* Used to the online set of cpus does not change
14634 * during assign_irq_vector.
14635@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
14636 spin_lock(&vector_lock);
14637 }
14638
14639-void unlock_vector_lock(void)
14640+void unlock_vector_lock(void) __releases(vector_lock)
14641 {
14642 spin_unlock(&vector_lock);
14643 }
14644@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
14645 ack_APIC_irq();
14646 }
14647
14648-atomic_t irq_mis_count;
14649+atomic_unchecked_t irq_mis_count;
14650
14651 static void ack_apic_level(unsigned int irq)
14652 {
14653@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
14654
14655 /* Tail end of version 0x11 I/O APIC bug workaround */
14656 if (!(v & (1 << (i & 0x1f)))) {
14657- atomic_inc(&irq_mis_count);
14658+ atomic_inc_unchecked(&irq_mis_count);
14659 spin_lock(&ioapic_lock);
14660 __mask_and_edge_IO_APIC_irq(cfg);
14661 __unmask_and_level_IO_APIC_irq(cfg);
14662diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14663index 151ace6..f317474 100644
14664--- a/arch/x86/kernel/apm_32.c
14665+++ b/arch/x86/kernel/apm_32.c
14666@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
14667 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14668 * even though they are called in protected mode.
14669 */
14670-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14671+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14672 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14673
14674 static const char driver_version[] = "1.16ac"; /* no spaces */
14675@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14676 BUG_ON(cpu != 0);
14677 gdt = get_cpu_gdt_table(cpu);
14678 save_desc_40 = gdt[0x40 / 8];
14679+
14680+ pax_open_kernel();
14681 gdt[0x40 / 8] = bad_bios_desc;
14682+ pax_close_kernel();
14683
14684 apm_irq_save(flags);
14685 APM_DO_SAVE_SEGS;
14686@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14687 &call->esi);
14688 APM_DO_RESTORE_SEGS;
14689 apm_irq_restore(flags);
14690+
14691+ pax_open_kernel();
14692 gdt[0x40 / 8] = save_desc_40;
14693+ pax_close_kernel();
14694+
14695 put_cpu();
14696
14697 return call->eax & 0xff;
14698@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14699 BUG_ON(cpu != 0);
14700 gdt = get_cpu_gdt_table(cpu);
14701 save_desc_40 = gdt[0x40 / 8];
14702+
14703+ pax_open_kernel();
14704 gdt[0x40 / 8] = bad_bios_desc;
14705+ pax_close_kernel();
14706
14707 apm_irq_save(flags);
14708 APM_DO_SAVE_SEGS;
14709@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14710 &call->eax);
14711 APM_DO_RESTORE_SEGS;
14712 apm_irq_restore(flags);
14713+
14714+ pax_open_kernel();
14715 gdt[0x40 / 8] = save_desc_40;
14716+ pax_close_kernel();
14717+
14718 put_cpu();
14719 return error;
14720 }
14721@@ -975,7 +989,7 @@ recalc:
14722
14723 static void apm_power_off(void)
14724 {
14725- unsigned char po_bios_call[] = {
14726+ const unsigned char po_bios_call[] = {
14727 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
14728 0x8e, 0xd0, /* movw ax,ss */
14729 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
14730@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
14731 * code to that CPU.
14732 */
14733 gdt = get_cpu_gdt_table(0);
14734+
14735+ pax_open_kernel();
14736 set_desc_base(&gdt[APM_CS >> 3],
14737 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14738 set_desc_base(&gdt[APM_CS_16 >> 3],
14739 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14740 set_desc_base(&gdt[APM_DS >> 3],
14741 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14742+ pax_close_kernel();
14743
14744 proc_create("apm", 0, NULL, &apm_file_ops);
14745
14746diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
14747index dfdbf64..9b2b6ce 100644
14748--- a/arch/x86/kernel/asm-offsets_32.c
14749+++ b/arch/x86/kernel/asm-offsets_32.c
14750@@ -51,7 +51,6 @@ void foo(void)
14751 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
14752 BLANK();
14753
14754- OFFSET(TI_task, thread_info, task);
14755 OFFSET(TI_exec_domain, thread_info, exec_domain);
14756 OFFSET(TI_flags, thread_info, flags);
14757 OFFSET(TI_status, thread_info, status);
14758@@ -60,6 +59,8 @@ void foo(void)
14759 OFFSET(TI_restart_block, thread_info, restart_block);
14760 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
14761 OFFSET(TI_cpu, thread_info, cpu);
14762+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14763+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14764 BLANK();
14765
14766 OFFSET(GDS_size, desc_ptr, size);
14767@@ -99,6 +100,7 @@ void foo(void)
14768
14769 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14770 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14771+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14772 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
14773 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
14774 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
14775@@ -115,6 +117,11 @@ void foo(void)
14776 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
14777 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14778 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14779+
14780+#ifdef CONFIG_PAX_KERNEXEC
14781+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14782+#endif
14783+
14784 #endif
14785
14786 #ifdef CONFIG_XEN
14787diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14788index 4a6aeed..371de20 100644
14789--- a/arch/x86/kernel/asm-offsets_64.c
14790+++ b/arch/x86/kernel/asm-offsets_64.c
14791@@ -44,6 +44,8 @@ int main(void)
14792 ENTRY(addr_limit);
14793 ENTRY(preempt_count);
14794 ENTRY(status);
14795+ ENTRY(lowest_stack);
14796+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14797 #ifdef CONFIG_IA32_EMULATION
14798 ENTRY(sysenter_return);
14799 #endif
14800@@ -63,6 +65,18 @@ int main(void)
14801 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14802 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
14803 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14804+
14805+#ifdef CONFIG_PAX_KERNEXEC
14806+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14807+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14808+#endif
14809+
14810+#ifdef CONFIG_PAX_MEMORY_UDEREF
14811+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14812+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14813+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14814+#endif
14815+
14816 #endif
14817
14818
14819@@ -115,6 +129,7 @@ int main(void)
14820 ENTRY(cr8);
14821 BLANK();
14822 #undef ENTRY
14823+ DEFINE(TSS_size, sizeof(struct tss_struct));
14824 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
14825 BLANK();
14826 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
14827@@ -130,6 +145,7 @@ int main(void)
14828
14829 BLANK();
14830 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14831+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14832 #ifdef CONFIG_XEN
14833 BLANK();
14834 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14835diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14836index ff502cc..dc5133e 100644
14837--- a/arch/x86/kernel/cpu/Makefile
14838+++ b/arch/x86/kernel/cpu/Makefile
14839@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
14840 CFLAGS_REMOVE_common.o = -pg
14841 endif
14842
14843-# Make sure load_percpu_segment has no stackprotector
14844-nostackp := $(call cc-option, -fno-stack-protector)
14845-CFLAGS_common.o := $(nostackp)
14846-
14847 obj-y := intel_cacheinfo.o addon_cpuid_features.o
14848 obj-y += proc.o capflags.o powerflags.o common.o
14849 obj-y += vmware.o hypervisor.o sched.o
14850diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14851index 6e082dc..a0b5f36 100644
14852--- a/arch/x86/kernel/cpu/amd.c
14853+++ b/arch/x86/kernel/cpu/amd.c
14854@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14855 unsigned int size)
14856 {
14857 /* AMD errata T13 (order #21922) */
14858- if ((c->x86 == 6)) {
14859+ if (c->x86 == 6) {
14860 /* Duron Rev A0 */
14861 if (c->x86_model == 3 && c->x86_mask == 0)
14862 size = 64;
14863diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14864index 4e34d10..ba6bc97 100644
14865--- a/arch/x86/kernel/cpu/common.c
14866+++ b/arch/x86/kernel/cpu/common.c
14867@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14868
14869 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14870
14871-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14872-#ifdef CONFIG_X86_64
14873- /*
14874- * We need valid kernel segments for data and code in long mode too
14875- * IRET will check the segment types kkeil 2000/10/28
14876- * Also sysret mandates a special GDT layout
14877- *
14878- * TLS descriptors are currently at a different place compared to i386.
14879- * Hopefully nobody expects them at a fixed place (Wine?)
14880- */
14881- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14882- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14883- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14884- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14885- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14886- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14887-#else
14888- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14889- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14890- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14891- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14892- /*
14893- * Segments used for calling PnP BIOS have byte granularity.
14894- * They code segments and data segments have fixed 64k limits,
14895- * the transfer segment sizes are set at run time.
14896- */
14897- /* 32-bit code */
14898- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14899- /* 16-bit code */
14900- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14901- /* 16-bit data */
14902- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14903- /* 16-bit data */
14904- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14905- /* 16-bit data */
14906- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14907- /*
14908- * The APM segments have byte granularity and their bases
14909- * are set at run time. All have 64k limits.
14910- */
14911- /* 32-bit code */
14912- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14913- /* 16-bit code */
14914- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14915- /* data */
14916- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14917-
14918- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14919- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14920- GDT_STACK_CANARY_INIT
14921-#endif
14922-} };
14923-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14924-
14925 static int __init x86_xsave_setup(char *s)
14926 {
14927 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14928@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
14929 {
14930 struct desc_ptr gdt_descr;
14931
14932- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14933+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14934 gdt_descr.size = GDT_SIZE - 1;
14935 load_gdt(&gdt_descr);
14936 /* Reload the per-cpu base */
14937@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14938 /* Filter out anything that depends on CPUID levels we don't have */
14939 filter_cpuid_features(c, true);
14940
14941+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14942+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14943+#endif
14944+
14945 /* If the model name is still unset, do table lookup. */
14946 if (!c->x86_model_id[0]) {
14947 const char *p;
14948@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
14949 }
14950 __setup("clearcpuid=", setup_disablecpuid);
14951
14952+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14953+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14954+
14955 #ifdef CONFIG_X86_64
14956 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14957
14958@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14959 EXPORT_PER_CPU_SYMBOL(current_task);
14960
14961 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14962- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14963+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14964 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14965
14966 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14967@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14968 {
14969 memset(regs, 0, sizeof(struct pt_regs));
14970 regs->fs = __KERNEL_PERCPU;
14971- regs->gs = __KERNEL_STACK_CANARY;
14972+ savesegment(gs, regs->gs);
14973
14974 return regs;
14975 }
14976@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
14977 int i;
14978
14979 cpu = stack_smp_processor_id();
14980- t = &per_cpu(init_tss, cpu);
14981+ t = init_tss + cpu;
14982 orig_ist = &per_cpu(orig_ist, cpu);
14983
14984 #ifdef CONFIG_NUMA
14985@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
14986 switch_to_new_gdt(cpu);
14987 loadsegment(fs, 0);
14988
14989- load_idt((const struct desc_ptr *)&idt_descr);
14990+ load_idt(&idt_descr);
14991
14992 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14993 syscall_init();
14994@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
14995 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14996 barrier();
14997
14998- check_efer();
14999 if (cpu != 0)
15000 enable_x2apic();
15001
15002@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
15003 {
15004 int cpu = smp_processor_id();
15005 struct task_struct *curr = current;
15006- struct tss_struct *t = &per_cpu(init_tss, cpu);
15007+ struct tss_struct *t = init_tss + cpu;
15008 struct thread_struct *thread = &curr->thread;
15009
15010 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
15011diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
15012index 6a77cca..4f4fca0 100644
15013--- a/arch/x86/kernel/cpu/intel.c
15014+++ b/arch/x86/kernel/cpu/intel.c
15015@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
15016 * Update the IDT descriptor and reload the IDT so that
15017 * it uses the read-only mapped virtual address.
15018 */
15019- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
15020+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
15021 load_idt(&idt_descr);
15022 }
15023 #endif
15024diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
15025index 417990f..96dc36b 100644
15026--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
15027+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
15028@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15029 return ret;
15030 }
15031
15032-static struct sysfs_ops sysfs_ops = {
15033+static const struct sysfs_ops sysfs_ops = {
15034 .show = show,
15035 .store = store,
15036 };
15037diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15038index 472763d..aa4d686 100644
15039--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
15040+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
15041@@ -178,6 +178,8 @@ static void raise_mce(struct mce *m)
15042
15043 /* Error injection interface */
15044 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15045+ size_t usize, loff_t *off) __size_overflow(3);
15046+static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15047 size_t usize, loff_t *off)
15048 {
15049 struct mce m;
15050@@ -211,7 +213,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
15051 static int inject_init(void)
15052 {
15053 printk(KERN_INFO "Machine check injector initialized\n");
15054- mce_chrdev_ops.write = mce_write;
15055+ pax_open_kernel();
15056+ *(void **)&mce_chrdev_ops.write = mce_write;
15057+ pax_close_kernel();
15058 register_die_notifier(&mce_raise_nb);
15059 return 0;
15060 }
15061diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
15062index 0f16a2b..21740f5 100644
15063--- a/arch/x86/kernel/cpu/mcheck/mce.c
15064+++ b/arch/x86/kernel/cpu/mcheck/mce.c
15065@@ -43,6 +43,7 @@
15066 #include <asm/ipi.h>
15067 #include <asm/mce.h>
15068 #include <asm/msr.h>
15069+#include <asm/local.h>
15070
15071 #include "mce-internal.h"
15072
15073@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
15074 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
15075 m->cs, m->ip);
15076
15077- if (m->cs == __KERNEL_CS)
15078+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
15079 print_symbol("{%s}", m->ip);
15080 pr_cont("\n");
15081 }
15082@@ -221,10 +222,10 @@ static void print_mce_tail(void)
15083
15084 #define PANIC_TIMEOUT 5 /* 5 seconds */
15085
15086-static atomic_t mce_paniced;
15087+static atomic_unchecked_t mce_paniced;
15088
15089 static int fake_panic;
15090-static atomic_t mce_fake_paniced;
15091+static atomic_unchecked_t mce_fake_paniced;
15092
15093 /* Panic in progress. Enable interrupts and wait for final IPI */
15094 static void wait_for_panic(void)
15095@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15096 /*
15097 * Make sure only one CPU runs in machine check panic
15098 */
15099- if (atomic_inc_return(&mce_paniced) > 1)
15100+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
15101 wait_for_panic();
15102 barrier();
15103
15104@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
15105 console_verbose();
15106 } else {
15107 /* Don't log too much for fake panic */
15108- if (atomic_inc_return(&mce_fake_paniced) > 1)
15109+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
15110 return;
15111 }
15112 print_mce_head();
15113@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
15114 * might have been modified by someone else.
15115 */
15116 rmb();
15117- if (atomic_read(&mce_paniced))
15118+ if (atomic_read_unchecked(&mce_paniced))
15119 wait_for_panic();
15120 if (!monarch_timeout)
15121 goto out;
15122@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
15123 }
15124
15125 /* Call the installed machine check handler for this CPU setup. */
15126-void (*machine_check_vector)(struct pt_regs *, long error_code) =
15127+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
15128 unexpected_machine_check;
15129
15130 /*
15131@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15132 return;
15133 }
15134
15135+ pax_open_kernel();
15136 machine_check_vector = do_machine_check;
15137+ pax_close_kernel();
15138
15139 mce_init();
15140 mce_cpu_features(c);
15141@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
15142 */
15143
15144 static DEFINE_SPINLOCK(mce_state_lock);
15145-static int open_count; /* #times opened */
15146+static local_t open_count; /* #times opened */
15147 static int open_exclu; /* already open exclusive? */
15148
15149 static int mce_open(struct inode *inode, struct file *file)
15150 {
15151 spin_lock(&mce_state_lock);
15152
15153- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
15154+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
15155 spin_unlock(&mce_state_lock);
15156
15157 return -EBUSY;
15158@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
15159
15160 if (file->f_flags & O_EXCL)
15161 open_exclu = 1;
15162- open_count++;
15163+ local_inc(&open_count);
15164
15165 spin_unlock(&mce_state_lock);
15166
15167@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
15168 {
15169 spin_lock(&mce_state_lock);
15170
15171- open_count--;
15172+ local_dec(&open_count);
15173 open_exclu = 0;
15174
15175 spin_unlock(&mce_state_lock);
15176@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
15177 static void mce_reset(void)
15178 {
15179 cpu_missing = 0;
15180- atomic_set(&mce_fake_paniced, 0);
15181+ atomic_set_unchecked(&mce_fake_paniced, 0);
15182 atomic_set(&mce_executing, 0);
15183 atomic_set(&mce_callin, 0);
15184 atomic_set(&global_nwo, 0);
15185diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15186index ef3cd31..9d2f6ab 100644
15187--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
15188+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15189@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15190 return ret;
15191 }
15192
15193-static struct sysfs_ops threshold_ops = {
15194+static const struct sysfs_ops threshold_ops = {
15195 .show = show,
15196 .store = store,
15197 };
15198diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15199index 5c0e653..0882b0a 100644
15200--- a/arch/x86/kernel/cpu/mcheck/p5.c
15201+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15202@@ -12,6 +12,7 @@
15203 #include <asm/system.h>
15204 #include <asm/mce.h>
15205 #include <asm/msr.h>
15206+#include <asm/pgtable.h>
15207
15208 /* By default disabled */
15209 int mce_p5_enabled __read_mostly;
15210@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15211 if (!cpu_has(c, X86_FEATURE_MCE))
15212 return;
15213
15214+ pax_open_kernel();
15215 machine_check_vector = pentium_machine_check;
15216+ pax_close_kernel();
15217 /* Make sure the vector pointer is visible before we enable MCEs: */
15218 wmb();
15219
15220diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15221index 54060f5..c1a7577 100644
15222--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15223+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15224@@ -11,6 +11,7 @@
15225 #include <asm/system.h>
15226 #include <asm/mce.h>
15227 #include <asm/msr.h>
15228+#include <asm/pgtable.h>
15229
15230 /* Machine check handler for WinChip C6: */
15231 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15232@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15233 {
15234 u32 lo, hi;
15235
15236+ pax_open_kernel();
15237 machine_check_vector = winchip_machine_check;
15238+ pax_close_kernel();
15239 /* Make sure the vector pointer is visible before we enable MCEs: */
15240 wmb();
15241
15242diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
15243index 33af141..92ba9cd 100644
15244--- a/arch/x86/kernel/cpu/mtrr/amd.c
15245+++ b/arch/x86/kernel/cpu/mtrr/amd.c
15246@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
15247 return 0;
15248 }
15249
15250-static struct mtrr_ops amd_mtrr_ops = {
15251+static const struct mtrr_ops amd_mtrr_ops = {
15252 .vendor = X86_VENDOR_AMD,
15253 .set = amd_set_mtrr,
15254 .get = amd_get_mtrr,
15255diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
15256index de89f14..316fe3e 100644
15257--- a/arch/x86/kernel/cpu/mtrr/centaur.c
15258+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
15259@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
15260 return 0;
15261 }
15262
15263-static struct mtrr_ops centaur_mtrr_ops = {
15264+static const struct mtrr_ops centaur_mtrr_ops = {
15265 .vendor = X86_VENDOR_CENTAUR,
15266 .set = centaur_set_mcr,
15267 .get = centaur_get_mcr,
15268diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
15269index 228d982..68a3343 100644
15270--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
15271+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
15272@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
15273 post_set();
15274 }
15275
15276-static struct mtrr_ops cyrix_mtrr_ops = {
15277+static const struct mtrr_ops cyrix_mtrr_ops = {
15278 .vendor = X86_VENDOR_CYRIX,
15279 .set_all = cyrix_set_all,
15280 .set = cyrix_set_arr,
15281diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
15282index 55da0c5..4d75584 100644
15283--- a/arch/x86/kernel/cpu/mtrr/generic.c
15284+++ b/arch/x86/kernel/cpu/mtrr/generic.c
15285@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
15286 /*
15287 * Generic structure...
15288 */
15289-struct mtrr_ops generic_mtrr_ops = {
15290+const struct mtrr_ops generic_mtrr_ops = {
15291 .use_intel_if = 1,
15292 .set_all = generic_set_all,
15293 .get = generic_get_mtrr,
15294diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
15295index 3c1b12d..454f6b6 100644
15296--- a/arch/x86/kernel/cpu/mtrr/if.c
15297+++ b/arch/x86/kernel/cpu/mtrr/if.c
15298@@ -89,6 +89,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
15299 * "base=%Lx size=%Lx type=%s" or "disable=%d"
15300 */
15301 static ssize_t
15302+mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
15303+static ssize_t
15304 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
15305 {
15306 int i, err;
15307diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15308index fd60f09..c94ef52 100644
15309--- a/arch/x86/kernel/cpu/mtrr/main.c
15310+++ b/arch/x86/kernel/cpu/mtrr/main.c
15311@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
15312 u64 size_or_mask, size_and_mask;
15313 static bool mtrr_aps_delayed_init;
15314
15315-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15316+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15317
15318-struct mtrr_ops *mtrr_if;
15319+const struct mtrr_ops *mtrr_if;
15320
15321 static void set_mtrr(unsigned int reg, unsigned long base,
15322 unsigned long size, mtrr_type type);
15323
15324-void set_mtrr_ops(struct mtrr_ops *ops)
15325+void set_mtrr_ops(const struct mtrr_ops *ops)
15326 {
15327 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
15328 mtrr_ops[ops->vendor] = ops;
15329diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15330index a501dee..816c719 100644
15331--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15332+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15333@@ -25,14 +25,14 @@ struct mtrr_ops {
15334 int (*validate_add_page)(unsigned long base, unsigned long size,
15335 unsigned int type);
15336 int (*have_wrcomb)(void);
15337-};
15338+} __do_const;
15339
15340 extern int generic_get_free_region(unsigned long base, unsigned long size,
15341 int replace_reg);
15342 extern int generic_validate_add_page(unsigned long base, unsigned long size,
15343 unsigned int type);
15344
15345-extern struct mtrr_ops generic_mtrr_ops;
15346+extern const struct mtrr_ops generic_mtrr_ops;
15347
15348 extern int positive_have_wrcomb(void);
15349
15350@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
15351 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
15352 void get_mtrr_state(void);
15353
15354-extern void set_mtrr_ops(struct mtrr_ops *ops);
15355+extern void set_mtrr_ops(const struct mtrr_ops *ops);
15356
15357 extern u64 size_or_mask, size_and_mask;
15358-extern struct mtrr_ops *mtrr_if;
15359+extern const struct mtrr_ops *mtrr_if;
15360
15361 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
15362 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
15363diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15364index 0ff02ca..fc49a60 100644
15365--- a/arch/x86/kernel/cpu/perf_event.c
15366+++ b/arch/x86/kernel/cpu/perf_event.c
15367@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
15368 * count to the generic event atomically:
15369 */
15370 again:
15371- prev_raw_count = atomic64_read(&hwc->prev_count);
15372+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
15373 rdmsrl(hwc->event_base + idx, new_raw_count);
15374
15375- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
15376+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
15377 new_raw_count) != prev_raw_count)
15378 goto again;
15379
15380@@ -741,7 +741,7 @@ again:
15381 delta = (new_raw_count << shift) - (prev_raw_count << shift);
15382 delta >>= shift;
15383
15384- atomic64_add(delta, &event->count);
15385+ atomic64_add_unchecked(delta, &event->count);
15386 atomic64_sub(delta, &hwc->period_left);
15387
15388 return new_raw_count;
15389@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
15390 * The hw event starts counting from this event offset,
15391 * mark it to be able to extra future deltas:
15392 */
15393- atomic64_set(&hwc->prev_count, (u64)-left);
15394+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
15395
15396 err = checking_wrmsrl(hwc->event_base + idx,
15397 (u64)(-left) & x86_pmu.event_mask);
15398@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
15399 break;
15400
15401 callchain_store(entry, frame.return_address);
15402- fp = frame.next_frame;
15403+ fp = (__force const void __user *)frame.next_frame;
15404 }
15405 }
15406
15407diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
15408index 898df97..9e82503 100644
15409--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
15410+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
15411@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
15412
15413 /* Interface defining a CPU specific perfctr watchdog */
15414 struct wd_ops {
15415- int (*reserve)(void);
15416- void (*unreserve)(void);
15417- int (*setup)(unsigned nmi_hz);
15418- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15419- void (*stop)(void);
15420+ int (* const reserve)(void);
15421+ void (* const unreserve)(void);
15422+ int (* const setup)(unsigned nmi_hz);
15423+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15424+ void (* const stop)(void);
15425 unsigned perfctr;
15426 unsigned evntsel;
15427 u64 checkbit;
15428@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
15429 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
15430 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
15431
15432+/* cannot be const */
15433 static struct wd_ops intel_arch_wd_ops;
15434
15435 static int setup_intel_arch_watchdog(unsigned nmi_hz)
15436@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
15437 return 1;
15438 }
15439
15440+/* cannot be const */
15441 static struct wd_ops intel_arch_wd_ops __read_mostly = {
15442 .reserve = single_msr_reserve,
15443 .unreserve = single_msr_unreserve,
15444diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15445index ff95824..2ffdcb5 100644
15446--- a/arch/x86/kernel/crash.c
15447+++ b/arch/x86/kernel/crash.c
15448@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
15449 regs = args->regs;
15450
15451 #ifdef CONFIG_X86_32
15452- if (!user_mode_vm(regs)) {
15453+ if (!user_mode(regs)) {
15454 crash_fixup_ss_esp(&fixed_regs, regs);
15455 regs = &fixed_regs;
15456 }
15457diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15458index 37250fe..bf2ec74 100644
15459--- a/arch/x86/kernel/doublefault_32.c
15460+++ b/arch/x86/kernel/doublefault_32.c
15461@@ -11,7 +11,7 @@
15462
15463 #define DOUBLEFAULT_STACKSIZE (1024)
15464 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15465-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15466+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15467
15468 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15469
15470@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15471 unsigned long gdt, tss;
15472
15473 store_gdt(&gdt_desc);
15474- gdt = gdt_desc.address;
15475+ gdt = (unsigned long)gdt_desc.address;
15476
15477 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15478
15479@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15480 /* 0x2 bit is always set */
15481 .flags = X86_EFLAGS_SF | 0x2,
15482 .sp = STACK_START,
15483- .es = __USER_DS,
15484+ .es = __KERNEL_DS,
15485 .cs = __KERNEL_CS,
15486 .ss = __KERNEL_DS,
15487- .ds = __USER_DS,
15488+ .ds = __KERNEL_DS,
15489 .fs = __KERNEL_PERCPU,
15490
15491 .__cr3 = __pa_nodebug(swapper_pg_dir),
15492diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15493index 2d8a371..4fa6ae6 100644
15494--- a/arch/x86/kernel/dumpstack.c
15495+++ b/arch/x86/kernel/dumpstack.c
15496@@ -2,6 +2,9 @@
15497 * Copyright (C) 1991, 1992 Linus Torvalds
15498 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15499 */
15500+#ifdef CONFIG_GRKERNSEC_HIDESYM
15501+#define __INCLUDED_BY_HIDESYM 1
15502+#endif
15503 #include <linux/kallsyms.h>
15504 #include <linux/kprobes.h>
15505 #include <linux/uaccess.h>
15506@@ -28,7 +31,7 @@ static int die_counter;
15507
15508 void printk_address(unsigned long address, int reliable)
15509 {
15510- printk(" [<%p>] %s%pS\n", (void *) address,
15511+ printk(" [<%p>] %s%pA\n", (void *) address,
15512 reliable ? "" : "? ", (void *) address);
15513 }
15514
15515@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
15516 static void
15517 print_ftrace_graph_addr(unsigned long addr, void *data,
15518 const struct stacktrace_ops *ops,
15519- struct thread_info *tinfo, int *graph)
15520+ struct task_struct *task, int *graph)
15521 {
15522- struct task_struct *task = tinfo->task;
15523 unsigned long ret_addr;
15524 int index = task->curr_ret_stack;
15525
15526@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15527 static inline void
15528 print_ftrace_graph_addr(unsigned long addr, void *data,
15529 const struct stacktrace_ops *ops,
15530- struct thread_info *tinfo, int *graph)
15531+ struct task_struct *task, int *graph)
15532 { }
15533 #endif
15534
15535@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15536 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15537 */
15538
15539-static inline int valid_stack_ptr(struct thread_info *tinfo,
15540- void *p, unsigned int size, void *end)
15541+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15542 {
15543- void *t = tinfo;
15544 if (end) {
15545 if (p < end && p >= (end-THREAD_SIZE))
15546 return 1;
15547@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15548 }
15549
15550 unsigned long
15551-print_context_stack(struct thread_info *tinfo,
15552+print_context_stack(struct task_struct *task, void *stack_start,
15553 unsigned long *stack, unsigned long bp,
15554 const struct stacktrace_ops *ops, void *data,
15555 unsigned long *end, int *graph)
15556 {
15557 struct stack_frame *frame = (struct stack_frame *)bp;
15558
15559- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15560+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15561 unsigned long addr;
15562
15563 addr = *stack;
15564@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
15565 } else {
15566 ops->address(data, addr, 0);
15567 }
15568- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15569+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15570 }
15571 stack++;
15572 }
15573@@ -180,7 +180,7 @@ void dump_stack(void)
15574 #endif
15575
15576 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15577- current->pid, current->comm, print_tainted(),
15578+ task_pid_nr(current), current->comm, print_tainted(),
15579 init_utsname()->release,
15580 (int)strcspn(init_utsname()->version, " "),
15581 init_utsname()->version);
15582@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
15583 return flags;
15584 }
15585
15586+extern void gr_handle_kernel_exploit(void);
15587+
15588 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15589 {
15590 if (regs && kexec_should_crash(current))
15591@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15592 panic("Fatal exception in interrupt");
15593 if (panic_on_oops)
15594 panic("Fatal exception");
15595- do_exit(signr);
15596+
15597+ gr_handle_kernel_exploit();
15598+
15599+ do_group_exit(signr);
15600 }
15601
15602 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15603@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15604 unsigned long flags = oops_begin();
15605 int sig = SIGSEGV;
15606
15607- if (!user_mode_vm(regs))
15608+ if (!user_mode(regs))
15609 report_bug(regs->ip, regs);
15610
15611 if (__die(str, regs, err))
15612diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
15613index 81086c2..13e8b17 100644
15614--- a/arch/x86/kernel/dumpstack.h
15615+++ b/arch/x86/kernel/dumpstack.h
15616@@ -15,7 +15,7 @@
15617 #endif
15618
15619 extern unsigned long
15620-print_context_stack(struct thread_info *tinfo,
15621+print_context_stack(struct task_struct *task, void *stack_start,
15622 unsigned long *stack, unsigned long bp,
15623 const struct stacktrace_ops *ops, void *data,
15624 unsigned long *end, int *graph);
15625diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15626index f7dd2a7..504f53b 100644
15627--- a/arch/x86/kernel/dumpstack_32.c
15628+++ b/arch/x86/kernel/dumpstack_32.c
15629@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15630 #endif
15631
15632 for (;;) {
15633- struct thread_info *context;
15634+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15635+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15636
15637- context = (struct thread_info *)
15638- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15639- bp = print_context_stack(context, stack, bp, ops,
15640- data, NULL, &graph);
15641-
15642- stack = (unsigned long *)context->previous_esp;
15643- if (!stack)
15644+ if (stack_start == task_stack_page(task))
15645 break;
15646+ stack = *(unsigned long **)stack_start;
15647 if (ops->stack(data, "IRQ") < 0)
15648 break;
15649 touch_nmi_watchdog();
15650@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
15651 * When in-kernel, we also print out the stack and code at the
15652 * time of the fault..
15653 */
15654- if (!user_mode_vm(regs)) {
15655+ if (!user_mode(regs)) {
15656 unsigned int code_prologue = code_bytes * 43 / 64;
15657 unsigned int code_len = code_bytes;
15658 unsigned char c;
15659 u8 *ip;
15660+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
15661
15662 printk(KERN_EMERG "Stack:\n");
15663 show_stack_log_lvl(NULL, regs, &regs->sp,
15664@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
15665
15666 printk(KERN_EMERG "Code: ");
15667
15668- ip = (u8 *)regs->ip - code_prologue;
15669+ ip = (u8 *)regs->ip - code_prologue + cs_base;
15670 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15671 /* try starting at IP */
15672- ip = (u8 *)regs->ip;
15673+ ip = (u8 *)regs->ip + cs_base;
15674 code_len = code_len - code_prologue + 1;
15675 }
15676 for (i = 0; i < code_len; i++, ip++) {
15677@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
15678 printk(" Bad EIP value.");
15679 break;
15680 }
15681- if (ip == (u8 *)regs->ip)
15682+ if (ip == (u8 *)regs->ip + cs_base)
15683 printk("<%02x> ", c);
15684 else
15685 printk("%02x ", c);
15686@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
15687 printk("\n");
15688 }
15689
15690+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15691+void pax_check_alloca(unsigned long size)
15692+{
15693+ unsigned long sp = (unsigned long)&sp, stack_left;
15694+
15695+ /* all kernel stacks are of the same size */
15696+ stack_left = sp & (THREAD_SIZE - 1);
15697+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15698+}
15699+EXPORT_SYMBOL(pax_check_alloca);
15700+#endif
15701+
15702 int is_valid_bugaddr(unsigned long ip)
15703 {
15704 unsigned short ud2;
15705
15706+ ip = ktla_ktva(ip);
15707 if (ip < PAGE_OFFSET)
15708 return 0;
15709 if (probe_kernel_address((unsigned short *)ip, ud2))
15710diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15711index a071e6b..36cd585 100644
15712--- a/arch/x86/kernel/dumpstack_64.c
15713+++ b/arch/x86/kernel/dumpstack_64.c
15714@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15715 unsigned long *irq_stack_end =
15716 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15717 unsigned used = 0;
15718- struct thread_info *tinfo;
15719 int graph = 0;
15720+ void *stack_start;
15721
15722 if (!task)
15723 task = current;
15724@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15725 * current stack address. If the stacks consist of nested
15726 * exceptions
15727 */
15728- tinfo = task_thread_info(task);
15729 for (;;) {
15730 char *id;
15731 unsigned long *estack_end;
15732+
15733 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15734 &used, &id);
15735
15736@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15737 if (ops->stack(data, id) < 0)
15738 break;
15739
15740- bp = print_context_stack(tinfo, stack, bp, ops,
15741+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15742 data, estack_end, &graph);
15743 ops->stack(data, "<EOE>");
15744 /*
15745@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15746 if (stack >= irq_stack && stack < irq_stack_end) {
15747 if (ops->stack(data, "IRQ") < 0)
15748 break;
15749- bp = print_context_stack(tinfo, stack, bp,
15750+ bp = print_context_stack(task, irq_stack, stack, bp,
15751 ops, data, irq_stack_end, &graph);
15752 /*
15753 * We link to the next stack (which would be
15754@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15755 /*
15756 * This handles the process stack:
15757 */
15758- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15759+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15760+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15761 put_cpu();
15762 }
15763 EXPORT_SYMBOL(dump_trace);
15764@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
15765 return ud2 == 0x0b0f;
15766 }
15767
15768+
15769+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15770+void pax_check_alloca(unsigned long size)
15771+{
15772+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15773+ unsigned cpu, used;
15774+ char *id;
15775+
15776+ /* check the process stack first */
15777+ stack_start = (unsigned long)task_stack_page(current);
15778+ stack_end = stack_start + THREAD_SIZE;
15779+ if (likely(stack_start <= sp && sp < stack_end)) {
15780+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
15781+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15782+ return;
15783+ }
15784+
15785+ cpu = get_cpu();
15786+
15787+ /* check the irq stacks */
15788+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15789+ stack_start = stack_end - IRQ_STACK_SIZE;
15790+ if (stack_start <= sp && sp < stack_end) {
15791+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15792+ put_cpu();
15793+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15794+ return;
15795+ }
15796+
15797+ /* check the exception stacks */
15798+ used = 0;
15799+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15800+ stack_start = stack_end - EXCEPTION_STKSZ;
15801+ if (stack_end && stack_start <= sp && sp < stack_end) {
15802+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15803+ put_cpu();
15804+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15805+ return;
15806+ }
15807+
15808+ put_cpu();
15809+
15810+ /* unknown stack */
15811+ BUG();
15812+}
15813+EXPORT_SYMBOL(pax_check_alloca);
15814+#endif
15815diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
15816index a89739a..95e0c48 100644
15817--- a/arch/x86/kernel/e820.c
15818+++ b/arch/x86/kernel/e820.c
15819@@ -733,7 +733,7 @@ struct early_res {
15820 };
15821 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
15822 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
15823- {}
15824+ { 0, 0, {0}, 0 }
15825 };
15826
15827 static int __init find_overlapped_early(u64 start, u64 end)
15828diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15829index b9c830c..1e41a96 100644
15830--- a/arch/x86/kernel/early_printk.c
15831+++ b/arch/x86/kernel/early_printk.c
15832@@ -7,6 +7,7 @@
15833 #include <linux/pci_regs.h>
15834 #include <linux/pci_ids.h>
15835 #include <linux/errno.h>
15836+#include <linux/sched.h>
15837 #include <asm/io.h>
15838 #include <asm/processor.h>
15839 #include <asm/fcntl.h>
15840@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
15841 int n;
15842 va_list ap;
15843
15844+ pax_track_stack();
15845+
15846 va_start(ap, fmt);
15847 n = vscnprintf(buf, sizeof(buf), fmt, ap);
15848 early_console->write(early_console, buf, n);
15849diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
15850index 5cab48e..b025f9b 100644
15851--- a/arch/x86/kernel/efi_32.c
15852+++ b/arch/x86/kernel/efi_32.c
15853@@ -38,70 +38,56 @@
15854 */
15855
15856 static unsigned long efi_rt_eflags;
15857-static pgd_t efi_bak_pg_dir_pointer[2];
15858+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
15859
15860-void efi_call_phys_prelog(void)
15861+void __init efi_call_phys_prelog(void)
15862 {
15863- unsigned long cr4;
15864- unsigned long temp;
15865 struct desc_ptr gdt_descr;
15866
15867+#ifdef CONFIG_PAX_KERNEXEC
15868+ struct desc_struct d;
15869+#endif
15870+
15871 local_irq_save(efi_rt_eflags);
15872
15873- /*
15874- * If I don't have PAE, I should just duplicate two entries in page
15875- * directory. If I have PAE, I just need to duplicate one entry in
15876- * page directory.
15877- */
15878- cr4 = read_cr4_safe();
15879-
15880- if (cr4 & X86_CR4_PAE) {
15881- efi_bak_pg_dir_pointer[0].pgd =
15882- swapper_pg_dir[pgd_index(0)].pgd;
15883- swapper_pg_dir[0].pgd =
15884- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15885- } else {
15886- efi_bak_pg_dir_pointer[0].pgd =
15887- swapper_pg_dir[pgd_index(0)].pgd;
15888- efi_bak_pg_dir_pointer[1].pgd =
15889- swapper_pg_dir[pgd_index(0x400000)].pgd;
15890- swapper_pg_dir[pgd_index(0)].pgd =
15891- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15892- temp = PAGE_OFFSET + 0x400000;
15893- swapper_pg_dir[pgd_index(0x400000)].pgd =
15894- swapper_pg_dir[pgd_index(temp)].pgd;
15895- }
15896+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
15897+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15898+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15899
15900 /*
15901 * After the lock is released, the original page table is restored.
15902 */
15903 __flush_tlb_all();
15904
15905+#ifdef CONFIG_PAX_KERNEXEC
15906+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
15907+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15908+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
15909+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15910+#endif
15911+
15912 gdt_descr.address = __pa(get_cpu_gdt_table(0));
15913 gdt_descr.size = GDT_SIZE - 1;
15914 load_gdt(&gdt_descr);
15915 }
15916
15917-void efi_call_phys_epilog(void)
15918+void __init efi_call_phys_epilog(void)
15919 {
15920- unsigned long cr4;
15921 struct desc_ptr gdt_descr;
15922
15923+#ifdef CONFIG_PAX_KERNEXEC
15924+ struct desc_struct d;
15925+
15926+ memset(&d, 0, sizeof d);
15927+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15928+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15929+#endif
15930+
15931 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
15932 gdt_descr.size = GDT_SIZE - 1;
15933 load_gdt(&gdt_descr);
15934
15935- cr4 = read_cr4_safe();
15936-
15937- if (cr4 & X86_CR4_PAE) {
15938- swapper_pg_dir[pgd_index(0)].pgd =
15939- efi_bak_pg_dir_pointer[0].pgd;
15940- } else {
15941- swapper_pg_dir[pgd_index(0)].pgd =
15942- efi_bak_pg_dir_pointer[0].pgd;
15943- swapper_pg_dir[pgd_index(0x400000)].pgd =
15944- efi_bak_pg_dir_pointer[1].pgd;
15945- }
15946+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
15947
15948 /*
15949 * After the lock is released, the original page table is restored.
15950diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
15951index fbe66e6..c5c0dd2 100644
15952--- a/arch/x86/kernel/efi_stub_32.S
15953+++ b/arch/x86/kernel/efi_stub_32.S
15954@@ -6,7 +6,9 @@
15955 */
15956
15957 #include <linux/linkage.h>
15958+#include <linux/init.h>
15959 #include <asm/page_types.h>
15960+#include <asm/segment.h>
15961
15962 /*
15963 * efi_call_phys(void *, ...) is a function with variable parameters.
15964@@ -20,7 +22,7 @@
15965 * service functions will comply with gcc calling convention, too.
15966 */
15967
15968-.text
15969+__INIT
15970 ENTRY(efi_call_phys)
15971 /*
15972 * 0. The function can only be called in Linux kernel. So CS has been
15973@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
15974 * The mapping of lower virtual memory has been created in prelog and
15975 * epilog.
15976 */
15977- movl $1f, %edx
15978- subl $__PAGE_OFFSET, %edx
15979- jmp *%edx
15980+ movl $(__KERNEXEC_EFI_DS), %edx
15981+ mov %edx, %ds
15982+ mov %edx, %es
15983+ mov %edx, %ss
15984+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
15985 1:
15986
15987 /*
15988@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
15989 * parameter 2, ..., param n. To make things easy, we save the return
15990 * address of efi_call_phys in a global variable.
15991 */
15992- popl %edx
15993- movl %edx, saved_return_addr
15994- /* get the function pointer into ECX*/
15995- popl %ecx
15996- movl %ecx, efi_rt_function_ptr
15997- movl $2f, %edx
15998- subl $__PAGE_OFFSET, %edx
15999- pushl %edx
16000+ popl (saved_return_addr)
16001+ popl (efi_rt_function_ptr)
16002
16003 /*
16004 * 3. Clear PG bit in %CR0.
16005@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
16006 /*
16007 * 5. Call the physical function.
16008 */
16009- jmp *%ecx
16010+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
16011
16012-2:
16013 /*
16014 * 6. After EFI runtime service returns, control will return to
16015 * following instruction. We'd better readjust stack pointer first.
16016@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
16017 movl %cr0, %edx
16018 orl $0x80000000, %edx
16019 movl %edx, %cr0
16020- jmp 1f
16021-1:
16022+
16023 /*
16024 * 8. Now restore the virtual mode from flat mode by
16025 * adding EIP with PAGE_OFFSET.
16026 */
16027- movl $1f, %edx
16028- jmp *%edx
16029+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
16030 1:
16031+ movl $(__KERNEL_DS), %edx
16032+ mov %edx, %ds
16033+ mov %edx, %es
16034+ mov %edx, %ss
16035
16036 /*
16037 * 9. Balance the stack. And because EAX contain the return value,
16038 * we'd better not clobber it.
16039 */
16040- leal efi_rt_function_ptr, %edx
16041- movl (%edx), %ecx
16042- pushl %ecx
16043+ pushl (efi_rt_function_ptr)
16044
16045 /*
16046- * 10. Push the saved return address onto the stack and return.
16047+ * 10. Return to the saved return address.
16048 */
16049- leal saved_return_addr, %edx
16050- movl (%edx), %ecx
16051- pushl %ecx
16052- ret
16053+ jmpl *(saved_return_addr)
16054 ENDPROC(efi_call_phys)
16055 .previous
16056
16057-.data
16058+__INITDATA
16059 saved_return_addr:
16060 .long 0
16061 efi_rt_function_ptr:
16062diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
16063index 4c07cca..2c8427d 100644
16064--- a/arch/x86/kernel/efi_stub_64.S
16065+++ b/arch/x86/kernel/efi_stub_64.S
16066@@ -7,6 +7,7 @@
16067 */
16068
16069 #include <linux/linkage.h>
16070+#include <asm/alternative-asm.h>
16071
16072 #define SAVE_XMM \
16073 mov %rsp, %rax; \
16074@@ -40,6 +41,7 @@ ENTRY(efi_call0)
16075 call *%rdi
16076 addq $32, %rsp
16077 RESTORE_XMM
16078+ pax_force_retaddr 0, 1
16079 ret
16080 ENDPROC(efi_call0)
16081
16082@@ -50,6 +52,7 @@ ENTRY(efi_call1)
16083 call *%rdi
16084 addq $32, %rsp
16085 RESTORE_XMM
16086+ pax_force_retaddr 0, 1
16087 ret
16088 ENDPROC(efi_call1)
16089
16090@@ -60,6 +63,7 @@ ENTRY(efi_call2)
16091 call *%rdi
16092 addq $32, %rsp
16093 RESTORE_XMM
16094+ pax_force_retaddr 0, 1
16095 ret
16096 ENDPROC(efi_call2)
16097
16098@@ -71,6 +75,7 @@ ENTRY(efi_call3)
16099 call *%rdi
16100 addq $32, %rsp
16101 RESTORE_XMM
16102+ pax_force_retaddr 0, 1
16103 ret
16104 ENDPROC(efi_call3)
16105
16106@@ -83,6 +88,7 @@ ENTRY(efi_call4)
16107 call *%rdi
16108 addq $32, %rsp
16109 RESTORE_XMM
16110+ pax_force_retaddr 0, 1
16111 ret
16112 ENDPROC(efi_call4)
16113
16114@@ -96,6 +102,7 @@ ENTRY(efi_call5)
16115 call *%rdi
16116 addq $48, %rsp
16117 RESTORE_XMM
16118+ pax_force_retaddr 0, 1
16119 ret
16120 ENDPROC(efi_call5)
16121
16122@@ -112,5 +119,6 @@ ENTRY(efi_call6)
16123 call *%rdi
16124 addq $48, %rsp
16125 RESTORE_XMM
16126+ pax_force_retaddr 0, 1
16127 ret
16128 ENDPROC(efi_call6)
16129diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
16130index c097e7d..c689cf4 100644
16131--- a/arch/x86/kernel/entry_32.S
16132+++ b/arch/x86/kernel/entry_32.S
16133@@ -185,13 +185,146 @@
16134 /*CFI_REL_OFFSET gs, PT_GS*/
16135 .endm
16136 .macro SET_KERNEL_GS reg
16137+
16138+#ifdef CONFIG_CC_STACKPROTECTOR
16139 movl $(__KERNEL_STACK_CANARY), \reg
16140+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16141+ movl $(__USER_DS), \reg
16142+#else
16143+ xorl \reg, \reg
16144+#endif
16145+
16146 movl \reg, %gs
16147 .endm
16148
16149 #endif /* CONFIG_X86_32_LAZY_GS */
16150
16151-.macro SAVE_ALL
16152+.macro pax_enter_kernel
16153+#ifdef CONFIG_PAX_KERNEXEC
16154+ call pax_enter_kernel
16155+#endif
16156+.endm
16157+
16158+.macro pax_exit_kernel
16159+#ifdef CONFIG_PAX_KERNEXEC
16160+ call pax_exit_kernel
16161+#endif
16162+.endm
16163+
16164+#ifdef CONFIG_PAX_KERNEXEC
16165+ENTRY(pax_enter_kernel)
16166+#ifdef CONFIG_PARAVIRT
16167+ pushl %eax
16168+ pushl %ecx
16169+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16170+ mov %eax, %esi
16171+#else
16172+ mov %cr0, %esi
16173+#endif
16174+ bts $16, %esi
16175+ jnc 1f
16176+ mov %cs, %esi
16177+ cmp $__KERNEL_CS, %esi
16178+ jz 3f
16179+ ljmp $__KERNEL_CS, $3f
16180+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16181+2:
16182+#ifdef CONFIG_PARAVIRT
16183+ mov %esi, %eax
16184+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16185+#else
16186+ mov %esi, %cr0
16187+#endif
16188+3:
16189+#ifdef CONFIG_PARAVIRT
16190+ popl %ecx
16191+ popl %eax
16192+#endif
16193+ ret
16194+ENDPROC(pax_enter_kernel)
16195+
16196+ENTRY(pax_exit_kernel)
16197+#ifdef CONFIG_PARAVIRT
16198+ pushl %eax
16199+ pushl %ecx
16200+#endif
16201+ mov %cs, %esi
16202+ cmp $__KERNEXEC_KERNEL_CS, %esi
16203+ jnz 2f
16204+#ifdef CONFIG_PARAVIRT
16205+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16206+ mov %eax, %esi
16207+#else
16208+ mov %cr0, %esi
16209+#endif
16210+ btr $16, %esi
16211+ ljmp $__KERNEL_CS, $1f
16212+1:
16213+#ifdef CONFIG_PARAVIRT
16214+ mov %esi, %eax
16215+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16216+#else
16217+ mov %esi, %cr0
16218+#endif
16219+2:
16220+#ifdef CONFIG_PARAVIRT
16221+ popl %ecx
16222+ popl %eax
16223+#endif
16224+ ret
16225+ENDPROC(pax_exit_kernel)
16226+#endif
16227+
16228+.macro pax_erase_kstack
16229+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16230+ call pax_erase_kstack
16231+#endif
16232+.endm
16233+
16234+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16235+/*
16236+ * ebp: thread_info
16237+ * ecx, edx: can be clobbered
16238+ */
16239+ENTRY(pax_erase_kstack)
16240+ pushl %edi
16241+ pushl %eax
16242+
16243+ mov TI_lowest_stack(%ebp), %edi
16244+ mov $-0xBEEF, %eax
16245+ std
16246+
16247+1: mov %edi, %ecx
16248+ and $THREAD_SIZE_asm - 1, %ecx
16249+ shr $2, %ecx
16250+ repne scasl
16251+ jecxz 2f
16252+
16253+ cmp $2*16, %ecx
16254+ jc 2f
16255+
16256+ mov $2*16, %ecx
16257+ repe scasl
16258+ jecxz 2f
16259+ jne 1b
16260+
16261+2: cld
16262+ mov %esp, %ecx
16263+ sub %edi, %ecx
16264+ shr $2, %ecx
16265+ rep stosl
16266+
16267+ mov TI_task_thread_sp0(%ebp), %edi
16268+ sub $128, %edi
16269+ mov %edi, TI_lowest_stack(%ebp)
16270+
16271+ popl %eax
16272+ popl %edi
16273+ ret
16274+ENDPROC(pax_erase_kstack)
16275+#endif
16276+
16277+.macro __SAVE_ALL _DS
16278 cld
16279 PUSH_GS
16280 pushl %fs
16281@@ -224,7 +357,7 @@
16282 pushl %ebx
16283 CFI_ADJUST_CFA_OFFSET 4
16284 CFI_REL_OFFSET ebx, 0
16285- movl $(__USER_DS), %edx
16286+ movl $\_DS, %edx
16287 movl %edx, %ds
16288 movl %edx, %es
16289 movl $(__KERNEL_PERCPU), %edx
16290@@ -232,6 +365,15 @@
16291 SET_KERNEL_GS %edx
16292 .endm
16293
16294+.macro SAVE_ALL
16295+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16296+ __SAVE_ALL __KERNEL_DS
16297+ pax_enter_kernel
16298+#else
16299+ __SAVE_ALL __USER_DS
16300+#endif
16301+.endm
16302+
16303 .macro RESTORE_INT_REGS
16304 popl %ebx
16305 CFI_ADJUST_CFA_OFFSET -4
16306@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
16307 CFI_ADJUST_CFA_OFFSET -4
16308 jmp syscall_exit
16309 CFI_ENDPROC
16310-END(ret_from_fork)
16311+ENDPROC(ret_from_fork)
16312
16313 /*
16314 * Return to user mode is not as complex as all this looks,
16315@@ -352,7 +494,15 @@ check_userspace:
16316 movb PT_CS(%esp), %al
16317 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
16318 cmpl $USER_RPL, %eax
16319+
16320+#ifdef CONFIG_PAX_KERNEXEC
16321+ jae resume_userspace
16322+
16323+ PAX_EXIT_KERNEL
16324+ jmp resume_kernel
16325+#else
16326 jb resume_kernel # not returning to v8086 or userspace
16327+#endif
16328
16329 ENTRY(resume_userspace)
16330 LOCKDEP_SYS_EXIT
16331@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
16332 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16333 # int/exception return?
16334 jne work_pending
16335- jmp restore_all
16336-END(ret_from_exception)
16337+ jmp restore_all_pax
16338+ENDPROC(ret_from_exception)
16339
16340 #ifdef CONFIG_PREEMPT
16341 ENTRY(resume_kernel)
16342@@ -380,7 +530,7 @@ need_resched:
16343 jz restore_all
16344 call preempt_schedule_irq
16345 jmp need_resched
16346-END(resume_kernel)
16347+ENDPROC(resume_kernel)
16348 #endif
16349 CFI_ENDPROC
16350
16351@@ -414,25 +564,36 @@ sysenter_past_esp:
16352 /*CFI_REL_OFFSET cs, 0*/
16353 /*
16354 * Push current_thread_info()->sysenter_return to the stack.
16355- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16356- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16357 */
16358- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
16359+ pushl $0
16360 CFI_ADJUST_CFA_OFFSET 4
16361 CFI_REL_OFFSET eip, 0
16362
16363 pushl %eax
16364 CFI_ADJUST_CFA_OFFSET 4
16365 SAVE_ALL
16366+ GET_THREAD_INFO(%ebp)
16367+ movl TI_sysenter_return(%ebp),%ebp
16368+ movl %ebp,PT_EIP(%esp)
16369 ENABLE_INTERRUPTS(CLBR_NONE)
16370
16371 /*
16372 * Load the potential sixth argument from user stack.
16373 * Careful about security.
16374 */
16375+ movl PT_OLDESP(%esp),%ebp
16376+
16377+#ifdef CONFIG_PAX_MEMORY_UDEREF
16378+ mov PT_OLDSS(%esp),%ds
16379+1: movl %ds:(%ebp),%ebp
16380+ push %ss
16381+ pop %ds
16382+#else
16383 cmpl $__PAGE_OFFSET-3,%ebp
16384 jae syscall_fault
16385 1: movl (%ebp),%ebp
16386+#endif
16387+
16388 movl %ebp,PT_EBP(%esp)
16389 .section __ex_table,"a"
16390 .align 4
16391@@ -455,12 +616,24 @@ sysenter_do_call:
16392 testl $_TIF_ALLWORK_MASK, %ecx
16393 jne sysexit_audit
16394 sysenter_exit:
16395+
16396+#ifdef CONFIG_PAX_RANDKSTACK
16397+ pushl_cfi %eax
16398+ movl %esp, %eax
16399+ call pax_randomize_kstack
16400+ popl_cfi %eax
16401+#endif
16402+
16403+ pax_erase_kstack
16404+
16405 /* if something modifies registers it must also disable sysexit */
16406 movl PT_EIP(%esp), %edx
16407 movl PT_OLDESP(%esp), %ecx
16408 xorl %ebp,%ebp
16409 TRACE_IRQS_ON
16410 1: mov PT_FS(%esp), %fs
16411+2: mov PT_DS(%esp), %ds
16412+3: mov PT_ES(%esp), %es
16413 PTGS_TO_GS
16414 ENABLE_INTERRUPTS_SYSEXIT
16415
16416@@ -477,6 +650,9 @@ sysenter_audit:
16417 movl %eax,%edx /* 2nd arg: syscall number */
16418 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16419 call audit_syscall_entry
16420+
16421+ pax_erase_kstack
16422+
16423 pushl %ebx
16424 CFI_ADJUST_CFA_OFFSET 4
16425 movl PT_EAX(%esp),%eax /* reload syscall number */
16426@@ -504,11 +680,17 @@ sysexit_audit:
16427
16428 CFI_ENDPROC
16429 .pushsection .fixup,"ax"
16430-2: movl $0,PT_FS(%esp)
16431+4: movl $0,PT_FS(%esp)
16432+ jmp 1b
16433+5: movl $0,PT_DS(%esp)
16434+ jmp 1b
16435+6: movl $0,PT_ES(%esp)
16436 jmp 1b
16437 .section __ex_table,"a"
16438 .align 4
16439- .long 1b,2b
16440+ .long 1b,4b
16441+ .long 2b,5b
16442+ .long 3b,6b
16443 .popsection
16444 PTGS_TO_GS_EX
16445 ENDPROC(ia32_sysenter_target)
16446@@ -538,6 +720,15 @@ syscall_exit:
16447 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16448 jne syscall_exit_work
16449
16450+restore_all_pax:
16451+
16452+#ifdef CONFIG_PAX_RANDKSTACK
16453+ movl %esp, %eax
16454+ call pax_randomize_kstack
16455+#endif
16456+
16457+ pax_erase_kstack
16458+
16459 restore_all:
16460 TRACE_IRQS_IRET
16461 restore_all_notrace:
16462@@ -602,10 +793,29 @@ ldt_ss:
16463 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16464 mov %dx, %ax /* eax: new kernel esp */
16465 sub %eax, %edx /* offset (low word is 0) */
16466- PER_CPU(gdt_page, %ebx)
16467+#ifdef CONFIG_SMP
16468+ movl PER_CPU_VAR(cpu_number), %ebx
16469+ shll $PAGE_SHIFT_asm, %ebx
16470+ addl $cpu_gdt_table, %ebx
16471+#else
16472+ movl $cpu_gdt_table, %ebx
16473+#endif
16474 shr $16, %edx
16475+
16476+#ifdef CONFIG_PAX_KERNEXEC
16477+ mov %cr0, %esi
16478+ btr $16, %esi
16479+ mov %esi, %cr0
16480+#endif
16481+
16482 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
16483 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
16484+
16485+#ifdef CONFIG_PAX_KERNEXEC
16486+ bts $16, %esi
16487+ mov %esi, %cr0
16488+#endif
16489+
16490 pushl $__ESPFIX_SS
16491 CFI_ADJUST_CFA_OFFSET 4
16492 push %eax /* new kernel esp */
16493@@ -636,36 +846,30 @@ work_resched:
16494 movl TI_flags(%ebp), %ecx
16495 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16496 # than syscall tracing?
16497- jz restore_all
16498+ jz restore_all_pax
16499 testb $_TIF_NEED_RESCHED, %cl
16500 jnz work_resched
16501
16502 work_notifysig: # deal with pending signals and
16503 # notify-resume requests
16504+ movl %esp, %eax
16505 #ifdef CONFIG_VM86
16506 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16507- movl %esp, %eax
16508- jne work_notifysig_v86 # returning to kernel-space or
16509+ jz 1f # returning to kernel-space or
16510 # vm86-space
16511- xorl %edx, %edx
16512- call do_notify_resume
16513- jmp resume_userspace_sig
16514
16515- ALIGN
16516-work_notifysig_v86:
16517 pushl %ecx # save ti_flags for do_notify_resume
16518 CFI_ADJUST_CFA_OFFSET 4
16519 call save_v86_state # %eax contains pt_regs pointer
16520 popl %ecx
16521 CFI_ADJUST_CFA_OFFSET -4
16522 movl %eax, %esp
16523-#else
16524- movl %esp, %eax
16525+1:
16526 #endif
16527 xorl %edx, %edx
16528 call do_notify_resume
16529 jmp resume_userspace_sig
16530-END(work_pending)
16531+ENDPROC(work_pending)
16532
16533 # perform syscall exit tracing
16534 ALIGN
16535@@ -673,11 +877,14 @@ syscall_trace_entry:
16536 movl $-ENOSYS,PT_EAX(%esp)
16537 movl %esp, %eax
16538 call syscall_trace_enter
16539+
16540+ pax_erase_kstack
16541+
16542 /* What it returned is what we'll actually use. */
16543 cmpl $(nr_syscalls), %eax
16544 jnae syscall_call
16545 jmp syscall_exit
16546-END(syscall_trace_entry)
16547+ENDPROC(syscall_trace_entry)
16548
16549 # perform syscall exit tracing
16550 ALIGN
16551@@ -690,20 +897,24 @@ syscall_exit_work:
16552 movl %esp, %eax
16553 call syscall_trace_leave
16554 jmp resume_userspace
16555-END(syscall_exit_work)
16556+ENDPROC(syscall_exit_work)
16557 CFI_ENDPROC
16558
16559 RING0_INT_FRAME # can't unwind into user space anyway
16560 syscall_fault:
16561+#ifdef CONFIG_PAX_MEMORY_UDEREF
16562+ push %ss
16563+ pop %ds
16564+#endif
16565 GET_THREAD_INFO(%ebp)
16566 movl $-EFAULT,PT_EAX(%esp)
16567 jmp resume_userspace
16568-END(syscall_fault)
16569+ENDPROC(syscall_fault)
16570
16571 syscall_badsys:
16572 movl $-ENOSYS,PT_EAX(%esp)
16573 jmp resume_userspace
16574-END(syscall_badsys)
16575+ENDPROC(syscall_badsys)
16576 CFI_ENDPROC
16577
16578 /*
16579@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
16580 PTREGSCALL(vm86)
16581 PTREGSCALL(vm86old)
16582
16583+ ALIGN;
16584+ENTRY(kernel_execve)
16585+ push %ebp
16586+ sub $PT_OLDSS+4,%esp
16587+ push %edi
16588+ push %ecx
16589+ push %eax
16590+ lea 3*4(%esp),%edi
16591+ mov $PT_OLDSS/4+1,%ecx
16592+ xorl %eax,%eax
16593+ rep stosl
16594+ pop %eax
16595+ pop %ecx
16596+ pop %edi
16597+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16598+ mov %eax,PT_EBX(%esp)
16599+ mov %edx,PT_ECX(%esp)
16600+ mov %ecx,PT_EDX(%esp)
16601+ mov %esp,%eax
16602+ call sys_execve
16603+ GET_THREAD_INFO(%ebp)
16604+ test %eax,%eax
16605+ jz syscall_exit
16606+ add $PT_OLDSS+4,%esp
16607+ pop %ebp
16608+ ret
16609+
16610 .macro FIXUP_ESPFIX_STACK
16611 /*
16612 * Switch back for ESPFIX stack to the normal zerobased stack
16613@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
16614 * normal stack and adjusts ESP with the matching offset.
16615 */
16616 /* fixup the stack */
16617- PER_CPU(gdt_page, %ebx)
16618+#ifdef CONFIG_SMP
16619+ movl PER_CPU_VAR(cpu_number), %ebx
16620+ shll $PAGE_SHIFT_asm, %ebx
16621+ addl $cpu_gdt_table, %ebx
16622+#else
16623+ movl $cpu_gdt_table, %ebx
16624+#endif
16625 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
16626 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
16627 shl $16, %eax
16628@@ -793,7 +1037,7 @@ vector=vector+1
16629 .endr
16630 2: jmp common_interrupt
16631 .endr
16632-END(irq_entries_start)
16633+ENDPROC(irq_entries_start)
16634
16635 .previous
16636 END(interrupt)
16637@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
16638 CFI_ADJUST_CFA_OFFSET 4
16639 jmp error_code
16640 CFI_ENDPROC
16641-END(coprocessor_error)
16642+ENDPROC(coprocessor_error)
16643
16644 ENTRY(simd_coprocessor_error)
16645 RING0_INT_FRAME
16646@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
16647 CFI_ADJUST_CFA_OFFSET 4
16648 jmp error_code
16649 CFI_ENDPROC
16650-END(simd_coprocessor_error)
16651+ENDPROC(simd_coprocessor_error)
16652
16653 ENTRY(device_not_available)
16654 RING0_INT_FRAME
16655@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
16656 CFI_ADJUST_CFA_OFFSET 4
16657 jmp error_code
16658 CFI_ENDPROC
16659-END(device_not_available)
16660+ENDPROC(device_not_available)
16661
16662 #ifdef CONFIG_PARAVIRT
16663 ENTRY(native_iret)
16664@@ -869,12 +1113,12 @@ ENTRY(native_iret)
16665 .align 4
16666 .long native_iret, iret_exc
16667 .previous
16668-END(native_iret)
16669+ENDPROC(native_iret)
16670
16671 ENTRY(native_irq_enable_sysexit)
16672 sti
16673 sysexit
16674-END(native_irq_enable_sysexit)
16675+ENDPROC(native_irq_enable_sysexit)
16676 #endif
16677
16678 ENTRY(overflow)
16679@@ -885,7 +1129,7 @@ ENTRY(overflow)
16680 CFI_ADJUST_CFA_OFFSET 4
16681 jmp error_code
16682 CFI_ENDPROC
16683-END(overflow)
16684+ENDPROC(overflow)
16685
16686 ENTRY(bounds)
16687 RING0_INT_FRAME
16688@@ -895,7 +1139,7 @@ ENTRY(bounds)
16689 CFI_ADJUST_CFA_OFFSET 4
16690 jmp error_code
16691 CFI_ENDPROC
16692-END(bounds)
16693+ENDPROC(bounds)
16694
16695 ENTRY(invalid_op)
16696 RING0_INT_FRAME
16697@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
16698 CFI_ADJUST_CFA_OFFSET 4
16699 jmp error_code
16700 CFI_ENDPROC
16701-END(invalid_op)
16702+ENDPROC(invalid_op)
16703
16704 ENTRY(coprocessor_segment_overrun)
16705 RING0_INT_FRAME
16706@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
16707 CFI_ADJUST_CFA_OFFSET 4
16708 jmp error_code
16709 CFI_ENDPROC
16710-END(coprocessor_segment_overrun)
16711+ENDPROC(coprocessor_segment_overrun)
16712
16713 ENTRY(invalid_TSS)
16714 RING0_EC_FRAME
16715@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
16716 CFI_ADJUST_CFA_OFFSET 4
16717 jmp error_code
16718 CFI_ENDPROC
16719-END(invalid_TSS)
16720+ENDPROC(invalid_TSS)
16721
16722 ENTRY(segment_not_present)
16723 RING0_EC_FRAME
16724@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
16725 CFI_ADJUST_CFA_OFFSET 4
16726 jmp error_code
16727 CFI_ENDPROC
16728-END(segment_not_present)
16729+ENDPROC(segment_not_present)
16730
16731 ENTRY(stack_segment)
16732 RING0_EC_FRAME
16733@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
16734 CFI_ADJUST_CFA_OFFSET 4
16735 jmp error_code
16736 CFI_ENDPROC
16737-END(stack_segment)
16738+ENDPROC(stack_segment)
16739
16740 ENTRY(alignment_check)
16741 RING0_EC_FRAME
16742@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
16743 CFI_ADJUST_CFA_OFFSET 4
16744 jmp error_code
16745 CFI_ENDPROC
16746-END(alignment_check)
16747+ENDPROC(alignment_check)
16748
16749 ENTRY(divide_error)
16750 RING0_INT_FRAME
16751@@ -957,7 +1201,7 @@ ENTRY(divide_error)
16752 CFI_ADJUST_CFA_OFFSET 4
16753 jmp error_code
16754 CFI_ENDPROC
16755-END(divide_error)
16756+ENDPROC(divide_error)
16757
16758 #ifdef CONFIG_X86_MCE
16759 ENTRY(machine_check)
16760@@ -968,7 +1212,7 @@ ENTRY(machine_check)
16761 CFI_ADJUST_CFA_OFFSET 4
16762 jmp error_code
16763 CFI_ENDPROC
16764-END(machine_check)
16765+ENDPROC(machine_check)
16766 #endif
16767
16768 ENTRY(spurious_interrupt_bug)
16769@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
16770 CFI_ADJUST_CFA_OFFSET 4
16771 jmp error_code
16772 CFI_ENDPROC
16773-END(spurious_interrupt_bug)
16774+ENDPROC(spurious_interrupt_bug)
16775
16776 ENTRY(kernel_thread_helper)
16777 pushl $0 # fake return address for unwinder
16778@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
16779
16780 ENTRY(mcount)
16781 ret
16782-END(mcount)
16783+ENDPROC(mcount)
16784
16785 ENTRY(ftrace_caller)
16786 cmpl $0, function_trace_stop
16787@@ -1124,7 +1368,7 @@ ftrace_graph_call:
16788 .globl ftrace_stub
16789 ftrace_stub:
16790 ret
16791-END(ftrace_caller)
16792+ENDPROC(ftrace_caller)
16793
16794 #else /* ! CONFIG_DYNAMIC_FTRACE */
16795
16796@@ -1160,7 +1404,7 @@ trace:
16797 popl %ecx
16798 popl %eax
16799 jmp ftrace_stub
16800-END(mcount)
16801+ENDPROC(mcount)
16802 #endif /* CONFIG_DYNAMIC_FTRACE */
16803 #endif /* CONFIG_FUNCTION_TRACER */
16804
16805@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
16806 popl %ecx
16807 popl %eax
16808 ret
16809-END(ftrace_graph_caller)
16810+ENDPROC(ftrace_graph_caller)
16811
16812 .globl return_to_handler
16813 return_to_handler:
16814@@ -1198,7 +1442,6 @@ return_to_handler:
16815 ret
16816 #endif
16817
16818-.section .rodata,"a"
16819 #include "syscall_table_32.S"
16820
16821 syscall_table_size=(.-sys_call_table)
16822@@ -1255,15 +1498,18 @@ error_code:
16823 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16824 REG_TO_PTGS %ecx
16825 SET_KERNEL_GS %ecx
16826- movl $(__USER_DS), %ecx
16827+ movl $(__KERNEL_DS), %ecx
16828 movl %ecx, %ds
16829 movl %ecx, %es
16830+
16831+ pax_enter_kernel
16832+
16833 TRACE_IRQS_OFF
16834 movl %esp,%eax # pt_regs pointer
16835 call *%edi
16836 jmp ret_from_exception
16837 CFI_ENDPROC
16838-END(page_fault)
16839+ENDPROC(page_fault)
16840
16841 /*
16842 * Debug traps and NMI can happen at the one SYSENTER instruction
16843@@ -1309,7 +1555,7 @@ debug_stack_correct:
16844 call do_debug
16845 jmp ret_from_exception
16846 CFI_ENDPROC
16847-END(debug)
16848+ENDPROC(debug)
16849
16850 /*
16851 * NMI is doubly nasty. It can happen _while_ we're handling
16852@@ -1351,6 +1597,9 @@ nmi_stack_correct:
16853 xorl %edx,%edx # zero error code
16854 movl %esp,%eax # pt_regs pointer
16855 call do_nmi
16856+
16857+ pax_exit_kernel
16858+
16859 jmp restore_all_notrace
16860 CFI_ENDPROC
16861
16862@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
16863 FIXUP_ESPFIX_STACK # %eax == %esp
16864 xorl %edx,%edx # zero error code
16865 call do_nmi
16866+
16867+ pax_exit_kernel
16868+
16869 RESTORE_REGS
16870 lss 12+4(%esp), %esp # back to espfix stack
16871 CFI_ADJUST_CFA_OFFSET -24
16872 jmp irq_return
16873 CFI_ENDPROC
16874-END(nmi)
16875+ENDPROC(nmi)
16876
16877 ENTRY(int3)
16878 RING0_INT_FRAME
16879@@ -1409,7 +1661,7 @@ ENTRY(int3)
16880 call do_int3
16881 jmp ret_from_exception
16882 CFI_ENDPROC
16883-END(int3)
16884+ENDPROC(int3)
16885
16886 ENTRY(general_protection)
16887 RING0_EC_FRAME
16888@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
16889 CFI_ADJUST_CFA_OFFSET 4
16890 jmp error_code
16891 CFI_ENDPROC
16892-END(general_protection)
16893+ENDPROC(general_protection)
16894
16895 /*
16896 * End of kprobes section
16897diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16898index 34a56a9..74613c5 100644
16899--- a/arch/x86/kernel/entry_64.S
16900+++ b/arch/x86/kernel/entry_64.S
16901@@ -53,6 +53,8 @@
16902 #include <asm/paravirt.h>
16903 #include <asm/ftrace.h>
16904 #include <asm/percpu.h>
16905+#include <asm/pgtable.h>
16906+#include <asm/alternative-asm.h>
16907
16908 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16909 #include <linux/elf-em.h>
16910@@ -64,8 +66,9 @@
16911 #ifdef CONFIG_FUNCTION_TRACER
16912 #ifdef CONFIG_DYNAMIC_FTRACE
16913 ENTRY(mcount)
16914+ pax_force_retaddr
16915 retq
16916-END(mcount)
16917+ENDPROC(mcount)
16918
16919 ENTRY(ftrace_caller)
16920 cmpl $0, function_trace_stop
16921@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
16922 #endif
16923
16924 GLOBAL(ftrace_stub)
16925+ pax_force_retaddr
16926 retq
16927-END(ftrace_caller)
16928+ENDPROC(ftrace_caller)
16929
16930 #else /* ! CONFIG_DYNAMIC_FTRACE */
16931 ENTRY(mcount)
16932@@ -108,6 +112,7 @@ ENTRY(mcount)
16933 #endif
16934
16935 GLOBAL(ftrace_stub)
16936+ pax_force_retaddr
16937 retq
16938
16939 trace:
16940@@ -117,12 +122,13 @@ trace:
16941 movq 8(%rbp), %rsi
16942 subq $MCOUNT_INSN_SIZE, %rdi
16943
16944+ pax_force_fptr ftrace_trace_function
16945 call *ftrace_trace_function
16946
16947 MCOUNT_RESTORE_FRAME
16948
16949 jmp ftrace_stub
16950-END(mcount)
16951+ENDPROC(mcount)
16952 #endif /* CONFIG_DYNAMIC_FTRACE */
16953 #endif /* CONFIG_FUNCTION_TRACER */
16954
16955@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
16956
16957 MCOUNT_RESTORE_FRAME
16958
16959+ pax_force_retaddr
16960 retq
16961-END(ftrace_graph_caller)
16962+ENDPROC(ftrace_graph_caller)
16963
16964 GLOBAL(return_to_handler)
16965 subq $24, %rsp
16966@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
16967 movq 8(%rsp), %rdx
16968 movq (%rsp), %rax
16969 addq $16, %rsp
16970+ pax_force_retaddr
16971 retq
16972 #endif
16973
16974@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
16975 ENDPROC(native_usergs_sysret64)
16976 #endif /* CONFIG_PARAVIRT */
16977
16978+ .macro ljmpq sel, off
16979+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16980+ .byte 0x48; ljmp *1234f(%rip)
16981+ .pushsection .rodata
16982+ .align 16
16983+ 1234: .quad \off; .word \sel
16984+ .popsection
16985+#else
16986+ pushq $\sel
16987+ pushq $\off
16988+ lretq
16989+#endif
16990+ .endm
16991+
16992+ .macro pax_enter_kernel
16993+ pax_set_fptr_mask
16994+#ifdef CONFIG_PAX_KERNEXEC
16995+ call pax_enter_kernel
16996+#endif
16997+ .endm
16998+
16999+ .macro pax_exit_kernel
17000+#ifdef CONFIG_PAX_KERNEXEC
17001+ call pax_exit_kernel
17002+#endif
17003+ .endm
17004+
17005+#ifdef CONFIG_PAX_KERNEXEC
17006+ENTRY(pax_enter_kernel)
17007+ pushq %rdi
17008+
17009+#ifdef CONFIG_PARAVIRT
17010+ PV_SAVE_REGS(CLBR_RDI)
17011+#endif
17012+
17013+ GET_CR0_INTO_RDI
17014+ bts $16,%rdi
17015+ jnc 3f
17016+ mov %cs,%edi
17017+ cmp $__KERNEL_CS,%edi
17018+ jnz 2f
17019+1:
17020+
17021+#ifdef CONFIG_PARAVIRT
17022+ PV_RESTORE_REGS(CLBR_RDI)
17023+#endif
17024+
17025+ popq %rdi
17026+ pax_force_retaddr
17027+ retq
17028+
17029+2: ljmpq __KERNEL_CS,1f
17030+3: ljmpq __KERNEXEC_KERNEL_CS,4f
17031+4: SET_RDI_INTO_CR0
17032+ jmp 1b
17033+ENDPROC(pax_enter_kernel)
17034+
17035+ENTRY(pax_exit_kernel)
17036+ pushq %rdi
17037+
17038+#ifdef CONFIG_PARAVIRT
17039+ PV_SAVE_REGS(CLBR_RDI)
17040+#endif
17041+
17042+ mov %cs,%rdi
17043+ cmp $__KERNEXEC_KERNEL_CS,%edi
17044+ jz 2f
17045+1:
17046+
17047+#ifdef CONFIG_PARAVIRT
17048+ PV_RESTORE_REGS(CLBR_RDI);
17049+#endif
17050+
17051+ popq %rdi
17052+ pax_force_retaddr
17053+ retq
17054+
17055+2: GET_CR0_INTO_RDI
17056+ btr $16,%rdi
17057+ ljmpq __KERNEL_CS,3f
17058+3: SET_RDI_INTO_CR0
17059+ jmp 1b
17060+#ifdef CONFIG_PARAVIRT
17061+ PV_RESTORE_REGS(CLBR_RDI);
17062+#endif
17063+
17064+ popq %rdi
17065+ pax_force_retaddr
17066+ retq
17067+ENDPROC(pax_exit_kernel)
17068+#endif
17069+
17070+ .macro pax_enter_kernel_user
17071+ pax_set_fptr_mask
17072+#ifdef CONFIG_PAX_MEMORY_UDEREF
17073+ call pax_enter_kernel_user
17074+#endif
17075+ .endm
17076+
17077+ .macro pax_exit_kernel_user
17078+#ifdef CONFIG_PAX_MEMORY_UDEREF
17079+ call pax_exit_kernel_user
17080+#endif
17081+#ifdef CONFIG_PAX_RANDKSTACK
17082+ pushq %rax
17083+ call pax_randomize_kstack
17084+ popq %rax
17085+#endif
17086+ .endm
17087+
17088+#ifdef CONFIG_PAX_MEMORY_UDEREF
17089+ENTRY(pax_enter_kernel_user)
17090+ pushq %rdi
17091+ pushq %rbx
17092+
17093+#ifdef CONFIG_PARAVIRT
17094+ PV_SAVE_REGS(CLBR_RDI)
17095+#endif
17096+
17097+ GET_CR3_INTO_RDI
17098+ mov %rdi,%rbx
17099+ add $__START_KERNEL_map,%rbx
17100+ sub phys_base(%rip),%rbx
17101+
17102+#ifdef CONFIG_PARAVIRT
17103+ pushq %rdi
17104+ cmpl $0, pv_info+PARAVIRT_enabled
17105+ jz 1f
17106+ i = 0
17107+ .rept USER_PGD_PTRS
17108+ mov i*8(%rbx),%rsi
17109+ mov $0,%sil
17110+ lea i*8(%rbx),%rdi
17111+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17112+ i = i + 1
17113+ .endr
17114+ jmp 2f
17115+1:
17116+#endif
17117+
17118+ i = 0
17119+ .rept USER_PGD_PTRS
17120+ movb $0,i*8(%rbx)
17121+ i = i + 1
17122+ .endr
17123+
17124+#ifdef CONFIG_PARAVIRT
17125+2: popq %rdi
17126+#endif
17127+ SET_RDI_INTO_CR3
17128+
17129+#ifdef CONFIG_PAX_KERNEXEC
17130+ GET_CR0_INTO_RDI
17131+ bts $16,%rdi
17132+ SET_RDI_INTO_CR0
17133+#endif
17134+
17135+#ifdef CONFIG_PARAVIRT
17136+ PV_RESTORE_REGS(CLBR_RDI)
17137+#endif
17138+
17139+ popq %rbx
17140+ popq %rdi
17141+ pax_force_retaddr
17142+ retq
17143+ENDPROC(pax_enter_kernel_user)
17144+
17145+ENTRY(pax_exit_kernel_user)
17146+ push %rdi
17147+
17148+#ifdef CONFIG_PARAVIRT
17149+ pushq %rbx
17150+ PV_SAVE_REGS(CLBR_RDI)
17151+#endif
17152+
17153+#ifdef CONFIG_PAX_KERNEXEC
17154+ GET_CR0_INTO_RDI
17155+ btr $16,%rdi
17156+ SET_RDI_INTO_CR0
17157+#endif
17158+
17159+ GET_CR3_INTO_RDI
17160+ add $__START_KERNEL_map,%rdi
17161+ sub phys_base(%rip),%rdi
17162+
17163+#ifdef CONFIG_PARAVIRT
17164+ cmpl $0, pv_info+PARAVIRT_enabled
17165+ jz 1f
17166+ mov %rdi,%rbx
17167+ i = 0
17168+ .rept USER_PGD_PTRS
17169+ mov i*8(%rbx),%rsi
17170+ mov $0x67,%sil
17171+ lea i*8(%rbx),%rdi
17172+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17173+ i = i + 1
17174+ .endr
17175+ jmp 2f
17176+1:
17177+#endif
17178+
17179+ i = 0
17180+ .rept USER_PGD_PTRS
17181+ movb $0x67,i*8(%rdi)
17182+ i = i + 1
17183+ .endr
17184+
17185+#ifdef CONFIG_PARAVIRT
17186+2: PV_RESTORE_REGS(CLBR_RDI)
17187+ popq %rbx
17188+#endif
17189+
17190+ popq %rdi
17191+ pax_force_retaddr
17192+ retq
17193+ENDPROC(pax_exit_kernel_user)
17194+#endif
17195+
17196+.macro pax_erase_kstack
17197+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17198+ call pax_erase_kstack
17199+#endif
17200+.endm
17201+
17202+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17203+/*
17204+ * r11: thread_info
17205+ * rcx, rdx: can be clobbered
17206+ */
17207+ENTRY(pax_erase_kstack)
17208+ pushq %rdi
17209+ pushq %rax
17210+ pushq %r11
17211+
17212+ GET_THREAD_INFO(%r11)
17213+ mov TI_lowest_stack(%r11), %rdi
17214+ mov $-0xBEEF, %rax
17215+ std
17216+
17217+1: mov %edi, %ecx
17218+ and $THREAD_SIZE_asm - 1, %ecx
17219+ shr $3, %ecx
17220+ repne scasq
17221+ jecxz 2f
17222+
17223+ cmp $2*8, %ecx
17224+ jc 2f
17225+
17226+ mov $2*8, %ecx
17227+ repe scasq
17228+ jecxz 2f
17229+ jne 1b
17230+
17231+2: cld
17232+ mov %esp, %ecx
17233+ sub %edi, %ecx
17234+
17235+ cmp $THREAD_SIZE_asm, %rcx
17236+ jb 3f
17237+ ud2
17238+3:
17239+
17240+ shr $3, %ecx
17241+ rep stosq
17242+
17243+ mov TI_task_thread_sp0(%r11), %rdi
17244+ sub $256, %rdi
17245+ mov %rdi, TI_lowest_stack(%r11)
17246+
17247+ popq %r11
17248+ popq %rax
17249+ popq %rdi
17250+ pax_force_retaddr
17251+ ret
17252+ENDPROC(pax_erase_kstack)
17253+#endif
17254
17255 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17256 #ifdef CONFIG_TRACE_IRQFLAGS
17257@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
17258 .endm
17259
17260 .macro UNFAKE_STACK_FRAME
17261- addq $8*6, %rsp
17262- CFI_ADJUST_CFA_OFFSET -(6*8)
17263+ addq $8*6 + ARG_SKIP, %rsp
17264+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17265 .endm
17266
17267 /*
17268@@ -317,7 +601,7 @@ ENTRY(save_args)
17269 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
17270 movq_cfi rbp, 8 /* push %rbp */
17271 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
17272- testl $3, CS(%rdi)
17273+ testb $3, CS(%rdi)
17274 je 1f
17275 SWAPGS
17276 /*
17277@@ -337,9 +621,10 @@ ENTRY(save_args)
17278 * We entered an interrupt context - irqs are off:
17279 */
17280 2: TRACE_IRQS_OFF
17281+ pax_force_retaddr_bts
17282 ret
17283 CFI_ENDPROC
17284-END(save_args)
17285+ENDPROC(save_args)
17286
17287 ENTRY(save_rest)
17288 PARTIAL_FRAME 1 REST_SKIP+8
17289@@ -352,9 +637,10 @@ ENTRY(save_rest)
17290 movq_cfi r15, R15+16
17291 movq %r11, 8(%rsp) /* return address */
17292 FIXUP_TOP_OF_STACK %r11, 16
17293+ pax_force_retaddr
17294 ret
17295 CFI_ENDPROC
17296-END(save_rest)
17297+ENDPROC(save_rest)
17298
17299 /* save complete stack frame */
17300 .pushsection .kprobes.text, "ax"
17301@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
17302 js 1f /* negative -> in kernel */
17303 SWAPGS
17304 xorl %ebx,%ebx
17305-1: ret
17306+1: pax_force_retaddr_bts
17307+ ret
17308 CFI_ENDPROC
17309-END(save_paranoid)
17310+ENDPROC(save_paranoid)
17311 .popsection
17312
17313 /*
17314@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
17315
17316 RESTORE_REST
17317
17318- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17319+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17320 je int_ret_from_sys_call
17321
17322 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17323@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
17324 jmp ret_from_sys_call # go to the SYSRET fastpath
17325
17326 CFI_ENDPROC
17327-END(ret_from_fork)
17328+ENDPROC(ret_from_fork)
17329
17330 /*
17331 * System call entry. Upto 6 arguments in registers are supported.
17332@@ -455,7 +742,7 @@ END(ret_from_fork)
17333 ENTRY(system_call)
17334 CFI_STARTPROC simple
17335 CFI_SIGNAL_FRAME
17336- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17337+ CFI_DEF_CFA rsp,0
17338 CFI_REGISTER rip,rcx
17339 /*CFI_REGISTER rflags,r11*/
17340 SWAPGS_UNSAFE_STACK
17341@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
17342
17343 movq %rsp,PER_CPU_VAR(old_rsp)
17344 movq PER_CPU_VAR(kernel_stack),%rsp
17345+ SAVE_ARGS 8*6,1
17346+ pax_enter_kernel_user
17347 /*
17348 * No need to follow this irqs off/on section - it's straight
17349 * and short:
17350 */
17351 ENABLE_INTERRUPTS(CLBR_NONE)
17352- SAVE_ARGS 8,1
17353 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17354 movq %rcx,RIP-ARGOFFSET(%rsp)
17355 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17356@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
17357 system_call_fastpath:
17358 cmpq $__NR_syscall_max,%rax
17359 ja badsys
17360- movq %r10,%rcx
17361+ movq R10-ARGOFFSET(%rsp),%rcx
17362 call *sys_call_table(,%rax,8) # XXX: rip relative
17363 movq %rax,RAX-ARGOFFSET(%rsp)
17364 /*
17365@@ -502,6 +790,8 @@ sysret_check:
17366 andl %edi,%edx
17367 jnz sysret_careful
17368 CFI_REMEMBER_STATE
17369+ pax_exit_kernel_user
17370+ pax_erase_kstack
17371 /*
17372 * sysretq will re-enable interrupts:
17373 */
17374@@ -555,14 +845,18 @@ badsys:
17375 * jump back to the normal fast path.
17376 */
17377 auditsys:
17378- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17379+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17380 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17381 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17382 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17383 movq %rax,%rsi /* 2nd arg: syscall number */
17384 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17385 call audit_syscall_entry
17386+
17387+ pax_erase_kstack
17388+
17389 LOAD_ARGS 0 /* reload call-clobbered registers */
17390+ pax_set_fptr_mask
17391 jmp system_call_fastpath
17392
17393 /*
17394@@ -592,16 +886,20 @@ tracesys:
17395 FIXUP_TOP_OF_STACK %rdi
17396 movq %rsp,%rdi
17397 call syscall_trace_enter
17398+
17399+ pax_erase_kstack
17400+
17401 /*
17402 * Reload arg registers from stack in case ptrace changed them.
17403 * We don't reload %rax because syscall_trace_enter() returned
17404 * the value it wants us to use in the table lookup.
17405 */
17406 LOAD_ARGS ARGOFFSET, 1
17407+ pax_set_fptr_mask
17408 RESTORE_REST
17409 cmpq $__NR_syscall_max,%rax
17410 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17411- movq %r10,%rcx /* fixup for C */
17412+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17413 call *sys_call_table(,%rax,8)
17414 movq %rax,RAX-ARGOFFSET(%rsp)
17415 /* Use IRET because user could have changed frame */
17416@@ -613,7 +911,7 @@ tracesys:
17417 GLOBAL(int_ret_from_sys_call)
17418 DISABLE_INTERRUPTS(CLBR_NONE)
17419 TRACE_IRQS_OFF
17420- testl $3,CS-ARGOFFSET(%rsp)
17421+ testb $3,CS-ARGOFFSET(%rsp)
17422 je retint_restore_args
17423 movl $_TIF_ALLWORK_MASK,%edi
17424 /* edi: mask to check */
17425@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
17426 andl %edi,%edx
17427 jnz int_careful
17428 andl $~TS_COMPAT,TI_status(%rcx)
17429+ pax_erase_kstack
17430 jmp retint_swapgs
17431
17432 /* Either reschedule or signal or syscall exit tracking needed. */
17433@@ -674,7 +973,7 @@ int_restore_rest:
17434 TRACE_IRQS_OFF
17435 jmp int_with_check
17436 CFI_ENDPROC
17437-END(system_call)
17438+ENDPROC(system_call)
17439
17440 /*
17441 * Certain special system calls that need to save a complete full stack frame.
17442@@ -690,7 +989,7 @@ ENTRY(\label)
17443 call \func
17444 jmp ptregscall_common
17445 CFI_ENDPROC
17446-END(\label)
17447+ENDPROC(\label)
17448 .endm
17449
17450 PTREGSCALL stub_clone, sys_clone, %r8
17451@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
17452 movq_cfi_restore R12+8, r12
17453 movq_cfi_restore RBP+8, rbp
17454 movq_cfi_restore RBX+8, rbx
17455+ pax_force_retaddr
17456 ret $REST_SKIP /* pop extended registers */
17457 CFI_ENDPROC
17458-END(ptregscall_common)
17459+ENDPROC(ptregscall_common)
17460
17461 ENTRY(stub_execve)
17462 CFI_STARTPROC
17463@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
17464 RESTORE_REST
17465 jmp int_ret_from_sys_call
17466 CFI_ENDPROC
17467-END(stub_execve)
17468+ENDPROC(stub_execve)
17469
17470 /*
17471 * sigreturn is special because it needs to restore all registers on return.
17472@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
17473 RESTORE_REST
17474 jmp int_ret_from_sys_call
17475 CFI_ENDPROC
17476-END(stub_rt_sigreturn)
17477+ENDPROC(stub_rt_sigreturn)
17478
17479 /*
17480 * Build the entry stubs and pointer table with some assembler magic.
17481@@ -780,7 +1080,7 @@ vector=vector+1
17482 2: jmp common_interrupt
17483 .endr
17484 CFI_ENDPROC
17485-END(irq_entries_start)
17486+ENDPROC(irq_entries_start)
17487
17488 .previous
17489 END(interrupt)
17490@@ -800,6 +1100,16 @@ END(interrupt)
17491 CFI_ADJUST_CFA_OFFSET 10*8
17492 call save_args
17493 PARTIAL_FRAME 0
17494+#ifdef CONFIG_PAX_MEMORY_UDEREF
17495+ testb $3, CS(%rdi)
17496+ jnz 1f
17497+ pax_enter_kernel
17498+ jmp 2f
17499+1: pax_enter_kernel_user
17500+2:
17501+#else
17502+ pax_enter_kernel
17503+#endif
17504 call \func
17505 .endm
17506
17507@@ -822,7 +1132,7 @@ ret_from_intr:
17508 CFI_ADJUST_CFA_OFFSET -8
17509 exit_intr:
17510 GET_THREAD_INFO(%rcx)
17511- testl $3,CS-ARGOFFSET(%rsp)
17512+ testb $3,CS-ARGOFFSET(%rsp)
17513 je retint_kernel
17514
17515 /* Interrupt came from user space */
17516@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
17517 * The iretq could re-enable interrupts:
17518 */
17519 DISABLE_INTERRUPTS(CLBR_ANY)
17520+ pax_exit_kernel_user
17521 TRACE_IRQS_IRETQ
17522 SWAPGS
17523 jmp restore_args
17524
17525 retint_restore_args: /* return to kernel space */
17526 DISABLE_INTERRUPTS(CLBR_ANY)
17527+ pax_exit_kernel
17528+ pax_force_retaddr RIP-ARGOFFSET
17529 /*
17530 * The iretq could re-enable interrupts:
17531 */
17532@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
17533 #endif
17534
17535 CFI_ENDPROC
17536-END(common_interrupt)
17537+ENDPROC(common_interrupt)
17538
17539 /*
17540 * APIC interrupts.
17541@@ -953,7 +1266,7 @@ ENTRY(\sym)
17542 interrupt \do_sym
17543 jmp ret_from_intr
17544 CFI_ENDPROC
17545-END(\sym)
17546+ENDPROC(\sym)
17547 .endm
17548
17549 #ifdef CONFIG_SMP
17550@@ -1032,12 +1345,22 @@ ENTRY(\sym)
17551 CFI_ADJUST_CFA_OFFSET 15*8
17552 call error_entry
17553 DEFAULT_FRAME 0
17554+#ifdef CONFIG_PAX_MEMORY_UDEREF
17555+ testb $3, CS(%rsp)
17556+ jnz 1f
17557+ pax_enter_kernel
17558+ jmp 2f
17559+1: pax_enter_kernel_user
17560+2:
17561+#else
17562+ pax_enter_kernel
17563+#endif
17564 movq %rsp,%rdi /* pt_regs pointer */
17565 xorl %esi,%esi /* no error code */
17566 call \do_sym
17567 jmp error_exit /* %ebx: no swapgs flag */
17568 CFI_ENDPROC
17569-END(\sym)
17570+ENDPROC(\sym)
17571 .endm
17572
17573 .macro paranoidzeroentry sym do_sym
17574@@ -1049,12 +1372,22 @@ ENTRY(\sym)
17575 subq $15*8, %rsp
17576 call save_paranoid
17577 TRACE_IRQS_OFF
17578+#ifdef CONFIG_PAX_MEMORY_UDEREF
17579+ testb $3, CS(%rsp)
17580+ jnz 1f
17581+ pax_enter_kernel
17582+ jmp 2f
17583+1: pax_enter_kernel_user
17584+2:
17585+#else
17586+ pax_enter_kernel
17587+#endif
17588 movq %rsp,%rdi /* pt_regs pointer */
17589 xorl %esi,%esi /* no error code */
17590 call \do_sym
17591 jmp paranoid_exit /* %ebx: no swapgs flag */
17592 CFI_ENDPROC
17593-END(\sym)
17594+ENDPROC(\sym)
17595 .endm
17596
17597 .macro paranoidzeroentry_ist sym do_sym ist
17598@@ -1066,15 +1399,30 @@ ENTRY(\sym)
17599 subq $15*8, %rsp
17600 call save_paranoid
17601 TRACE_IRQS_OFF
17602+#ifdef CONFIG_PAX_MEMORY_UDEREF
17603+ testb $3, CS(%rsp)
17604+ jnz 1f
17605+ pax_enter_kernel
17606+ jmp 2f
17607+1: pax_enter_kernel_user
17608+2:
17609+#else
17610+ pax_enter_kernel
17611+#endif
17612 movq %rsp,%rdi /* pt_regs pointer */
17613 xorl %esi,%esi /* no error code */
17614- PER_CPU(init_tss, %rbp)
17615+#ifdef CONFIG_SMP
17616+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
17617+ lea init_tss(%rbp), %rbp
17618+#else
17619+ lea init_tss(%rip), %rbp
17620+#endif
17621 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17622 call \do_sym
17623 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17624 jmp paranoid_exit /* %ebx: no swapgs flag */
17625 CFI_ENDPROC
17626-END(\sym)
17627+ENDPROC(\sym)
17628 .endm
17629
17630 .macro errorentry sym do_sym
17631@@ -1085,13 +1433,23 @@ ENTRY(\sym)
17632 CFI_ADJUST_CFA_OFFSET 15*8
17633 call error_entry
17634 DEFAULT_FRAME 0
17635+#ifdef CONFIG_PAX_MEMORY_UDEREF
17636+ testb $3, CS(%rsp)
17637+ jnz 1f
17638+ pax_enter_kernel
17639+ jmp 2f
17640+1: pax_enter_kernel_user
17641+2:
17642+#else
17643+ pax_enter_kernel
17644+#endif
17645 movq %rsp,%rdi /* pt_regs pointer */
17646 movq ORIG_RAX(%rsp),%rsi /* get error code */
17647 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17648 call \do_sym
17649 jmp error_exit /* %ebx: no swapgs flag */
17650 CFI_ENDPROC
17651-END(\sym)
17652+ENDPROC(\sym)
17653 .endm
17654
17655 /* error code is on the stack already */
17656@@ -1104,13 +1462,23 @@ ENTRY(\sym)
17657 call save_paranoid
17658 DEFAULT_FRAME 0
17659 TRACE_IRQS_OFF
17660+#ifdef CONFIG_PAX_MEMORY_UDEREF
17661+ testb $3, CS(%rsp)
17662+ jnz 1f
17663+ pax_enter_kernel
17664+ jmp 2f
17665+1: pax_enter_kernel_user
17666+2:
17667+#else
17668+ pax_enter_kernel
17669+#endif
17670 movq %rsp,%rdi /* pt_regs pointer */
17671 movq ORIG_RAX(%rsp),%rsi /* get error code */
17672 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17673 call \do_sym
17674 jmp paranoid_exit /* %ebx: no swapgs flag */
17675 CFI_ENDPROC
17676-END(\sym)
17677+ENDPROC(\sym)
17678 .endm
17679
17680 zeroentry divide_error do_divide_error
17681@@ -1141,9 +1509,10 @@ gs_change:
17682 SWAPGS
17683 popf
17684 CFI_ADJUST_CFA_OFFSET -8
17685+ pax_force_retaddr
17686 ret
17687 CFI_ENDPROC
17688-END(native_load_gs_index)
17689+ENDPROC(native_load_gs_index)
17690
17691 .section __ex_table,"a"
17692 .align 8
17693@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
17694 * of hacks for example to fork off the per-CPU idle tasks.
17695 * [Hopefully no generic code relies on the reschedule -AK]
17696 */
17697- RESTORE_ALL
17698+ RESTORE_REST
17699 UNFAKE_STACK_FRAME
17700+ pax_force_retaddr
17701 ret
17702 CFI_ENDPROC
17703-END(kernel_thread)
17704+ENDPROC(kernel_thread)
17705
17706 ENTRY(child_rip)
17707 pushq $0 # fake return address
17708@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
17709 */
17710 movq %rdi, %rax
17711 movq %rsi, %rdi
17712+ pax_force_fptr %rax
17713 call *%rax
17714 # exit
17715 mov %eax, %edi
17716 call do_exit
17717 ud2 # padding for call trace
17718 CFI_ENDPROC
17719-END(child_rip)
17720+ENDPROC(child_rip)
17721
17722 /*
17723 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
17724@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
17725 RESTORE_REST
17726 testq %rax,%rax
17727 je int_ret_from_sys_call
17728- RESTORE_ARGS
17729 UNFAKE_STACK_FRAME
17730+ pax_force_retaddr
17731 ret
17732 CFI_ENDPROC
17733-END(kernel_execve)
17734+ENDPROC(kernel_execve)
17735
17736 /* Call softirq on interrupt stack. Interrupts are off. */
17737 ENTRY(call_softirq)
17738@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
17739 CFI_DEF_CFA_REGISTER rsp
17740 CFI_ADJUST_CFA_OFFSET -8
17741 decl PER_CPU_VAR(irq_count)
17742+ pax_force_retaddr
17743 ret
17744 CFI_ENDPROC
17745-END(call_softirq)
17746+ENDPROC(call_softirq)
17747
17748 #ifdef CONFIG_XEN
17749 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17750@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17751 decl PER_CPU_VAR(irq_count)
17752 jmp error_exit
17753 CFI_ENDPROC
17754-END(xen_do_hypervisor_callback)
17755+ENDPROC(xen_do_hypervisor_callback)
17756
17757 /*
17758 * Hypervisor uses this for application faults while it executes.
17759@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
17760 SAVE_ALL
17761 jmp error_exit
17762 CFI_ENDPROC
17763-END(xen_failsafe_callback)
17764+ENDPROC(xen_failsafe_callback)
17765
17766 #endif /* CONFIG_XEN */
17767
17768@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
17769 TRACE_IRQS_OFF
17770 testl %ebx,%ebx /* swapgs needed? */
17771 jnz paranoid_restore
17772- testl $3,CS(%rsp)
17773+ testb $3,CS(%rsp)
17774 jnz paranoid_userspace
17775+#ifdef CONFIG_PAX_MEMORY_UDEREF
17776+ pax_exit_kernel
17777+ TRACE_IRQS_IRETQ 0
17778+ SWAPGS_UNSAFE_STACK
17779+ RESTORE_ALL 8
17780+ pax_force_retaddr_bts
17781+ jmp irq_return
17782+#endif
17783 paranoid_swapgs:
17784+#ifdef CONFIG_PAX_MEMORY_UDEREF
17785+ pax_exit_kernel_user
17786+#else
17787+ pax_exit_kernel
17788+#endif
17789 TRACE_IRQS_IRETQ 0
17790 SWAPGS_UNSAFE_STACK
17791 RESTORE_ALL 8
17792 jmp irq_return
17793 paranoid_restore:
17794+ pax_exit_kernel
17795 TRACE_IRQS_IRETQ 0
17796 RESTORE_ALL 8
17797+ pax_force_retaddr_bts
17798 jmp irq_return
17799 paranoid_userspace:
17800 GET_THREAD_INFO(%rcx)
17801@@ -1443,7 +1830,7 @@ paranoid_schedule:
17802 TRACE_IRQS_OFF
17803 jmp paranoid_userspace
17804 CFI_ENDPROC
17805-END(paranoid_exit)
17806+ENDPROC(paranoid_exit)
17807
17808 /*
17809 * Exception entry point. This expects an error code/orig_rax on the stack.
17810@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
17811 movq_cfi r14, R14+8
17812 movq_cfi r15, R15+8
17813 xorl %ebx,%ebx
17814- testl $3,CS+8(%rsp)
17815+ testb $3,CS+8(%rsp)
17816 je error_kernelspace
17817 error_swapgs:
17818 SWAPGS
17819 error_sti:
17820 TRACE_IRQS_OFF
17821+ pax_force_retaddr_bts
17822 ret
17823 CFI_ENDPROC
17824
17825@@ -1497,7 +1885,7 @@ error_kernelspace:
17826 cmpq $gs_change,RIP+8(%rsp)
17827 je error_swapgs
17828 jmp error_sti
17829-END(error_entry)
17830+ENDPROC(error_entry)
17831
17832
17833 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17834@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
17835 jnz retint_careful
17836 jmp retint_swapgs
17837 CFI_ENDPROC
17838-END(error_exit)
17839+ENDPROC(error_exit)
17840
17841
17842 /* runs on exception stack */
17843@@ -1529,6 +1917,16 @@ ENTRY(nmi)
17844 CFI_ADJUST_CFA_OFFSET 15*8
17845 call save_paranoid
17846 DEFAULT_FRAME 0
17847+#ifdef CONFIG_PAX_MEMORY_UDEREF
17848+ testb $3, CS(%rsp)
17849+ jnz 1f
17850+ pax_enter_kernel
17851+ jmp 2f
17852+1: pax_enter_kernel_user
17853+2:
17854+#else
17855+ pax_enter_kernel
17856+#endif
17857 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17858 movq %rsp,%rdi
17859 movq $-1,%rsi
17860@@ -1539,12 +1937,28 @@ ENTRY(nmi)
17861 DISABLE_INTERRUPTS(CLBR_NONE)
17862 testl %ebx,%ebx /* swapgs needed? */
17863 jnz nmi_restore
17864- testl $3,CS(%rsp)
17865+ testb $3,CS(%rsp)
17866 jnz nmi_userspace
17867+#ifdef CONFIG_PAX_MEMORY_UDEREF
17868+ pax_exit_kernel
17869+ SWAPGS_UNSAFE_STACK
17870+ RESTORE_ALL 8
17871+ pax_force_retaddr_bts
17872+ jmp irq_return
17873+#endif
17874 nmi_swapgs:
17875+#ifdef CONFIG_PAX_MEMORY_UDEREF
17876+ pax_exit_kernel_user
17877+#else
17878+ pax_exit_kernel
17879+#endif
17880 SWAPGS_UNSAFE_STACK
17881+ RESTORE_ALL 8
17882+ jmp irq_return
17883 nmi_restore:
17884+ pax_exit_kernel
17885 RESTORE_ALL 8
17886+ pax_force_retaddr_bts
17887 jmp irq_return
17888 nmi_userspace:
17889 GET_THREAD_INFO(%rcx)
17890@@ -1573,14 +1987,14 @@ nmi_schedule:
17891 jmp paranoid_exit
17892 CFI_ENDPROC
17893 #endif
17894-END(nmi)
17895+ENDPROC(nmi)
17896
17897 ENTRY(ignore_sysret)
17898 CFI_STARTPROC
17899 mov $-ENOSYS,%eax
17900 sysret
17901 CFI_ENDPROC
17902-END(ignore_sysret)
17903+ENDPROC(ignore_sysret)
17904
17905 /*
17906 * End of kprobes section
17907diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17908index 9dbb527..7b3615a 100644
17909--- a/arch/x86/kernel/ftrace.c
17910+++ b/arch/x86/kernel/ftrace.c
17911@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
17912 static void *mod_code_newcode; /* holds the text to write to the IP */
17913
17914 static unsigned nmi_wait_count;
17915-static atomic_t nmi_update_count = ATOMIC_INIT(0);
17916+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
17917
17918 int ftrace_arch_read_dyn_info(char *buf, int size)
17919 {
17920@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
17921
17922 r = snprintf(buf, size, "%u %u",
17923 nmi_wait_count,
17924- atomic_read(&nmi_update_count));
17925+ atomic_read_unchecked(&nmi_update_count));
17926 return r;
17927 }
17928
17929@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
17930 {
17931 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
17932 smp_rmb();
17933+ pax_open_kernel();
17934 ftrace_mod_code();
17935- atomic_inc(&nmi_update_count);
17936+ pax_close_kernel();
17937+ atomic_inc_unchecked(&nmi_update_count);
17938 }
17939 /* Must have previous changes seen before executions */
17940 smp_mb();
17941@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
17942
17943
17944
17945-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
17946+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
17947
17948 static unsigned char *ftrace_nop_replace(void)
17949 {
17950@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
17951 {
17952 unsigned char replaced[MCOUNT_INSN_SIZE];
17953
17954+ ip = ktla_ktva(ip);
17955+
17956 /*
17957 * Note: Due to modules and __init, code can
17958 * disappear and change, we need to protect against faulting
17959@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17960 unsigned char old[MCOUNT_INSN_SIZE], *new;
17961 int ret;
17962
17963- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17964+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17965 new = ftrace_call_replace(ip, (unsigned long)func);
17966 ret = ftrace_modify_code(ip, old, new);
17967
17968@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
17969 switch (faulted) {
17970 case 0:
17971 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
17972- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
17973+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
17974 break;
17975 case 1:
17976 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
17977- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
17978+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
17979 break;
17980 case 2:
17981 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
17982- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
17983+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
17984 break;
17985 }
17986
17987@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
17988 {
17989 unsigned char code[MCOUNT_INSN_SIZE];
17990
17991+ ip = ktla_ktva(ip);
17992+
17993 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
17994 return -EFAULT;
17995
17996diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
17997index 4f8e250..df24706 100644
17998--- a/arch/x86/kernel/head32.c
17999+++ b/arch/x86/kernel/head32.c
18000@@ -16,6 +16,7 @@
18001 #include <asm/apic.h>
18002 #include <asm/io_apic.h>
18003 #include <asm/bios_ebda.h>
18004+#include <asm/boot.h>
18005
18006 static void __init i386_default_early_setup(void)
18007 {
18008@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
18009 {
18010 reserve_trampoline_memory();
18011
18012- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18013+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
18014
18015 #ifdef CONFIG_BLK_DEV_INITRD
18016 /* Reserve INITRD */
18017diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
18018index 34c3308..6fc4e76 100644
18019--- a/arch/x86/kernel/head_32.S
18020+++ b/arch/x86/kernel/head_32.S
18021@@ -19,10 +19,17 @@
18022 #include <asm/setup.h>
18023 #include <asm/processor-flags.h>
18024 #include <asm/percpu.h>
18025+#include <asm/msr-index.h>
18026
18027 /* Physical address */
18028 #define pa(X) ((X) - __PAGE_OFFSET)
18029
18030+#ifdef CONFIG_PAX_KERNEXEC
18031+#define ta(X) (X)
18032+#else
18033+#define ta(X) ((X) - __PAGE_OFFSET)
18034+#endif
18035+
18036 /*
18037 * References to members of the new_cpu_data structure.
18038 */
18039@@ -52,11 +59,7 @@
18040 * and small than max_low_pfn, otherwise will waste some page table entries
18041 */
18042
18043-#if PTRS_PER_PMD > 1
18044-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
18045-#else
18046-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
18047-#endif
18048+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
18049
18050 /* Enough space to fit pagetables for the low memory linear map */
18051 MAPPING_BEYOND_END = \
18052@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
18053 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18054
18055 /*
18056+ * Real beginning of normal "text" segment
18057+ */
18058+ENTRY(stext)
18059+ENTRY(_stext)
18060+
18061+/*
18062 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
18063 * %esi points to the real-mode code as a 32-bit pointer.
18064 * CS and DS must be 4 GB flat segments, but we don't depend on
18065@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
18066 * can.
18067 */
18068 __HEAD
18069+
18070+#ifdef CONFIG_PAX_KERNEXEC
18071+ jmp startup_32
18072+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
18073+.fill PAGE_SIZE-5,1,0xcc
18074+#endif
18075+
18076 ENTRY(startup_32)
18077+ movl pa(stack_start),%ecx
18078+
18079 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
18080 us to not reload segments */
18081 testb $(1<<6), BP_loadflags(%esi)
18082@@ -95,7 +113,60 @@ ENTRY(startup_32)
18083 movl %eax,%es
18084 movl %eax,%fs
18085 movl %eax,%gs
18086+ movl %eax,%ss
18087 2:
18088+ leal -__PAGE_OFFSET(%ecx),%esp
18089+
18090+#ifdef CONFIG_SMP
18091+ movl $pa(cpu_gdt_table),%edi
18092+ movl $__per_cpu_load,%eax
18093+ movw %ax,__KERNEL_PERCPU + 2(%edi)
18094+ rorl $16,%eax
18095+ movb %al,__KERNEL_PERCPU + 4(%edi)
18096+ movb %ah,__KERNEL_PERCPU + 7(%edi)
18097+ movl $__per_cpu_end - 1,%eax
18098+ subl $__per_cpu_start,%eax
18099+ movw %ax,__KERNEL_PERCPU + 0(%edi)
18100+#endif
18101+
18102+#ifdef CONFIG_PAX_MEMORY_UDEREF
18103+ movl $NR_CPUS,%ecx
18104+ movl $pa(cpu_gdt_table),%edi
18105+1:
18106+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
18107+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
18108+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
18109+ addl $PAGE_SIZE_asm,%edi
18110+ loop 1b
18111+#endif
18112+
18113+#ifdef CONFIG_PAX_KERNEXEC
18114+ movl $pa(boot_gdt),%edi
18115+ movl $__LOAD_PHYSICAL_ADDR,%eax
18116+ movw %ax,__BOOT_CS + 2(%edi)
18117+ rorl $16,%eax
18118+ movb %al,__BOOT_CS + 4(%edi)
18119+ movb %ah,__BOOT_CS + 7(%edi)
18120+ rorl $16,%eax
18121+
18122+ ljmp $(__BOOT_CS),$1f
18123+1:
18124+
18125+ movl $NR_CPUS,%ecx
18126+ movl $pa(cpu_gdt_table),%edi
18127+ addl $__PAGE_OFFSET,%eax
18128+1:
18129+ movw %ax,__KERNEL_CS + 2(%edi)
18130+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
18131+ rorl $16,%eax
18132+ movb %al,__KERNEL_CS + 4(%edi)
18133+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
18134+ movb %ah,__KERNEL_CS + 7(%edi)
18135+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
18136+ rorl $16,%eax
18137+ addl $PAGE_SIZE_asm,%edi
18138+ loop 1b
18139+#endif
18140
18141 /*
18142 * Clear BSS first so that there are no surprises...
18143@@ -140,9 +211,7 @@ ENTRY(startup_32)
18144 cmpl $num_subarch_entries, %eax
18145 jae bad_subarch
18146
18147- movl pa(subarch_entries)(,%eax,4), %eax
18148- subl $__PAGE_OFFSET, %eax
18149- jmp *%eax
18150+ jmp *pa(subarch_entries)(,%eax,4)
18151
18152 bad_subarch:
18153 WEAK(lguest_entry)
18154@@ -154,10 +223,10 @@ WEAK(xen_entry)
18155 __INITDATA
18156
18157 subarch_entries:
18158- .long default_entry /* normal x86/PC */
18159- .long lguest_entry /* lguest hypervisor */
18160- .long xen_entry /* Xen hypervisor */
18161- .long default_entry /* Moorestown MID */
18162+ .long ta(default_entry) /* normal x86/PC */
18163+ .long ta(lguest_entry) /* lguest hypervisor */
18164+ .long ta(xen_entry) /* Xen hypervisor */
18165+ .long ta(default_entry) /* Moorestown MID */
18166 num_subarch_entries = (. - subarch_entries) / 4
18167 .previous
18168 #endif /* CONFIG_PARAVIRT */
18169@@ -218,8 +287,11 @@ default_entry:
18170 movl %eax, pa(max_pfn_mapped)
18171
18172 /* Do early initialization of the fixmap area */
18173- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18174- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18175+#ifdef CONFIG_COMPAT_VDSO
18176+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18177+#else
18178+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18179+#endif
18180 #else /* Not PAE */
18181
18182 page_pde_offset = (__PAGE_OFFSET >> 20);
18183@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18184 movl %eax, pa(max_pfn_mapped)
18185
18186 /* Do early initialization of the fixmap area */
18187- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18188- movl %eax,pa(swapper_pg_dir+0xffc)
18189+#ifdef CONFIG_COMPAT_VDSO
18190+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
18191+#else
18192+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
18193+#endif
18194 #endif
18195 jmp 3f
18196 /*
18197@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
18198 movl %eax,%es
18199 movl %eax,%fs
18200 movl %eax,%gs
18201+ movl pa(stack_start),%ecx
18202+ movl %eax,%ss
18203+ leal -__PAGE_OFFSET(%ecx),%esp
18204 #endif /* CONFIG_SMP */
18205 3:
18206
18207@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
18208 orl %edx,%eax
18209 movl %eax,%cr4
18210
18211+#ifdef CONFIG_X86_PAE
18212 btl $5, %eax # check if PAE is enabled
18213 jnc 6f
18214
18215@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
18216 cpuid
18217 cmpl $0x80000000, %eax
18218 jbe 6f
18219+
18220+ /* Clear bogus XD_DISABLE bits */
18221+ call verify_cpu
18222+
18223 mov $0x80000001, %eax
18224 cpuid
18225 /* Execute Disable bit supported? */
18226@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
18227 jnc 6f
18228
18229 /* Setup EFER (Extended Feature Enable Register) */
18230- movl $0xc0000080, %ecx
18231+ movl $MSR_EFER, %ecx
18232 rdmsr
18233
18234 btsl $11, %eax
18235 /* Make changes effective */
18236 wrmsr
18237
18238+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18239+ movl $1,pa(nx_enabled)
18240+#endif
18241+
18242 6:
18243
18244 /*
18245@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
18246 movl %eax,%cr0 /* ..and set paging (PG) bit */
18247 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
18248 1:
18249- /* Set up the stack pointer */
18250- lss stack_start,%esp
18251+ /* Shift the stack pointer to a virtual address */
18252+ addl $__PAGE_OFFSET, %esp
18253
18254 /*
18255 * Initialize eflags. Some BIOS's leave bits like NT set. This would
18256@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
18257
18258 #ifdef CONFIG_SMP
18259 cmpb $0, ready
18260- jz 1f /* Initial CPU cleans BSS */
18261- jmp checkCPUtype
18262-1:
18263+ jnz checkCPUtype
18264 #endif /* CONFIG_SMP */
18265
18266 /*
18267@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
18268 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18269 movl %eax,%ss # after changing gdt.
18270
18271- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18272+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18273 movl %eax,%ds
18274 movl %eax,%es
18275
18276@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
18277 */
18278 cmpb $0,ready
18279 jne 1f
18280- movl $per_cpu__gdt_page,%eax
18281+ movl $cpu_gdt_table,%eax
18282 movl $per_cpu__stack_canary,%ecx
18283+#ifdef CONFIG_SMP
18284+ addl $__per_cpu_load,%ecx
18285+#endif
18286 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18287 shrl $16, %ecx
18288 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18289 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
18290 1:
18291-#endif
18292 movl $(__KERNEL_STACK_CANARY),%eax
18293+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18294+ movl $(__USER_DS),%eax
18295+#else
18296+ xorl %eax,%eax
18297+#endif
18298 movl %eax,%gs
18299
18300 xorl %eax,%eax # Clear LDT
18301@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
18302
18303 cld # gcc2 wants the direction flag cleared at all times
18304 pushl $0 # fake return address for unwinder
18305-#ifdef CONFIG_SMP
18306- movb ready, %cl
18307 movb $1, ready
18308- cmpb $0,%cl # the first CPU calls start_kernel
18309- je 1f
18310- movl (stack_start), %esp
18311-1:
18312-#endif /* CONFIG_SMP */
18313 jmp *(initial_code)
18314
18315 /*
18316@@ -546,22 +631,22 @@ early_page_fault:
18317 jmp early_fault
18318
18319 early_fault:
18320- cld
18321 #ifdef CONFIG_PRINTK
18322+ cmpl $1,%ss:early_recursion_flag
18323+ je hlt_loop
18324+ incl %ss:early_recursion_flag
18325+ cld
18326 pusha
18327 movl $(__KERNEL_DS),%eax
18328 movl %eax,%ds
18329 movl %eax,%es
18330- cmpl $2,early_recursion_flag
18331- je hlt_loop
18332- incl early_recursion_flag
18333 movl %cr2,%eax
18334 pushl %eax
18335 pushl %edx /* trapno */
18336 pushl $fault_msg
18337 call printk
18338+; call dump_stack
18339 #endif
18340- call dump_stack
18341 hlt_loop:
18342 hlt
18343 jmp hlt_loop
18344@@ -569,8 +654,11 @@ hlt_loop:
18345 /* This is the default interrupt "handler" :-) */
18346 ALIGN
18347 ignore_int:
18348- cld
18349 #ifdef CONFIG_PRINTK
18350+ cmpl $2,%ss:early_recursion_flag
18351+ je hlt_loop
18352+ incl %ss:early_recursion_flag
18353+ cld
18354 pushl %eax
18355 pushl %ecx
18356 pushl %edx
18357@@ -579,9 +667,6 @@ ignore_int:
18358 movl $(__KERNEL_DS),%eax
18359 movl %eax,%ds
18360 movl %eax,%es
18361- cmpl $2,early_recursion_flag
18362- je hlt_loop
18363- incl early_recursion_flag
18364 pushl 16(%esp)
18365 pushl 24(%esp)
18366 pushl 32(%esp)
18367@@ -600,6 +685,8 @@ ignore_int:
18368 #endif
18369 iret
18370
18371+#include "verify_cpu.S"
18372+
18373 __REFDATA
18374 .align 4
18375 ENTRY(initial_code)
18376@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
18377 /*
18378 * BSS section
18379 */
18380-__PAGE_ALIGNED_BSS
18381- .align PAGE_SIZE_asm
18382 #ifdef CONFIG_X86_PAE
18383+.section .swapper_pg_pmd,"a",@progbits
18384 swapper_pg_pmd:
18385 .fill 1024*KPMDS,4,0
18386 #else
18387+.section .swapper_pg_dir,"a",@progbits
18388 ENTRY(swapper_pg_dir)
18389 .fill 1024,4,0
18390 #endif
18391+.section .swapper_pg_fixmap,"a",@progbits
18392 swapper_pg_fixmap:
18393 .fill 1024,4,0
18394 #ifdef CONFIG_X86_TRAMPOLINE
18395+.section .trampoline_pg_dir,"a",@progbits
18396 ENTRY(trampoline_pg_dir)
18397+#ifdef CONFIG_X86_PAE
18398+ .fill 4,8,0
18399+#else
18400 .fill 1024,4,0
18401 #endif
18402+#endif
18403+
18404+.section .empty_zero_page,"a",@progbits
18405 ENTRY(empty_zero_page)
18406 .fill 4096,1,0
18407
18408 /*
18409+ * The IDT has to be page-aligned to simplify the Pentium
18410+ * F0 0F bug workaround.. We have a special link segment
18411+ * for this.
18412+ */
18413+.section .idt,"a",@progbits
18414+ENTRY(idt_table)
18415+ .fill 256,8,0
18416+
18417+/*
18418 * This starts the data section.
18419 */
18420 #ifdef CONFIG_X86_PAE
18421-__PAGE_ALIGNED_DATA
18422- /* Page-aligned for the benefit of paravirt? */
18423- .align PAGE_SIZE_asm
18424+.section .swapper_pg_dir,"a",@progbits
18425+
18426 ENTRY(swapper_pg_dir)
18427 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18428 # if KPMDS == 3
18429@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
18430 # error "Kernel PMDs should be 1, 2 or 3"
18431 # endif
18432 .align PAGE_SIZE_asm /* needs to be page-sized too */
18433+
18434+#ifdef CONFIG_PAX_PER_CPU_PGD
18435+ENTRY(cpu_pgd)
18436+ .rept NR_CPUS
18437+ .fill 4,8,0
18438+ .endr
18439+#endif
18440+
18441 #endif
18442
18443 .data
18444+.balign 4
18445 ENTRY(stack_start)
18446- .long init_thread_union+THREAD_SIZE
18447- .long __BOOT_DS
18448+ .long init_thread_union+THREAD_SIZE-8
18449
18450 ready: .byte 0
18451
18452+.section .rodata,"a",@progbits
18453 early_recursion_flag:
18454 .long 0
18455
18456@@ -697,7 +809,7 @@ fault_msg:
18457 .word 0 # 32 bit align gdt_desc.address
18458 boot_gdt_descr:
18459 .word __BOOT_DS+7
18460- .long boot_gdt - __PAGE_OFFSET
18461+ .long pa(boot_gdt)
18462
18463 .word 0 # 32-bit align idt_desc.address
18464 idt_descr:
18465@@ -708,7 +820,7 @@ idt_descr:
18466 .word 0 # 32 bit align gdt_desc.address
18467 ENTRY(early_gdt_descr)
18468 .word GDT_ENTRIES*8-1
18469- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
18470+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18471
18472 /*
18473 * The boot_gdt must mirror the equivalent in setup.S and is
18474@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
18475 .align L1_CACHE_BYTES
18476 ENTRY(boot_gdt)
18477 .fill GDT_ENTRY_BOOT_CS,8,0
18478- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18479- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18480+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18481+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18482+
18483+ .align PAGE_SIZE_asm
18484+ENTRY(cpu_gdt_table)
18485+ .rept NR_CPUS
18486+ .quad 0x0000000000000000 /* NULL descriptor */
18487+ .quad 0x0000000000000000 /* 0x0b reserved */
18488+ .quad 0x0000000000000000 /* 0x13 reserved */
18489+ .quad 0x0000000000000000 /* 0x1b reserved */
18490+
18491+#ifdef CONFIG_PAX_KERNEXEC
18492+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18493+#else
18494+ .quad 0x0000000000000000 /* 0x20 unused */
18495+#endif
18496+
18497+ .quad 0x0000000000000000 /* 0x28 unused */
18498+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18499+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18500+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18501+ .quad 0x0000000000000000 /* 0x4b reserved */
18502+ .quad 0x0000000000000000 /* 0x53 reserved */
18503+ .quad 0x0000000000000000 /* 0x5b reserved */
18504+
18505+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18506+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18507+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18508+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18509+
18510+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18511+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18512+
18513+ /*
18514+ * Segments used for calling PnP BIOS have byte granularity.
18515+ * The code segments and data segments have fixed 64k limits,
18516+ * the transfer segment sizes are set at run time.
18517+ */
18518+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18519+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18520+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18521+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18522+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18523+
18524+ /*
18525+ * The APM segments have byte granularity and their bases
18526+ * are set at run time. All have 64k limits.
18527+ */
18528+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18529+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18530+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18531+
18532+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18533+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18534+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18535+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18536+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18537+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18538+
18539+ /* Be sure this is zeroed to avoid false validations in Xen */
18540+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18541+ .endr
18542diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18543index 780cd92..758b2a6 100644
18544--- a/arch/x86/kernel/head_64.S
18545+++ b/arch/x86/kernel/head_64.S
18546@@ -19,6 +19,8 @@
18547 #include <asm/cache.h>
18548 #include <asm/processor-flags.h>
18549 #include <asm/percpu.h>
18550+#include <asm/cpufeature.h>
18551+#include <asm/alternative-asm.h>
18552
18553 #ifdef CONFIG_PARAVIRT
18554 #include <asm/asm-offsets.h>
18555@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18556 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18557 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18558 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18559+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18560+L3_VMALLOC_START = pud_index(VMALLOC_START)
18561+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18562+L3_VMALLOC_END = pud_index(VMALLOC_END)
18563+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18564+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18565
18566 .text
18567 __HEAD
18568@@ -85,35 +93,23 @@ startup_64:
18569 */
18570 addq %rbp, init_level4_pgt + 0(%rip)
18571 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18572+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18573+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18574+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18575 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18576
18577 addq %rbp, level3_ident_pgt + 0(%rip)
18578+#ifndef CONFIG_XEN
18579+ addq %rbp, level3_ident_pgt + 8(%rip)
18580+#endif
18581
18582- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18583- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18584+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18585+
18586+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18587+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18588
18589 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18590-
18591- /* Add an Identity mapping if I am above 1G */
18592- leaq _text(%rip), %rdi
18593- andq $PMD_PAGE_MASK, %rdi
18594-
18595- movq %rdi, %rax
18596- shrq $PUD_SHIFT, %rax
18597- andq $(PTRS_PER_PUD - 1), %rax
18598- jz ident_complete
18599-
18600- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18601- leaq level3_ident_pgt(%rip), %rbx
18602- movq %rdx, 0(%rbx, %rax, 8)
18603-
18604- movq %rdi, %rax
18605- shrq $PMD_SHIFT, %rax
18606- andq $(PTRS_PER_PMD - 1), %rax
18607- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18608- leaq level2_spare_pgt(%rip), %rbx
18609- movq %rdx, 0(%rbx, %rax, 8)
18610-ident_complete:
18611+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18612
18613 /*
18614 * Fixup the kernel text+data virtual addresses. Note that
18615@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
18616 * after the boot processor executes this code.
18617 */
18618
18619- /* Enable PAE mode and PGE */
18620- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18621+ /* Enable PAE mode and PSE/PGE */
18622+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18623 movq %rax, %cr4
18624
18625 /* Setup early boot stage 4 level pagetables. */
18626@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
18627 movl $MSR_EFER, %ecx
18628 rdmsr
18629 btsl $_EFER_SCE, %eax /* Enable System Call */
18630- btl $20,%edi /* No Execute supported? */
18631+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18632 jnc 1f
18633 btsl $_EFER_NX, %eax
18634+ leaq init_level4_pgt(%rip), %rdi
18635+#ifndef CONFIG_EFI
18636+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18637+#endif
18638+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18639+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18640+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18641 1: wrmsr /* Make changes effective */
18642
18643 /* Setup cr0 */
18644@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
18645 * jump. In addition we need to ensure %cs is set so we make this
18646 * a far return.
18647 */
18648+ pax_set_fptr_mask
18649 movq initial_code(%rip),%rax
18650 pushq $0 # fake return address to stop unwinder
18651 pushq $__KERNEL_CS # set correct cs
18652@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
18653 .quad x86_64_start_kernel
18654 ENTRY(initial_gs)
18655 .quad INIT_PER_CPU_VAR(irq_stack_union)
18656- __FINITDATA
18657
18658 ENTRY(stack_start)
18659 .quad init_thread_union+THREAD_SIZE-8
18660 .word 0
18661+ __FINITDATA
18662
18663 bad_address:
18664 jmp bad_address
18665
18666- .section ".init.text","ax"
18667+ __INIT
18668 #ifdef CONFIG_EARLY_PRINTK
18669 .globl early_idt_handlers
18670 early_idt_handlers:
18671@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
18672 #endif /* EARLY_PRINTK */
18673 1: hlt
18674 jmp 1b
18675+ .previous
18676
18677 #ifdef CONFIG_EARLY_PRINTK
18678+ __INITDATA
18679 early_recursion_flag:
18680 .long 0
18681+ .previous
18682
18683+ .section .rodata,"a",@progbits
18684 early_idt_msg:
18685 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18686 early_idt_ripmsg:
18687 .asciz "RIP %s\n"
18688+ .previous
18689 #endif /* CONFIG_EARLY_PRINTK */
18690- .previous
18691
18692+ .section .rodata,"a",@progbits
18693 #define NEXT_PAGE(name) \
18694 .balign PAGE_SIZE; \
18695 ENTRY(name)
18696@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
18697 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18698 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18699 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18700+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18701+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18702+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18703+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18704+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18705+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18706 .org init_level4_pgt + L4_START_KERNEL*8, 0
18707 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18708 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18709
18710+#ifdef CONFIG_PAX_PER_CPU_PGD
18711+NEXT_PAGE(cpu_pgd)
18712+ .rept NR_CPUS
18713+ .fill 512,8,0
18714+ .endr
18715+#endif
18716+
18717 NEXT_PAGE(level3_ident_pgt)
18718 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18719+#ifdef CONFIG_XEN
18720 .fill 511,8,0
18721+#else
18722+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18723+ .fill 510,8,0
18724+#endif
18725+
18726+NEXT_PAGE(level3_vmalloc_start_pgt)
18727+ .fill 512,8,0
18728+
18729+NEXT_PAGE(level3_vmalloc_end_pgt)
18730+ .fill 512,8,0
18731+
18732+NEXT_PAGE(level3_vmemmap_pgt)
18733+ .fill L3_VMEMMAP_START,8,0
18734+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18735
18736 NEXT_PAGE(level3_kernel_pgt)
18737 .fill L3_START_KERNEL,8,0
18738@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
18739 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18740 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18741
18742+NEXT_PAGE(level2_vmemmap_pgt)
18743+ .fill 512,8,0
18744+
18745 NEXT_PAGE(level2_fixmap_pgt)
18746- .fill 506,8,0
18747- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18748- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18749- .fill 5,8,0
18750+ .fill 507,8,0
18751+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18752+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18753+ .fill 4,8,0
18754
18755-NEXT_PAGE(level1_fixmap_pgt)
18756+NEXT_PAGE(level1_vsyscall_pgt)
18757 .fill 512,8,0
18758
18759-NEXT_PAGE(level2_ident_pgt)
18760- /* Since I easily can, map the first 1G.
18761+ /* Since I easily can, map the first 2G.
18762 * Don't set NX because code runs from these pages.
18763 */
18764- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18765+NEXT_PAGE(level2_ident_pgt)
18766+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18767
18768 NEXT_PAGE(level2_kernel_pgt)
18769 /*
18770@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
18771 * If you want to increase this then increase MODULES_VADDR
18772 * too.)
18773 */
18774- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18775- KERNEL_IMAGE_SIZE/PMD_SIZE)
18776-
18777-NEXT_PAGE(level2_spare_pgt)
18778- .fill 512, 8, 0
18779+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18780
18781 #undef PMDS
18782 #undef NEXT_PAGE
18783
18784- .data
18785+ .align PAGE_SIZE
18786+ENTRY(cpu_gdt_table)
18787+ .rept NR_CPUS
18788+ .quad 0x0000000000000000 /* NULL descriptor */
18789+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18790+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18791+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18792+ .quad 0x00cffb000000ffff /* __USER32_CS */
18793+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18794+ .quad 0x00affb000000ffff /* __USER_CS */
18795+
18796+#ifdef CONFIG_PAX_KERNEXEC
18797+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18798+#else
18799+ .quad 0x0 /* unused */
18800+#endif
18801+
18802+ .quad 0,0 /* TSS */
18803+ .quad 0,0 /* LDT */
18804+ .quad 0,0,0 /* three TLS descriptors */
18805+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18806+ /* asm/segment.h:GDT_ENTRIES must match this */
18807+
18808+ /* zero the remaining page */
18809+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18810+ .endr
18811+
18812 .align 16
18813 .globl early_gdt_descr
18814 early_gdt_descr:
18815 .word GDT_ENTRIES*8-1
18816 early_gdt_descr_base:
18817- .quad INIT_PER_CPU_VAR(gdt_page)
18818+ .quad cpu_gdt_table
18819
18820 ENTRY(phys_base)
18821 /* This must match the first entry in level2_kernel_pgt */
18822 .quad 0x0000000000000000
18823
18824 #include "../../x86/xen/xen-head.S"
18825-
18826- .section .bss, "aw", @nobits
18827+
18828+ .section .rodata,"a",@progbits
18829 .align L1_CACHE_BYTES
18830 ENTRY(idt_table)
18831- .skip IDT_ENTRIES * 16
18832+ .fill 512,8,0
18833
18834 __PAGE_ALIGNED_BSS
18835 .align PAGE_SIZE
18836diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18837index 9c3bd4a..e1d9b35 100644
18838--- a/arch/x86/kernel/i386_ksyms_32.c
18839+++ b/arch/x86/kernel/i386_ksyms_32.c
18840@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18841 EXPORT_SYMBOL(cmpxchg8b_emu);
18842 #endif
18843
18844+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18845+
18846 /* Networking helper routines. */
18847 EXPORT_SYMBOL(csum_partial_copy_generic);
18848+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18849+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18850
18851 EXPORT_SYMBOL(__get_user_1);
18852 EXPORT_SYMBOL(__get_user_2);
18853@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18854
18855 EXPORT_SYMBOL(csum_partial);
18856 EXPORT_SYMBOL(empty_zero_page);
18857+
18858+#ifdef CONFIG_PAX_KERNEXEC
18859+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18860+#endif
18861diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
18862index f2f8540..d845509 100644
18863--- a/arch/x86/kernel/i387.c
18864+++ b/arch/x86/kernel/i387.c
18865@@ -176,6 +176,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
18866
18867 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18868 unsigned int pos, unsigned int count,
18869+ void *kbuf, void __user *ubuf) __size_overflow(4);
18870+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18871+ unsigned int pos, unsigned int count,
18872 void *kbuf, void __user *ubuf)
18873 {
18874 int ret;
18875@@ -193,6 +196,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
18876
18877 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18878 unsigned int pos, unsigned int count,
18879+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
18880+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
18881+ unsigned int pos, unsigned int count,
18882 const void *kbuf, const void __user *ubuf)
18883 {
18884 int ret;
18885@@ -365,6 +371,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
18886
18887 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18888 unsigned int pos, unsigned int count,
18889+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
18890+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18891+ unsigned int pos, unsigned int count,
18892 void *kbuf, void __user *ubuf)
18893 {
18894 struct user_i387_ia32_struct env;
18895@@ -395,6 +404,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
18896
18897 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18898 unsigned int pos, unsigned int count,
18899+ const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
18900+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
18901+ unsigned int pos, unsigned int count,
18902 const void *kbuf, const void __user *ubuf)
18903 {
18904 struct user_i387_ia32_struct env;
18905@@ -540,6 +552,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
18906 }
18907
18908 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18909+ unsigned int size) __size_overflow(2);
18910+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
18911 unsigned int size)
18912 {
18913 struct task_struct *tsk = current;
18914diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18915index df89102..a244320 100644
18916--- a/arch/x86/kernel/i8259.c
18917+++ b/arch/x86/kernel/i8259.c
18918@@ -208,7 +208,7 @@ spurious_8259A_irq:
18919 "spurious 8259A interrupt: IRQ%d.\n", irq);
18920 spurious_irq_mask |= irqmask;
18921 }
18922- atomic_inc(&irq_err_count);
18923+ atomic_inc_unchecked(&irq_err_count);
18924 /*
18925 * Theoretically we do not have to handle this IRQ,
18926 * but in Linux this does not cause problems and is
18927diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
18928index 3a54dcb..1c22348 100644
18929--- a/arch/x86/kernel/init_task.c
18930+++ b/arch/x86/kernel/init_task.c
18931@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
18932 * way process stacks are handled. This is done by having a special
18933 * "init_task" linker map entry..
18934 */
18935-union thread_union init_thread_union __init_task_data =
18936- { INIT_THREAD_INFO(init_task) };
18937+union thread_union init_thread_union __init_task_data;
18938
18939 /*
18940 * Initial task structure.
18941@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
18942 * section. Since TSS's are completely CPU-local, we want them
18943 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18944 */
18945-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18946-
18947+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18948+EXPORT_SYMBOL(init_tss);
18949diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18950index 99c4d30..74c84e9 100644
18951--- a/arch/x86/kernel/ioport.c
18952+++ b/arch/x86/kernel/ioport.c
18953@@ -6,6 +6,7 @@
18954 #include <linux/sched.h>
18955 #include <linux/kernel.h>
18956 #include <linux/capability.h>
18957+#include <linux/security.h>
18958 #include <linux/errno.h>
18959 #include <linux/types.h>
18960 #include <linux/ioport.h>
18961@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18962
18963 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18964 return -EINVAL;
18965+#ifdef CONFIG_GRKERNSEC_IO
18966+ if (turn_on && grsec_disable_privio) {
18967+ gr_handle_ioperm();
18968+ return -EPERM;
18969+ }
18970+#endif
18971 if (turn_on && !capable(CAP_SYS_RAWIO))
18972 return -EPERM;
18973
18974@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18975 * because the ->io_bitmap_max value must match the bitmap
18976 * contents:
18977 */
18978- tss = &per_cpu(init_tss, get_cpu());
18979+ tss = init_tss + get_cpu();
18980
18981 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
18982
18983@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
18984 return -EINVAL;
18985 /* Trying to gain more privileges? */
18986 if (level > old) {
18987+#ifdef CONFIG_GRKERNSEC_IO
18988+ if (grsec_disable_privio) {
18989+ gr_handle_iopl();
18990+ return -EPERM;
18991+ }
18992+#endif
18993 if (!capable(CAP_SYS_RAWIO))
18994 return -EPERM;
18995 }
18996diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18997index 04bbd52..83a07d9 100644
18998--- a/arch/x86/kernel/irq.c
18999+++ b/arch/x86/kernel/irq.c
19000@@ -15,7 +15,7 @@
19001 #include <asm/mce.h>
19002 #include <asm/hw_irq.h>
19003
19004-atomic_t irq_err_count;
19005+atomic_unchecked_t irq_err_count;
19006
19007 /* Function pointer for generic interrupt vector handling */
19008 void (*generic_interrupt_extension)(void) = NULL;
19009@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
19010 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
19011 seq_printf(p, " Machine check polls\n");
19012 #endif
19013- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
19014+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
19015 #if defined(CONFIG_X86_IO_APIC)
19016- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
19017+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
19018 #endif
19019 return 0;
19020 }
19021@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
19022
19023 u64 arch_irq_stat(void)
19024 {
19025- u64 sum = atomic_read(&irq_err_count);
19026+ u64 sum = atomic_read_unchecked(&irq_err_count);
19027
19028 #ifdef CONFIG_X86_IO_APIC
19029- sum += atomic_read(&irq_mis_count);
19030+ sum += atomic_read_unchecked(&irq_mis_count);
19031 #endif
19032 return sum;
19033 }
19034diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
19035index 7d35d0f..03f1d52 100644
19036--- a/arch/x86/kernel/irq_32.c
19037+++ b/arch/x86/kernel/irq_32.c
19038@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
19039 __asm__ __volatile__("andl %%esp,%0" :
19040 "=r" (sp) : "0" (THREAD_SIZE - 1));
19041
19042- return sp < (sizeof(struct thread_info) + STACK_WARN);
19043+ return sp < STACK_WARN;
19044 }
19045
19046 static void print_stack_overflow(void)
19047@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
19048 * per-CPU IRQ handling contexts (thread information and stack)
19049 */
19050 union irq_ctx {
19051- struct thread_info tinfo;
19052- u32 stack[THREAD_SIZE/sizeof(u32)];
19053-} __attribute__((aligned(PAGE_SIZE)));
19054+ unsigned long previous_esp;
19055+ u32 stack[THREAD_SIZE/sizeof(u32)];
19056+} __attribute__((aligned(THREAD_SIZE)));
19057
19058 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
19059 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
19060@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
19061 static inline int
19062 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19063 {
19064- union irq_ctx *curctx, *irqctx;
19065+ union irq_ctx *irqctx;
19066 u32 *isp, arg1, arg2;
19067
19068- curctx = (union irq_ctx *) current_thread_info();
19069 irqctx = __get_cpu_var(hardirq_ctx);
19070
19071 /*
19072@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19073 * handler) we can't do that and just have to keep using the
19074 * current stack (which is the irq stack already after all)
19075 */
19076- if (unlikely(curctx == irqctx))
19077+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
19078 return 0;
19079
19080 /* build the stack frame on the IRQ stack */
19081- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19082- irqctx->tinfo.task = curctx->tinfo.task;
19083- irqctx->tinfo.previous_esp = current_stack_pointer;
19084+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19085+ irqctx->previous_esp = current_stack_pointer;
19086
19087- /*
19088- * Copy the softirq bits in preempt_count so that the
19089- * softirq checks work in the hardirq context.
19090- */
19091- irqctx->tinfo.preempt_count =
19092- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
19093- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
19094+#ifdef CONFIG_PAX_MEMORY_UDEREF
19095+ __set_fs(MAKE_MM_SEG(0));
19096+#endif
19097
19098 if (unlikely(overflow))
19099 call_on_stack(print_stack_overflow, isp);
19100@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19101 : "0" (irq), "1" (desc), "2" (isp),
19102 "D" (desc->handle_irq)
19103 : "memory", "cc", "ecx");
19104+
19105+#ifdef CONFIG_PAX_MEMORY_UDEREF
19106+ __set_fs(current_thread_info()->addr_limit);
19107+#endif
19108+
19109 return 1;
19110 }
19111
19112@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
19113 */
19114 void __cpuinit irq_ctx_init(int cpu)
19115 {
19116- union irq_ctx *irqctx;
19117-
19118 if (per_cpu(hardirq_ctx, cpu))
19119 return;
19120
19121- irqctx = &per_cpu(hardirq_stack, cpu);
19122- irqctx->tinfo.task = NULL;
19123- irqctx->tinfo.exec_domain = NULL;
19124- irqctx->tinfo.cpu = cpu;
19125- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
19126- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19127-
19128- per_cpu(hardirq_ctx, cpu) = irqctx;
19129-
19130- irqctx = &per_cpu(softirq_stack, cpu);
19131- irqctx->tinfo.task = NULL;
19132- irqctx->tinfo.exec_domain = NULL;
19133- irqctx->tinfo.cpu = cpu;
19134- irqctx->tinfo.preempt_count = 0;
19135- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
19136-
19137- per_cpu(softirq_ctx, cpu) = irqctx;
19138+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
19139+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
19140
19141 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
19142 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
19143@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
19144 asmlinkage void do_softirq(void)
19145 {
19146 unsigned long flags;
19147- struct thread_info *curctx;
19148 union irq_ctx *irqctx;
19149 u32 *isp;
19150
19151@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
19152 local_irq_save(flags);
19153
19154 if (local_softirq_pending()) {
19155- curctx = current_thread_info();
19156 irqctx = __get_cpu_var(softirq_ctx);
19157- irqctx->tinfo.task = curctx->task;
19158- irqctx->tinfo.previous_esp = current_stack_pointer;
19159+ irqctx->previous_esp = current_stack_pointer;
19160
19161 /* build the stack frame on the softirq stack */
19162- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
19163+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
19164+
19165+#ifdef CONFIG_PAX_MEMORY_UDEREF
19166+ __set_fs(MAKE_MM_SEG(0));
19167+#endif
19168
19169 call_on_stack(__do_softirq, isp);
19170+
19171+#ifdef CONFIG_PAX_MEMORY_UDEREF
19172+ __set_fs(current_thread_info()->addr_limit);
19173+#endif
19174+
19175 /*
19176 * Shouldnt happen, we returned above if in_interrupt():
19177 */
19178diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
19179index 8d82a77..0baf312 100644
19180--- a/arch/x86/kernel/kgdb.c
19181+++ b/arch/x86/kernel/kgdb.c
19182@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
19183
19184 /* clear the trace bit */
19185 linux_regs->flags &= ~X86_EFLAGS_TF;
19186- atomic_set(&kgdb_cpu_doing_single_step, -1);
19187+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
19188
19189 /* set the trace bit if we're stepping */
19190 if (remcomInBuffer[0] == 's') {
19191 linux_regs->flags |= X86_EFLAGS_TF;
19192 kgdb_single_step = 1;
19193- atomic_set(&kgdb_cpu_doing_single_step,
19194+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
19195 raw_smp_processor_id());
19196 }
19197
19198@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
19199 break;
19200
19201 case DIE_DEBUG:
19202- if (atomic_read(&kgdb_cpu_doing_single_step) ==
19203+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
19204 raw_smp_processor_id()) {
19205 if (user_mode(regs))
19206 return single_step_cont(regs, args);
19207@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
19208 return instruction_pointer(regs);
19209 }
19210
19211-struct kgdb_arch arch_kgdb_ops = {
19212+const struct kgdb_arch arch_kgdb_ops = {
19213 /* Breakpoint instruction: */
19214 .gdb_bpt_instr = { 0xcc },
19215 .flags = KGDB_HW_BREAKPOINT,
19216diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19217index 7a67820..70ea187 100644
19218--- a/arch/x86/kernel/kprobes.c
19219+++ b/arch/x86/kernel/kprobes.c
19220@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
19221 char op;
19222 s32 raddr;
19223 } __attribute__((packed)) * jop;
19224- jop = (struct __arch_jmp_op *)from;
19225+
19226+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
19227+
19228+ pax_open_kernel();
19229 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
19230 jop->op = RELATIVEJUMP_INSTRUCTION;
19231+ pax_close_kernel();
19232 }
19233
19234 /*
19235@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
19236 kprobe_opcode_t opcode;
19237 kprobe_opcode_t *orig_opcodes = opcodes;
19238
19239- if (search_exception_tables((unsigned long)opcodes))
19240+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19241 return 0; /* Page fault may occur on this address. */
19242
19243 retry:
19244@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
19245 disp = (u8 *) p->addr + *((s32 *) insn) -
19246 (u8 *) p->ainsn.insn;
19247 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
19248+ pax_open_kernel();
19249 *(s32 *)insn = (s32) disp;
19250+ pax_close_kernel();
19251 }
19252 }
19253 #endif
19254@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
19255
19256 static void __kprobes arch_copy_kprobe(struct kprobe *p)
19257 {
19258- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19259+ pax_open_kernel();
19260+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19261+ pax_close_kernel();
19262
19263 fix_riprel(p);
19264
19265- if (can_boost(p->addr))
19266+ if (can_boost(ktla_ktva(p->addr)))
19267 p->ainsn.boostable = 0;
19268 else
19269 p->ainsn.boostable = -1;
19270
19271- p->opcode = *p->addr;
19272+ p->opcode = *(ktla_ktva(p->addr));
19273 }
19274
19275 int __kprobes arch_prepare_kprobe(struct kprobe *p)
19276@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
19277 if (p->opcode == BREAKPOINT_INSTRUCTION)
19278 regs->ip = (unsigned long)p->addr;
19279 else
19280- regs->ip = (unsigned long)p->ainsn.insn;
19281+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19282 }
19283
19284 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
19285@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
19286 if (p->ainsn.boostable == 1 && !p->post_handler) {
19287 /* Boost up -- we can execute copied instructions directly */
19288 reset_current_kprobe();
19289- regs->ip = (unsigned long)p->ainsn.insn;
19290+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19291 preempt_enable_no_resched();
19292 return;
19293 }
19294@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19295 struct kprobe_ctlblk *kcb;
19296
19297 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
19298- if (*addr != BREAKPOINT_INSTRUCTION) {
19299+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19300 /*
19301 * The breakpoint instruction was removed right
19302 * after we hit it. Another cpu has removed
19303@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19304 /* Skip orig_ax, ip, cs */
19305 " addq $24, %rsp\n"
19306 " popfq\n"
19307+#ifdef KERNEXEC_PLUGIN
19308+ " btsq $63,(%rsp)\n"
19309+#endif
19310 #else
19311 " pushf\n"
19312 /*
19313@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
19314 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19315 {
19316 unsigned long *tos = stack_addr(regs);
19317- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19318+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19319 unsigned long orig_ip = (unsigned long)p->addr;
19320 kprobe_opcode_t *insn = p->ainsn.insn;
19321
19322@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
19323 struct die_args *args = data;
19324 int ret = NOTIFY_DONE;
19325
19326- if (args->regs && user_mode_vm(args->regs))
19327+ if (args->regs && user_mode(args->regs))
19328 return ret;
19329
19330 switch (val) {
19331diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19332index 63b0ec8..6d92227 100644
19333--- a/arch/x86/kernel/kvm.c
19334+++ b/arch/x86/kernel/kvm.c
19335@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
19336 pv_mmu_ops.set_pud = kvm_set_pud;
19337 #if PAGETABLE_LEVELS == 4
19338 pv_mmu_ops.set_pgd = kvm_set_pgd;
19339+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
19340 #endif
19341 #endif
19342 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
19343diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19344index ec6ef60..d784780 100644
19345--- a/arch/x86/kernel/ldt.c
19346+++ b/arch/x86/kernel/ldt.c
19347@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19348 if (reload) {
19349 #ifdef CONFIG_SMP
19350 preempt_disable();
19351- load_LDT(pc);
19352+ load_LDT_nolock(pc);
19353 if (!cpumask_equal(mm_cpumask(current->mm),
19354 cpumask_of(smp_processor_id())))
19355 smp_call_function(flush_ldt, current->mm, 1);
19356 preempt_enable();
19357 #else
19358- load_LDT(pc);
19359+ load_LDT_nolock(pc);
19360 #endif
19361 }
19362 if (oldsize) {
19363@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19364 return err;
19365
19366 for (i = 0; i < old->size; i++)
19367- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19368+ write_ldt_entry(new->ldt, i, old->ldt + i);
19369 return 0;
19370 }
19371
19372@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19373 retval = copy_ldt(&mm->context, &old_mm->context);
19374 mutex_unlock(&old_mm->context.lock);
19375 }
19376+
19377+ if (tsk == current) {
19378+ mm->context.vdso = 0;
19379+
19380+#ifdef CONFIG_X86_32
19381+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19382+ mm->context.user_cs_base = 0UL;
19383+ mm->context.user_cs_limit = ~0UL;
19384+
19385+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19386+ cpus_clear(mm->context.cpu_user_cs_mask);
19387+#endif
19388+
19389+#endif
19390+#endif
19391+
19392+ }
19393+
19394 return retval;
19395 }
19396
19397@@ -140,6 +158,7 @@ void destroy_context(struct mm_struct *mm)
19398 }
19399 }
19400
19401+static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
19402 static int read_ldt(void __user *ptr, unsigned long bytecount)
19403 {
19404 int err;
19405@@ -229,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19406 }
19407 }
19408
19409+#ifdef CONFIG_PAX_SEGMEXEC
19410+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19411+ error = -EINVAL;
19412+ goto out_unlock;
19413+ }
19414+#endif
19415+
19416 fill_ldt(&ldt, &ldt_info);
19417 if (oldmode)
19418 ldt.avl = 0;
19419diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19420index c1c429d..f02eaf9 100644
19421--- a/arch/x86/kernel/machine_kexec_32.c
19422+++ b/arch/x86/kernel/machine_kexec_32.c
19423@@ -26,7 +26,7 @@
19424 #include <asm/system.h>
19425 #include <asm/cacheflush.h>
19426
19427-static void set_idt(void *newidt, __u16 limit)
19428+static void set_idt(struct desc_struct *newidt, __u16 limit)
19429 {
19430 struct desc_ptr curidt;
19431
19432@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19433 }
19434
19435
19436-static void set_gdt(void *newgdt, __u16 limit)
19437+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19438 {
19439 struct desc_ptr curgdt;
19440
19441@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
19442 }
19443
19444 control_page = page_address(image->control_code_page);
19445- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19446+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19447
19448 relocate_kernel_ptr = control_page;
19449 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19450diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
19451index 1e47679..e73449d 100644
19452--- a/arch/x86/kernel/microcode_amd.c
19453+++ b/arch/x86/kernel/microcode_amd.c
19454@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
19455 uci->mc = NULL;
19456 }
19457
19458-static struct microcode_ops microcode_amd_ops = {
19459+static const struct microcode_ops microcode_amd_ops = {
19460 .request_microcode_user = request_microcode_user,
19461 .request_microcode_fw = request_microcode_fw,
19462 .collect_cpu_info = collect_cpu_info_amd,
19463@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
19464 .microcode_fini_cpu = microcode_fini_cpu_amd,
19465 };
19466
19467-struct microcode_ops * __init init_amd_microcode(void)
19468+const struct microcode_ops * __init init_amd_microcode(void)
19469 {
19470 return &microcode_amd_ops;
19471 }
19472diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
19473index 378e9a8..b5a6ea9 100644
19474--- a/arch/x86/kernel/microcode_core.c
19475+++ b/arch/x86/kernel/microcode_core.c
19476@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
19477
19478 #define MICROCODE_VERSION "2.00"
19479
19480-static struct microcode_ops *microcode_ops;
19481+static const struct microcode_ops *microcode_ops;
19482
19483 /*
19484 * Synchronization.
19485diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19486index 0d334dd..5a709b5 100644
19487--- a/arch/x86/kernel/microcode_intel.c
19488+++ b/arch/x86/kernel/microcode_intel.c
19489@@ -441,15 +441,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
19490 return ret;
19491 }
19492
19493+static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
19494 static int get_ucode_user(void *to, const void *from, size_t n)
19495 {
19496- return copy_from_user(to, from, n);
19497+ return copy_from_user(to, (const void __force_user *)from, n);
19498 }
19499
19500 static enum ucode_state
19501 request_microcode_user(int cpu, const void __user *buf, size_t size)
19502 {
19503- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19504+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19505 }
19506
19507 static void microcode_fini_cpu(int cpu)
19508@@ -460,7 +461,7 @@ static void microcode_fini_cpu(int cpu)
19509 uci->mc = NULL;
19510 }
19511
19512-static struct microcode_ops microcode_intel_ops = {
19513+static const struct microcode_ops microcode_intel_ops = {
19514 .request_microcode_user = request_microcode_user,
19515 .request_microcode_fw = request_microcode_fw,
19516 .collect_cpu_info = collect_cpu_info,
19517@@ -468,7 +469,7 @@ static struct microcode_ops microcode_intel_ops = {
19518 .microcode_fini_cpu = microcode_fini_cpu,
19519 };
19520
19521-struct microcode_ops * __init init_intel_microcode(void)
19522+const struct microcode_ops * __init init_intel_microcode(void)
19523 {
19524 return &microcode_intel_ops;
19525 }
19526diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19527index 89f386f..9028f51 100644
19528--- a/arch/x86/kernel/module.c
19529+++ b/arch/x86/kernel/module.c
19530@@ -34,7 +34,7 @@
19531 #define DEBUGP(fmt...)
19532 #endif
19533
19534-void *module_alloc(unsigned long size)
19535+static void *__module_alloc(unsigned long size, pgprot_t prot)
19536 {
19537 struct vm_struct *area;
19538
19539@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
19540 if (!area)
19541 return NULL;
19542
19543- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
19544- PAGE_KERNEL_EXEC);
19545+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
19546+}
19547+
19548+void *module_alloc(unsigned long size)
19549+{
19550+
19551+#ifdef CONFIG_PAX_KERNEXEC
19552+ return __module_alloc(size, PAGE_KERNEL);
19553+#else
19554+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19555+#endif
19556+
19557 }
19558
19559 /* Free memory returned from module_alloc */
19560@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
19561 vfree(module_region);
19562 }
19563
19564+#ifdef CONFIG_PAX_KERNEXEC
19565+#ifdef CONFIG_X86_32
19566+void *module_alloc_exec(unsigned long size)
19567+{
19568+ struct vm_struct *area;
19569+
19570+ if (size == 0)
19571+ return NULL;
19572+
19573+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19574+ return area ? area->addr : NULL;
19575+}
19576+EXPORT_SYMBOL(module_alloc_exec);
19577+
19578+void module_free_exec(struct module *mod, void *module_region)
19579+{
19580+ vunmap(module_region);
19581+}
19582+EXPORT_SYMBOL(module_free_exec);
19583+#else
19584+void module_free_exec(struct module *mod, void *module_region)
19585+{
19586+ module_free(mod, module_region);
19587+}
19588+EXPORT_SYMBOL(module_free_exec);
19589+
19590+void *module_alloc_exec(unsigned long size)
19591+{
19592+ return __module_alloc(size, PAGE_KERNEL_RX);
19593+}
19594+EXPORT_SYMBOL(module_alloc_exec);
19595+#endif
19596+#endif
19597+
19598 /* We don't need anything special. */
19599 int module_frob_arch_sections(Elf_Ehdr *hdr,
19600 Elf_Shdr *sechdrs,
19601@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19602 unsigned int i;
19603 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19604 Elf32_Sym *sym;
19605- uint32_t *location;
19606+ uint32_t *plocation, location;
19607
19608 DEBUGP("Applying relocate section %u to %u\n", relsec,
19609 sechdrs[relsec].sh_info);
19610 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19611 /* This is where to make the change */
19612- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19613- + rel[i].r_offset;
19614+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19615+ location = (uint32_t)plocation;
19616+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19617+ plocation = ktla_ktva((void *)plocation);
19618 /* This is the symbol it is referring to. Note that all
19619 undefined symbols have been resolved. */
19620 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19621@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19622 switch (ELF32_R_TYPE(rel[i].r_info)) {
19623 case R_386_32:
19624 /* We add the value into the location given */
19625- *location += sym->st_value;
19626+ pax_open_kernel();
19627+ *plocation += sym->st_value;
19628+ pax_close_kernel();
19629 break;
19630 case R_386_PC32:
19631 /* Add the value, subtract its postition */
19632- *location += sym->st_value - (uint32_t)location;
19633+ pax_open_kernel();
19634+ *plocation += sym->st_value - location;
19635+ pax_close_kernel();
19636 break;
19637 default:
19638 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
19639@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19640 case R_X86_64_NONE:
19641 break;
19642 case R_X86_64_64:
19643+ pax_open_kernel();
19644 *(u64 *)loc = val;
19645+ pax_close_kernel();
19646 break;
19647 case R_X86_64_32:
19648+ pax_open_kernel();
19649 *(u32 *)loc = val;
19650+ pax_close_kernel();
19651 if (val != *(u32 *)loc)
19652 goto overflow;
19653 break;
19654 case R_X86_64_32S:
19655+ pax_open_kernel();
19656 *(s32 *)loc = val;
19657+ pax_close_kernel();
19658 if ((s64)val != *(s32 *)loc)
19659 goto overflow;
19660 break;
19661 case R_X86_64_PC32:
19662 val -= (u64)loc;
19663+ pax_open_kernel();
19664 *(u32 *)loc = val;
19665+ pax_close_kernel();
19666+
19667 #if 0
19668 if ((s64)val != *(s32 *)loc)
19669 goto overflow;
19670diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19671index 3a7c5a4..9191528 100644
19672--- a/arch/x86/kernel/paravirt-spinlocks.c
19673+++ b/arch/x86/kernel/paravirt-spinlocks.c
19674@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
19675 __raw_spin_lock(lock);
19676 }
19677
19678-struct pv_lock_ops pv_lock_ops = {
19679+struct pv_lock_ops pv_lock_ops __read_only = {
19680 #ifdef CONFIG_SMP
19681 .spin_is_locked = __ticket_spin_is_locked,
19682 .spin_is_contended = __ticket_spin_is_contended,
19683diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19684index 1b1739d..dea6077 100644
19685--- a/arch/x86/kernel/paravirt.c
19686+++ b/arch/x86/kernel/paravirt.c
19687@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
19688 {
19689 return x;
19690 }
19691+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19692+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19693+#endif
19694
19695 void __init default_banner(void)
19696 {
19697@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
19698 * corresponding structure. */
19699 static void *get_call_destination(u8 type)
19700 {
19701- struct paravirt_patch_template tmpl = {
19702+ const struct paravirt_patch_template tmpl = {
19703 .pv_init_ops = pv_init_ops,
19704 .pv_time_ops = pv_time_ops,
19705 .pv_cpu_ops = pv_cpu_ops,
19706@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
19707 .pv_lock_ops = pv_lock_ops,
19708 #endif
19709 };
19710+
19711+ pax_track_stack();
19712 return *((void **)&tmpl + type);
19713 }
19714
19715@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19716 if (opfunc == NULL)
19717 /* If there's no function, patch it with a ud2a (BUG) */
19718 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19719- else if (opfunc == _paravirt_nop)
19720+ else if (opfunc == (void *)_paravirt_nop)
19721 /* If the operation is a nop, then nop the callsite */
19722 ret = paravirt_patch_nop();
19723
19724 /* identity functions just return their single argument */
19725- else if (opfunc == _paravirt_ident_32)
19726+ else if (opfunc == (void *)_paravirt_ident_32)
19727 ret = paravirt_patch_ident_32(insnbuf, len);
19728- else if (opfunc == _paravirt_ident_64)
19729+ else if (opfunc == (void *)_paravirt_ident_64)
19730 ret = paravirt_patch_ident_64(insnbuf, len);
19731+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19732+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19733+ ret = paravirt_patch_ident_64(insnbuf, len);
19734+#endif
19735
19736 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19737 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19738@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19739 if (insn_len > len || start == NULL)
19740 insn_len = len;
19741 else
19742- memcpy(insnbuf, start, insn_len);
19743+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19744
19745 return insn_len;
19746 }
19747@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
19748 preempt_enable();
19749 }
19750
19751-struct pv_info pv_info = {
19752+struct pv_info pv_info __read_only = {
19753 .name = "bare hardware",
19754 .paravirt_enabled = 0,
19755 .kernel_rpl = 0,
19756 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
19757 };
19758
19759-struct pv_init_ops pv_init_ops = {
19760+struct pv_init_ops pv_init_ops __read_only = {
19761 .patch = native_patch,
19762 };
19763
19764-struct pv_time_ops pv_time_ops = {
19765+struct pv_time_ops pv_time_ops __read_only = {
19766 .sched_clock = native_sched_clock,
19767 };
19768
19769-struct pv_irq_ops pv_irq_ops = {
19770+struct pv_irq_ops pv_irq_ops __read_only = {
19771 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19772 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19773 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19774@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
19775 #endif
19776 };
19777
19778-struct pv_cpu_ops pv_cpu_ops = {
19779+struct pv_cpu_ops pv_cpu_ops __read_only = {
19780 .cpuid = native_cpuid,
19781 .get_debugreg = native_get_debugreg,
19782 .set_debugreg = native_set_debugreg,
19783@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19784 .end_context_switch = paravirt_nop,
19785 };
19786
19787-struct pv_apic_ops pv_apic_ops = {
19788+struct pv_apic_ops pv_apic_ops __read_only = {
19789 #ifdef CONFIG_X86_LOCAL_APIC
19790 .startup_ipi_hook = paravirt_nop,
19791 #endif
19792 };
19793
19794-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19795+#ifdef CONFIG_X86_32
19796+#ifdef CONFIG_X86_PAE
19797+/* 64-bit pagetable entries */
19798+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19799+#else
19800 /* 32-bit pagetable entries */
19801 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19802+#endif
19803 #else
19804 /* 64-bit pagetable entries */
19805 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19806 #endif
19807
19808-struct pv_mmu_ops pv_mmu_ops = {
19809+struct pv_mmu_ops pv_mmu_ops __read_only = {
19810
19811 .read_cr2 = native_read_cr2,
19812 .write_cr2 = native_write_cr2,
19813@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19814 .make_pud = PTE_IDENT,
19815
19816 .set_pgd = native_set_pgd,
19817+ .set_pgd_batched = native_set_pgd_batched,
19818 #endif
19819 #endif /* PAGETABLE_LEVELS >= 3 */
19820
19821@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19822 },
19823
19824 .set_fixmap = native_set_fixmap,
19825+
19826+#ifdef CONFIG_PAX_KERNEXEC
19827+ .pax_open_kernel = native_pax_open_kernel,
19828+ .pax_close_kernel = native_pax_close_kernel,
19829+#endif
19830+
19831 };
19832
19833 EXPORT_SYMBOL_GPL(pv_time_ops);
19834diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19835index 1a2d4b1..6a0dd55 100644
19836--- a/arch/x86/kernel/pci-calgary_64.c
19837+++ b/arch/x86/kernel/pci-calgary_64.c
19838@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
19839 free_pages((unsigned long)vaddr, get_order(size));
19840 }
19841
19842-static struct dma_map_ops calgary_dma_ops = {
19843+static const struct dma_map_ops calgary_dma_ops = {
19844 .alloc_coherent = calgary_alloc_coherent,
19845 .free_coherent = calgary_free_coherent,
19846 .map_sg = calgary_map_sg,
19847diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
19848index 6ac3931..42b4414 100644
19849--- a/arch/x86/kernel/pci-dma.c
19850+++ b/arch/x86/kernel/pci-dma.c
19851@@ -14,7 +14,7 @@
19852
19853 static int forbid_dac __read_mostly;
19854
19855-struct dma_map_ops *dma_ops;
19856+const struct dma_map_ops *dma_ops;
19857 EXPORT_SYMBOL(dma_ops);
19858
19859 static int iommu_sac_force __read_mostly;
19860@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
19861
19862 int dma_supported(struct device *dev, u64 mask)
19863 {
19864- struct dma_map_ops *ops = get_dma_ops(dev);
19865+ const struct dma_map_ops *ops = get_dma_ops(dev);
19866
19867 #ifdef CONFIG_PCI
19868 if (mask > 0xffffffff && forbid_dac > 0) {
19869diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
19870index 1c76691..e3632db 100644
19871--- a/arch/x86/kernel/pci-gart_64.c
19872+++ b/arch/x86/kernel/pci-gart_64.c
19873@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
19874 return -1;
19875 }
19876
19877-static struct dma_map_ops gart_dma_ops = {
19878+static const struct dma_map_ops gart_dma_ops = {
19879 .map_sg = gart_map_sg,
19880 .unmap_sg = gart_unmap_sg,
19881 .map_page = gart_map_page,
19882diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
19883index a3933d4..c898869 100644
19884--- a/arch/x86/kernel/pci-nommu.c
19885+++ b/arch/x86/kernel/pci-nommu.c
19886@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
19887 flush_write_buffers();
19888 }
19889
19890-struct dma_map_ops nommu_dma_ops = {
19891+const struct dma_map_ops nommu_dma_ops = {
19892 .alloc_coherent = dma_generic_alloc_coherent,
19893 .free_coherent = nommu_free_coherent,
19894 .map_sg = nommu_map_sg,
19895diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
19896index aaa6b78..4de1881 100644
19897--- a/arch/x86/kernel/pci-swiotlb.c
19898+++ b/arch/x86/kernel/pci-swiotlb.c
19899@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
19900 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
19901 }
19902
19903-static struct dma_map_ops swiotlb_dma_ops = {
19904+static const struct dma_map_ops swiotlb_dma_ops = {
19905 .mapping_error = swiotlb_dma_mapping_error,
19906 .alloc_coherent = x86_swiotlb_alloc_coherent,
19907 .free_coherent = swiotlb_free_coherent,
19908diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19909index fc6c84d..0312ca2 100644
19910--- a/arch/x86/kernel/process.c
19911+++ b/arch/x86/kernel/process.c
19912@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
19913
19914 void free_thread_info(struct thread_info *ti)
19915 {
19916- free_thread_xstate(ti->task);
19917 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
19918 }
19919
19920+static struct kmem_cache *task_struct_cachep;
19921+
19922 void arch_task_cache_init(void)
19923 {
19924- task_xstate_cachep =
19925- kmem_cache_create("task_xstate", xstate_size,
19926+ /* create a slab on which task_structs can be allocated */
19927+ task_struct_cachep =
19928+ kmem_cache_create("task_struct", sizeof(struct task_struct),
19929+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
19930+
19931+ task_xstate_cachep =
19932+ kmem_cache_create("task_xstate", xstate_size,
19933 __alignof__(union thread_xstate),
19934- SLAB_PANIC | SLAB_NOTRACK, NULL);
19935+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19936+}
19937+
19938+struct task_struct *alloc_task_struct(void)
19939+{
19940+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
19941+}
19942+
19943+void free_task_struct(struct task_struct *task)
19944+{
19945+ free_thread_xstate(task);
19946+ kmem_cache_free(task_struct_cachep, task);
19947 }
19948
19949 /*
19950@@ -73,7 +90,7 @@ void exit_thread(void)
19951 unsigned long *bp = t->io_bitmap_ptr;
19952
19953 if (bp) {
19954- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19955+ struct tss_struct *tss = init_tss + get_cpu();
19956
19957 t->io_bitmap_ptr = NULL;
19958 clear_thread_flag(TIF_IO_BITMAP);
19959@@ -93,6 +110,9 @@ void flush_thread(void)
19960
19961 clear_tsk_thread_flag(tsk, TIF_DEBUG);
19962
19963+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19964+ loadsegment(gs, 0);
19965+#endif
19966 tsk->thread.debugreg0 = 0;
19967 tsk->thread.debugreg1 = 0;
19968 tsk->thread.debugreg2 = 0;
19969@@ -307,7 +327,7 @@ void default_idle(void)
19970 EXPORT_SYMBOL(default_idle);
19971 #endif
19972
19973-void stop_this_cpu(void *dummy)
19974+__noreturn void stop_this_cpu(void *dummy)
19975 {
19976 local_irq_disable();
19977 /*
19978@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
19979 }
19980 early_param("idle", idle_setup);
19981
19982-unsigned long arch_align_stack(unsigned long sp)
19983+#ifdef CONFIG_PAX_RANDKSTACK
19984+void pax_randomize_kstack(struct pt_regs *regs)
19985 {
19986- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19987- sp -= get_random_int() % 8192;
19988- return sp & ~0xf;
19989-}
19990+ struct thread_struct *thread = &current->thread;
19991+ unsigned long time;
19992
19993-unsigned long arch_randomize_brk(struct mm_struct *mm)
19994-{
19995- unsigned long range_end = mm->brk + 0x02000000;
19996- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19997+ if (!randomize_va_space)
19998+ return;
19999+
20000+ if (v8086_mode(regs))
20001+ return;
20002+
20003+ rdtscl(time);
20004+
20005+ /* P4 seems to return a 0 LSB, ignore it */
20006+#ifdef CONFIG_MPENTIUM4
20007+ time &= 0x3EUL;
20008+ time <<= 2;
20009+#elif defined(CONFIG_X86_64)
20010+ time &= 0xFUL;
20011+ time <<= 4;
20012+#else
20013+ time &= 0x1FUL;
20014+ time <<= 3;
20015+#endif
20016+
20017+ thread->sp0 ^= time;
20018+ load_sp0(init_tss + smp_processor_id(), thread);
20019+
20020+#ifdef CONFIG_X86_64
20021+ percpu_write(kernel_stack, thread->sp0);
20022+#endif
20023 }
20024+#endif
20025
20026diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
20027index c40c432..6e1df72 100644
20028--- a/arch/x86/kernel/process_32.c
20029+++ b/arch/x86/kernel/process_32.c
20030@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
20031 unsigned long thread_saved_pc(struct task_struct *tsk)
20032 {
20033 return ((unsigned long *)tsk->thread.sp)[3];
20034+//XXX return tsk->thread.eip;
20035 }
20036
20037 #ifndef CONFIG_SMP
20038@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
20039 unsigned short ss, gs;
20040 const char *board;
20041
20042- if (user_mode_vm(regs)) {
20043+ if (user_mode(regs)) {
20044 sp = regs->sp;
20045 ss = regs->ss & 0xffff;
20046- gs = get_user_gs(regs);
20047 } else {
20048 sp = (unsigned long) (&regs->sp);
20049 savesegment(ss, ss);
20050- savesegment(gs, gs);
20051 }
20052+ gs = get_user_gs(regs);
20053
20054 printk("\n");
20055
20056@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
20057 regs.bx = (unsigned long) fn;
20058 regs.dx = (unsigned long) arg;
20059
20060- regs.ds = __USER_DS;
20061- regs.es = __USER_DS;
20062+ regs.ds = __KERNEL_DS;
20063+ regs.es = __KERNEL_DS;
20064 regs.fs = __KERNEL_PERCPU;
20065- regs.gs = __KERNEL_STACK_CANARY;
20066+ savesegment(gs, regs.gs);
20067 regs.orig_ax = -1;
20068 regs.ip = (unsigned long) kernel_thread_helper;
20069 regs.cs = __KERNEL_CS | get_kernel_rpl();
20070@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20071 struct task_struct *tsk;
20072 int err;
20073
20074- childregs = task_pt_regs(p);
20075+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
20076 *childregs = *regs;
20077 childregs->ax = 0;
20078 childregs->sp = sp;
20079
20080 p->thread.sp = (unsigned long) childregs;
20081 p->thread.sp0 = (unsigned long) (childregs+1);
20082+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20083
20084 p->thread.ip = (unsigned long) ret_from_fork;
20085
20086@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20087 struct thread_struct *prev = &prev_p->thread,
20088 *next = &next_p->thread;
20089 int cpu = smp_processor_id();
20090- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20091+ struct tss_struct *tss = init_tss + cpu;
20092 bool preload_fpu;
20093
20094 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
20095@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20096 */
20097 lazy_save_gs(prev->gs);
20098
20099+#ifdef CONFIG_PAX_MEMORY_UDEREF
20100+ __set_fs(task_thread_info(next_p)->addr_limit);
20101+#endif
20102+
20103 /*
20104 * Load the per-thread Thread-Local Storage descriptor.
20105 */
20106@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20107 */
20108 arch_end_context_switch(next_p);
20109
20110+ percpu_write(current_task, next_p);
20111+ percpu_write(current_tinfo, &next_p->tinfo);
20112+
20113 if (preload_fpu)
20114 __math_state_restore();
20115
20116@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20117 if (prev->gs | next->gs)
20118 lazy_load_gs(next->gs);
20119
20120- percpu_write(current_task, next_p);
20121-
20122 return prev_p;
20123 }
20124
20125@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
20126 } while (count++ < 16);
20127 return 0;
20128 }
20129-
20130diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
20131index 39493bc..196816d 100644
20132--- a/arch/x86/kernel/process_64.c
20133+++ b/arch/x86/kernel/process_64.c
20134@@ -91,7 +91,7 @@ static void __exit_idle(void)
20135 void exit_idle(void)
20136 {
20137 /* idle loop has pid 0 */
20138- if (current->pid)
20139+ if (task_pid_nr(current))
20140 return;
20141 __exit_idle();
20142 }
20143@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
20144 if (!board)
20145 board = "";
20146 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
20147- current->pid, current->comm, print_tainted(),
20148+ task_pid_nr(current), current->comm, print_tainted(),
20149 init_utsname()->release,
20150 (int)strcspn(init_utsname()->version, " "),
20151 init_utsname()->version, board);
20152@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20153 struct pt_regs *childregs;
20154 struct task_struct *me = current;
20155
20156- childregs = ((struct pt_regs *)
20157- (THREAD_SIZE + task_stack_page(p))) - 1;
20158+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
20159 *childregs = *regs;
20160
20161 childregs->ax = 0;
20162@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
20163 p->thread.sp = (unsigned long) childregs;
20164 p->thread.sp0 = (unsigned long) (childregs+1);
20165 p->thread.usersp = me->thread.usersp;
20166+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
20167
20168 set_tsk_thread_flag(p, TIF_FORK);
20169
20170@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20171 struct thread_struct *prev = &prev_p->thread;
20172 struct thread_struct *next = &next_p->thread;
20173 int cpu = smp_processor_id();
20174- struct tss_struct *tss = &per_cpu(init_tss, cpu);
20175+ struct tss_struct *tss = init_tss + cpu;
20176 unsigned fsindex, gsindex;
20177 bool preload_fpu;
20178
20179@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
20180 prev->usersp = percpu_read(old_rsp);
20181 percpu_write(old_rsp, next->usersp);
20182 percpu_write(current_task, next_p);
20183+ percpu_write(current_tinfo, &next_p->tinfo);
20184
20185- percpu_write(kernel_stack,
20186- (unsigned long)task_stack_page(next_p) +
20187- THREAD_SIZE - KERNEL_STACK_OFFSET);
20188+ percpu_write(kernel_stack, next->sp0);
20189
20190 /*
20191 * Now maybe reload the debug registers and handle I/O bitmaps
20192@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
20193 if (!p || p == current || p->state == TASK_RUNNING)
20194 return 0;
20195 stack = (unsigned long)task_stack_page(p);
20196- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
20197+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
20198 return 0;
20199 fp = *(u64 *)(p->thread.sp);
20200 do {
20201- if (fp < (unsigned long)stack ||
20202- fp >= (unsigned long)stack+THREAD_SIZE)
20203+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
20204 return 0;
20205 ip = *(u64 *)(fp+8);
20206 if (!in_sched_functions(ip))
20207diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
20208index c06acdd..09de221 100644
20209--- a/arch/x86/kernel/ptrace.c
20210+++ b/arch/x86/kernel/ptrace.c
20211@@ -559,6 +559,10 @@ static int ioperm_active(struct task_struct *target,
20212 static int ioperm_get(struct task_struct *target,
20213 const struct user_regset *regset,
20214 unsigned int pos, unsigned int count,
20215+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
20216+static int ioperm_get(struct task_struct *target,
20217+ const struct user_regset *regset,
20218+ unsigned int pos, unsigned int count,
20219 void *kbuf, void __user *ubuf)
20220 {
20221 if (!target->thread.io_bitmap_ptr)
20222@@ -925,7 +929,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
20223 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20224 {
20225 int ret;
20226- unsigned long __user *datap = (unsigned long __user *)data;
20227+ unsigned long __user *datap = (__force unsigned long __user *)data;
20228
20229 switch (request) {
20230 /* read the word at location addr in the USER area. */
20231@@ -1012,14 +1016,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20232 if (addr < 0)
20233 return -EIO;
20234 ret = do_get_thread_area(child, addr,
20235- (struct user_desc __user *) data);
20236+ (__force struct user_desc __user *) data);
20237 break;
20238
20239 case PTRACE_SET_THREAD_AREA:
20240 if (addr < 0)
20241 return -EIO;
20242 ret = do_set_thread_area(child, addr,
20243- (struct user_desc __user *) data, 0);
20244+ (__force struct user_desc __user *) data, 0);
20245 break;
20246 #endif
20247
20248@@ -1038,12 +1042,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20249 #ifdef CONFIG_X86_PTRACE_BTS
20250 case PTRACE_BTS_CONFIG:
20251 ret = ptrace_bts_config
20252- (child, data, (struct ptrace_bts_config __user *)addr);
20253+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20254 break;
20255
20256 case PTRACE_BTS_STATUS:
20257 ret = ptrace_bts_status
20258- (child, data, (struct ptrace_bts_config __user *)addr);
20259+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20260 break;
20261
20262 case PTRACE_BTS_SIZE:
20263@@ -1052,7 +1056,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20264
20265 case PTRACE_BTS_GET:
20266 ret = ptrace_bts_read_record
20267- (child, data, (struct bts_struct __user *) addr);
20268+ (child, data, (__force struct bts_struct __user *) addr);
20269 break;
20270
20271 case PTRACE_BTS_CLEAR:
20272@@ -1061,7 +1065,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20273
20274 case PTRACE_BTS_DRAIN:
20275 ret = ptrace_bts_drain
20276- (child, data, (struct bts_struct __user *) addr);
20277+ (child, data, (__force struct bts_struct __user *) addr);
20278 break;
20279 #endif /* CONFIG_X86_PTRACE_BTS */
20280
20281@@ -1450,7 +1454,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20282 info.si_code = si_code;
20283
20284 /* User-mode ip? */
20285- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
20286+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
20287
20288 /* Send us the fake SIGTRAP */
20289 force_sig_info(SIGTRAP, &info, tsk);
20290@@ -1469,7 +1473,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20291 * We must return the syscall number to actually look up in the table.
20292 * This can be -1L to skip running any syscall at all.
20293 */
20294-asmregparm long syscall_trace_enter(struct pt_regs *regs)
20295+long syscall_trace_enter(struct pt_regs *regs)
20296 {
20297 long ret = 0;
20298
20299@@ -1514,7 +1518,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
20300 return ret ?: regs->orig_ax;
20301 }
20302
20303-asmregparm void syscall_trace_leave(struct pt_regs *regs)
20304+void syscall_trace_leave(struct pt_regs *regs)
20305 {
20306 if (unlikely(current->audit_context))
20307 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
20308diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20309index cf98100..e76e03d 100644
20310--- a/arch/x86/kernel/reboot.c
20311+++ b/arch/x86/kernel/reboot.c
20312@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
20313 EXPORT_SYMBOL(pm_power_off);
20314
20315 static const struct desc_ptr no_idt = {};
20316-static int reboot_mode;
20317+static unsigned short reboot_mode;
20318 enum reboot_type reboot_type = BOOT_KBD;
20319 int reboot_force;
20320
20321@@ -292,12 +292,12 @@ core_initcall(reboot_init);
20322 controller to pulse the CPU reset line, which is more thorough, but
20323 doesn't work with at least one type of 486 motherboard. It is easy
20324 to stop this code working; hence the copious comments. */
20325-static const unsigned long long
20326-real_mode_gdt_entries [3] =
20327+static struct desc_struct
20328+real_mode_gdt_entries [3] __read_only =
20329 {
20330- 0x0000000000000000ULL, /* Null descriptor */
20331- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
20332- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
20333+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
20334+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
20335+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
20336 };
20337
20338 static const struct desc_ptr
20339@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
20340 * specified by the code and length parameters.
20341 * We assume that length will aways be less that 100!
20342 */
20343-void machine_real_restart(const unsigned char *code, int length)
20344+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
20345 {
20346 local_irq_disable();
20347
20348@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
20349 /* Remap the kernel at virtual address zero, as well as offset zero
20350 from the kernel segment. This assumes the kernel segment starts at
20351 virtual address PAGE_OFFSET. */
20352- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20353- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
20354+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20355+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20356
20357 /*
20358 * Use `swapper_pg_dir' as our page directory.
20359@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
20360 boot)". This seems like a fairly standard thing that gets set by
20361 REBOOT.COM programs, and the previous reset routine did this
20362 too. */
20363- *((unsigned short *)0x472) = reboot_mode;
20364+ *(unsigned short *)(__va(0x472)) = reboot_mode;
20365
20366 /* For the switch to real mode, copy some code to low memory. It has
20367 to be in the first 64k because it is running in 16-bit mode, and it
20368 has to have the same physical and virtual address, because it turns
20369 off paging. Copy it near the end of the first page, out of the way
20370 of BIOS variables. */
20371- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
20372- real_mode_switch, sizeof (real_mode_switch));
20373- memcpy((void *)(0x1000 - 100), code, length);
20374+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
20375+ memcpy(__va(0x1000 - 100), code, length);
20376
20377 /* Set up the IDT for real mode. */
20378 load_idt(&real_mode_idt);
20379@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
20380 __asm__ __volatile__ ("ljmp $0x0008,%0"
20381 :
20382 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
20383+ do { } while (1);
20384 }
20385 #ifdef CONFIG_APM_MODULE
20386 EXPORT_SYMBOL(machine_real_restart);
20387@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20388 {
20389 }
20390
20391-static void native_machine_emergency_restart(void)
20392+__noreturn static void native_machine_emergency_restart(void)
20393 {
20394 int i;
20395
20396@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
20397 #endif
20398 }
20399
20400-static void __machine_emergency_restart(int emergency)
20401+static __noreturn void __machine_emergency_restart(int emergency)
20402 {
20403 reboot_emergency = emergency;
20404 machine_ops.emergency_restart();
20405 }
20406
20407-static void native_machine_restart(char *__unused)
20408+static __noreturn void native_machine_restart(char *__unused)
20409 {
20410 printk("machine restart\n");
20411
20412@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
20413 __machine_emergency_restart(0);
20414 }
20415
20416-static void native_machine_halt(void)
20417+static __noreturn void native_machine_halt(void)
20418 {
20419 /* stop other cpus and apics */
20420 machine_shutdown();
20421@@ -685,7 +685,7 @@ static void native_machine_halt(void)
20422 stop_this_cpu(NULL);
20423 }
20424
20425-static void native_machine_power_off(void)
20426+__noreturn static void native_machine_power_off(void)
20427 {
20428 if (pm_power_off) {
20429 if (!reboot_force)
20430@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
20431 }
20432 /* a fallback in case there is no PM info available */
20433 tboot_shutdown(TB_SHUTDOWN_HALT);
20434+ do { } while (1);
20435 }
20436
20437 struct machine_ops machine_ops = {
20438diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20439index 7a6f3b3..976a959 100644
20440--- a/arch/x86/kernel/relocate_kernel_64.S
20441+++ b/arch/x86/kernel/relocate_kernel_64.S
20442@@ -11,6 +11,7 @@
20443 #include <asm/kexec.h>
20444 #include <asm/processor-flags.h>
20445 #include <asm/pgtable_types.h>
20446+#include <asm/alternative-asm.h>
20447
20448 /*
20449 * Must be relocatable PIC code callable as a C function
20450@@ -167,6 +168,7 @@ identity_mapped:
20451 xorq %r14, %r14
20452 xorq %r15, %r15
20453
20454+ pax_force_retaddr 0, 1
20455 ret
20456
20457 1:
20458diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20459index 5449a26..0b6c759 100644
20460--- a/arch/x86/kernel/setup.c
20461+++ b/arch/x86/kernel/setup.c
20462@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
20463
20464 if (!boot_params.hdr.root_flags)
20465 root_mountflags &= ~MS_RDONLY;
20466- init_mm.start_code = (unsigned long) _text;
20467- init_mm.end_code = (unsigned long) _etext;
20468+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20469+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20470 init_mm.end_data = (unsigned long) _edata;
20471 init_mm.brk = _brk_end;
20472
20473- code_resource.start = virt_to_phys(_text);
20474- code_resource.end = virt_to_phys(_etext)-1;
20475- data_resource.start = virt_to_phys(_etext);
20476+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20477+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20478+ data_resource.start = virt_to_phys(_sdata);
20479 data_resource.end = virt_to_phys(_edata)-1;
20480 bss_resource.start = virt_to_phys(&__bss_start);
20481 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20482diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20483index d559af9..244f55d 100644
20484--- a/arch/x86/kernel/setup_percpu.c
20485+++ b/arch/x86/kernel/setup_percpu.c
20486@@ -25,19 +25,17 @@
20487 # define DBG(x...)
20488 #endif
20489
20490-DEFINE_PER_CPU(int, cpu_number);
20491+#ifdef CONFIG_SMP
20492+DEFINE_PER_CPU(unsigned int, cpu_number);
20493 EXPORT_PER_CPU_SYMBOL(cpu_number);
20494+#endif
20495
20496-#ifdef CONFIG_X86_64
20497 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20498-#else
20499-#define BOOT_PERCPU_OFFSET 0
20500-#endif
20501
20502 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20503 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20504
20505-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20506+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20507 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20508 };
20509 EXPORT_SYMBOL(__per_cpu_offset);
20510@@ -100,6 +98,8 @@ static bool __init pcpu_need_numa(void)
20511 * Pointer to the allocated area on success, NULL on failure.
20512 */
20513 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20514+ unsigned long align) __size_overflow(2);
20515+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20516 unsigned long align)
20517 {
20518 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
20519@@ -128,6 +128,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
20520 /*
20521 * Helpers for first chunk memory allocation
20522 */
20523+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
20524+
20525 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
20526 {
20527 return pcpu_alloc_bootmem(cpu, size, align);
20528@@ -159,10 +161,10 @@ static inline void setup_percpu_segment(int cpu)
20529 {
20530 #ifdef CONFIG_X86_32
20531 struct desc_struct gdt;
20532+ unsigned long base = per_cpu_offset(cpu);
20533
20534- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20535- 0x2 | DESCTYPE_S, 0x8);
20536- gdt.s = 1;
20537+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20538+ 0x83 | DESCTYPE_S, 0xC);
20539 write_gdt_entry(get_cpu_gdt_table(cpu),
20540 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20541 #endif
20542@@ -212,6 +214,11 @@ void __init setup_per_cpu_areas(void)
20543 /* alrighty, percpu areas up and running */
20544 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20545 for_each_possible_cpu(cpu) {
20546+#ifdef CONFIG_CC_STACKPROTECTOR
20547+#ifdef CONFIG_X86_32
20548+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20549+#endif
20550+#endif
20551 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20552 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20553 per_cpu(cpu_number, cpu) = cpu;
20554@@ -239,6 +246,12 @@ void __init setup_per_cpu_areas(void)
20555 early_per_cpu_map(x86_cpu_to_node_map, cpu);
20556 #endif
20557 #endif
20558+#ifdef CONFIG_CC_STACKPROTECTOR
20559+#ifdef CONFIG_X86_32
20560+ if (!cpu)
20561+ per_cpu(stack_canary.canary, cpu) = canary;
20562+#endif
20563+#endif
20564 /*
20565 * Up to this point, the boot CPU has been using .data.init
20566 * area. Reload any changed state for the boot CPU.
20567diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20568index 6a44a76..a9287a1 100644
20569--- a/arch/x86/kernel/signal.c
20570+++ b/arch/x86/kernel/signal.c
20571@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
20572 * Align the stack pointer according to the i386 ABI,
20573 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20574 */
20575- sp = ((sp + 4) & -16ul) - 4;
20576+ sp = ((sp - 12) & -16ul) - 4;
20577 #else /* !CONFIG_X86_32 */
20578 sp = round_down(sp, 16) - 8;
20579 #endif
20580@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
20581 * Return an always-bogus address instead so we will die with SIGSEGV.
20582 */
20583 if (onsigstack && !likely(on_sig_stack(sp)))
20584- return (void __user *)-1L;
20585+ return (__force void __user *)-1L;
20586
20587 /* save i387 state */
20588 if (used_math() && save_i387_xstate(*fpstate) < 0)
20589- return (void __user *)-1L;
20590+ return (__force void __user *)-1L;
20591
20592 return (void __user *)sp;
20593 }
20594@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20595 }
20596
20597 if (current->mm->context.vdso)
20598- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20599+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20600 else
20601- restorer = &frame->retcode;
20602+ restorer = (void __user *)&frame->retcode;
20603 if (ka->sa.sa_flags & SA_RESTORER)
20604 restorer = ka->sa.sa_restorer;
20605
20606@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20607 * reasons and because gdb uses it as a signature to notice
20608 * signal handler stack frames.
20609 */
20610- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20611+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20612
20613 if (err)
20614 return -EFAULT;
20615@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20616 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
20617
20618 /* Set up to return from userspace. */
20619- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20620+ if (current->mm->context.vdso)
20621+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20622+ else
20623+ restorer = (void __user *)&frame->retcode;
20624 if (ka->sa.sa_flags & SA_RESTORER)
20625 restorer = ka->sa.sa_restorer;
20626 put_user_ex(restorer, &frame->pretcode);
20627@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20628 * reasons and because gdb uses it as a signature to notice
20629 * signal handler stack frames.
20630 */
20631- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20632+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20633 } put_user_catch(err);
20634
20635 if (err)
20636@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
20637 int signr;
20638 sigset_t *oldset;
20639
20640+ pax_track_stack();
20641+
20642 /*
20643 * We want the common case to go fast, which is why we may in certain
20644 * cases get here from kernel mode. Just return without doing anything
20645@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
20646 * X86_32: vm86 regs switched out by assembly code before reaching
20647 * here, so testing against kernel CS suffices.
20648 */
20649- if (!user_mode(regs))
20650+ if (!user_mode_novm(regs))
20651 return;
20652
20653 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
20654diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20655index 7e8e905..64d5c32 100644
20656--- a/arch/x86/kernel/smpboot.c
20657+++ b/arch/x86/kernel/smpboot.c
20658@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
20659 */
20660 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
20661
20662-void cpu_hotplug_driver_lock()
20663+void cpu_hotplug_driver_lock(void)
20664 {
20665- mutex_lock(&x86_cpu_hotplug_driver_mutex);
20666+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
20667 }
20668
20669-void cpu_hotplug_driver_unlock()
20670+void cpu_hotplug_driver_unlock(void)
20671 {
20672- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20673+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20674 }
20675
20676 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
20677@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
20678 * target processor state.
20679 */
20680 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
20681- (unsigned long)stack_start.sp);
20682+ stack_start);
20683
20684 /*
20685 * Run STARTUP IPI loop.
20686@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
20687 set_idle_for_cpu(cpu, c_idle.idle);
20688 do_rest:
20689 per_cpu(current_task, cpu) = c_idle.idle;
20690+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
20691 #ifdef CONFIG_X86_32
20692 /* Stack for startup_32 can be just as for start_secondary onwards */
20693 irq_ctx_init(cpu);
20694@@ -750,13 +751,15 @@ do_rest:
20695 #else
20696 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
20697 initial_gs = per_cpu_offset(cpu);
20698- per_cpu(kernel_stack, cpu) =
20699- (unsigned long)task_stack_page(c_idle.idle) -
20700- KERNEL_STACK_OFFSET + THREAD_SIZE;
20701+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
20702 #endif
20703+
20704+ pax_open_kernel();
20705 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20706+ pax_close_kernel();
20707+
20708 initial_code = (unsigned long)start_secondary;
20709- stack_start.sp = (void *) c_idle.idle->thread.sp;
20710+ stack_start = c_idle.idle->thread.sp;
20711
20712 /* start_ip had better be page-aligned! */
20713 start_ip = setup_trampoline();
20714@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
20715
20716 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
20717
20718+#ifdef CONFIG_PAX_PER_CPU_PGD
20719+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20720+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20721+ KERNEL_PGD_PTRS);
20722+#endif
20723+
20724 err = do_boot_cpu(apicid, cpu);
20725
20726 if (err) {
20727diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20728index 3149032..14f1053 100644
20729--- a/arch/x86/kernel/step.c
20730+++ b/arch/x86/kernel/step.c
20731@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20732 struct desc_struct *desc;
20733 unsigned long base;
20734
20735- seg &= ~7UL;
20736+ seg >>= 3;
20737
20738 mutex_lock(&child->mm->context.lock);
20739- if (unlikely((seg >> 3) >= child->mm->context.size))
20740+ if (unlikely(seg >= child->mm->context.size))
20741 addr = -1L; /* bogus selector, access would fault */
20742 else {
20743 desc = child->mm->context.ldt + seg;
20744@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20745 addr += base;
20746 }
20747 mutex_unlock(&child->mm->context.lock);
20748- }
20749+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20750+ addr = ktla_ktva(addr);
20751
20752 return addr;
20753 }
20754@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20755 unsigned char opcode[15];
20756 unsigned long addr = convert_ip_to_linear(child, regs);
20757
20758+ if (addr == -EINVAL)
20759+ return 0;
20760+
20761 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20762 for (i = 0; i < copied; i++) {
20763 switch (opcode[i]) {
20764@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20765
20766 #ifdef CONFIG_X86_64
20767 case 0x40 ... 0x4f:
20768- if (regs->cs != __USER_CS)
20769+ if ((regs->cs & 0xffff) != __USER_CS)
20770 /* 32-bit mode: register increment */
20771 return 0;
20772 /* 64-bit mode: REX prefix */
20773diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20774index dee1ff7..a397f7f 100644
20775--- a/arch/x86/kernel/sys_i386_32.c
20776+++ b/arch/x86/kernel/sys_i386_32.c
20777@@ -24,6 +24,21 @@
20778
20779 #include <asm/syscalls.h>
20780
20781+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20782+{
20783+ unsigned long pax_task_size = TASK_SIZE;
20784+
20785+#ifdef CONFIG_PAX_SEGMEXEC
20786+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20787+ pax_task_size = SEGMEXEC_TASK_SIZE;
20788+#endif
20789+
20790+ if (len > pax_task_size || addr > pax_task_size - len)
20791+ return -EINVAL;
20792+
20793+ return 0;
20794+}
20795+
20796 /*
20797 * Perform the select(nd, in, out, ex, tv) and mmap() system
20798 * calls. Linux/i386 didn't use to be able to handle more than
20799@@ -58,6 +73,212 @@ out:
20800 return err;
20801 }
20802
20803+unsigned long
20804+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20805+ unsigned long len, unsigned long pgoff, unsigned long flags)
20806+{
20807+ struct mm_struct *mm = current->mm;
20808+ struct vm_area_struct *vma;
20809+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20810+
20811+#ifdef CONFIG_PAX_SEGMEXEC
20812+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20813+ pax_task_size = SEGMEXEC_TASK_SIZE;
20814+#endif
20815+
20816+ pax_task_size -= PAGE_SIZE;
20817+
20818+ if (len > pax_task_size)
20819+ return -ENOMEM;
20820+
20821+ if (flags & MAP_FIXED)
20822+ return addr;
20823+
20824+#ifdef CONFIG_PAX_RANDMMAP
20825+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20826+#endif
20827+
20828+ if (addr) {
20829+ addr = PAGE_ALIGN(addr);
20830+ if (pax_task_size - len >= addr) {
20831+ vma = find_vma(mm, addr);
20832+ if (check_heap_stack_gap(vma, addr, len))
20833+ return addr;
20834+ }
20835+ }
20836+ if (len > mm->cached_hole_size) {
20837+ start_addr = addr = mm->free_area_cache;
20838+ } else {
20839+ start_addr = addr = mm->mmap_base;
20840+ mm->cached_hole_size = 0;
20841+ }
20842+
20843+#ifdef CONFIG_PAX_PAGEEXEC
20844+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20845+ start_addr = 0x00110000UL;
20846+
20847+#ifdef CONFIG_PAX_RANDMMAP
20848+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20849+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20850+#endif
20851+
20852+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20853+ start_addr = addr = mm->mmap_base;
20854+ else
20855+ addr = start_addr;
20856+ }
20857+#endif
20858+
20859+full_search:
20860+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20861+ /* At this point: (!vma || addr < vma->vm_end). */
20862+ if (pax_task_size - len < addr) {
20863+ /*
20864+ * Start a new search - just in case we missed
20865+ * some holes.
20866+ */
20867+ if (start_addr != mm->mmap_base) {
20868+ start_addr = addr = mm->mmap_base;
20869+ mm->cached_hole_size = 0;
20870+ goto full_search;
20871+ }
20872+ return -ENOMEM;
20873+ }
20874+ if (check_heap_stack_gap(vma, addr, len))
20875+ break;
20876+ if (addr + mm->cached_hole_size < vma->vm_start)
20877+ mm->cached_hole_size = vma->vm_start - addr;
20878+ addr = vma->vm_end;
20879+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20880+ start_addr = addr = mm->mmap_base;
20881+ mm->cached_hole_size = 0;
20882+ goto full_search;
20883+ }
20884+ }
20885+
20886+ /*
20887+ * Remember the place where we stopped the search:
20888+ */
20889+ mm->free_area_cache = addr + len;
20890+ return addr;
20891+}
20892+
20893+unsigned long
20894+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20895+ const unsigned long len, const unsigned long pgoff,
20896+ const unsigned long flags)
20897+{
20898+ struct vm_area_struct *vma;
20899+ struct mm_struct *mm = current->mm;
20900+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20901+
20902+#ifdef CONFIG_PAX_SEGMEXEC
20903+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20904+ pax_task_size = SEGMEXEC_TASK_SIZE;
20905+#endif
20906+
20907+ pax_task_size -= PAGE_SIZE;
20908+
20909+ /* requested length too big for entire address space */
20910+ if (len > pax_task_size)
20911+ return -ENOMEM;
20912+
20913+ if (flags & MAP_FIXED)
20914+ return addr;
20915+
20916+#ifdef CONFIG_PAX_PAGEEXEC
20917+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20918+ goto bottomup;
20919+#endif
20920+
20921+#ifdef CONFIG_PAX_RANDMMAP
20922+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20923+#endif
20924+
20925+ /* requesting a specific address */
20926+ if (addr) {
20927+ addr = PAGE_ALIGN(addr);
20928+ if (pax_task_size - len >= addr) {
20929+ vma = find_vma(mm, addr);
20930+ if (check_heap_stack_gap(vma, addr, len))
20931+ return addr;
20932+ }
20933+ }
20934+
20935+ /* check if free_area_cache is useful for us */
20936+ if (len <= mm->cached_hole_size) {
20937+ mm->cached_hole_size = 0;
20938+ mm->free_area_cache = mm->mmap_base;
20939+ }
20940+
20941+ /* either no address requested or can't fit in requested address hole */
20942+ addr = mm->free_area_cache;
20943+
20944+ /* make sure it can fit in the remaining address space */
20945+ if (addr > len) {
20946+ vma = find_vma(mm, addr-len);
20947+ if (check_heap_stack_gap(vma, addr - len, len))
20948+ /* remember the address as a hint for next time */
20949+ return (mm->free_area_cache = addr-len);
20950+ }
20951+
20952+ if (mm->mmap_base < len)
20953+ goto bottomup;
20954+
20955+ addr = mm->mmap_base-len;
20956+
20957+ do {
20958+ /*
20959+ * Lookup failure means no vma is above this address,
20960+ * else if new region fits below vma->vm_start,
20961+ * return with success:
20962+ */
20963+ vma = find_vma(mm, addr);
20964+ if (check_heap_stack_gap(vma, addr, len))
20965+ /* remember the address as a hint for next time */
20966+ return (mm->free_area_cache = addr);
20967+
20968+ /* remember the largest hole we saw so far */
20969+ if (addr + mm->cached_hole_size < vma->vm_start)
20970+ mm->cached_hole_size = vma->vm_start - addr;
20971+
20972+ /* try just below the current vma->vm_start */
20973+ addr = skip_heap_stack_gap(vma, len);
20974+ } while (!IS_ERR_VALUE(addr));
20975+
20976+bottomup:
20977+ /*
20978+ * A failed mmap() very likely causes application failure,
20979+ * so fall back to the bottom-up function here. This scenario
20980+ * can happen with large stack limits and large mmap()
20981+ * allocations.
20982+ */
20983+
20984+#ifdef CONFIG_PAX_SEGMEXEC
20985+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20986+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20987+ else
20988+#endif
20989+
20990+ mm->mmap_base = TASK_UNMAPPED_BASE;
20991+
20992+#ifdef CONFIG_PAX_RANDMMAP
20993+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20994+ mm->mmap_base += mm->delta_mmap;
20995+#endif
20996+
20997+ mm->free_area_cache = mm->mmap_base;
20998+ mm->cached_hole_size = ~0UL;
20999+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21000+ /*
21001+ * Restore the topdown base:
21002+ */
21003+ mm->mmap_base = base;
21004+ mm->free_area_cache = base;
21005+ mm->cached_hole_size = ~0UL;
21006+
21007+ return addr;
21008+}
21009
21010 struct sel_arg_struct {
21011 unsigned long n;
21012@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21013 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
21014 case SEMTIMEDOP:
21015 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
21016- (const struct timespec __user *)fifth);
21017+ (__force const struct timespec __user *)fifth);
21018
21019 case SEMGET:
21020 return sys_semget(first, second, third);
21021@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
21022 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
21023 if (ret)
21024 return ret;
21025- return put_user(raddr, (ulong __user *) third);
21026+ return put_user(raddr, (__force ulong __user *) third);
21027 }
21028 case 1: /* iBCS2 emulator entry point */
21029 if (!segment_eq(get_fs(), get_ds()))
21030@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
21031
21032 return error;
21033 }
21034-
21035-
21036-/*
21037- * Do a system call from kernel instead of calling sys_execve so we
21038- * end up with proper pt_regs.
21039- */
21040-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
21041-{
21042- long __res;
21043- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
21044- : "=a" (__res)
21045- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
21046- return __res;
21047-}
21048diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
21049index 8aa2057..b604bc1 100644
21050--- a/arch/x86/kernel/sys_x86_64.c
21051+++ b/arch/x86/kernel/sys_x86_64.c
21052@@ -32,8 +32,8 @@ out:
21053 return error;
21054 }
21055
21056-static void find_start_end(unsigned long flags, unsigned long *begin,
21057- unsigned long *end)
21058+static void find_start_end(struct mm_struct *mm, unsigned long flags,
21059+ unsigned long *begin, unsigned long *end)
21060 {
21061 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
21062 unsigned long new_begin;
21063@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
21064 *begin = new_begin;
21065 }
21066 } else {
21067- *begin = TASK_UNMAPPED_BASE;
21068+ *begin = mm->mmap_base;
21069 *end = TASK_SIZE;
21070 }
21071 }
21072@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
21073 if (flags & MAP_FIXED)
21074 return addr;
21075
21076- find_start_end(flags, &begin, &end);
21077+ find_start_end(mm, flags, &begin, &end);
21078
21079 if (len > end)
21080 return -ENOMEM;
21081
21082+#ifdef CONFIG_PAX_RANDMMAP
21083+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21084+#endif
21085+
21086 if (addr) {
21087 addr = PAGE_ALIGN(addr);
21088 vma = find_vma(mm, addr);
21089- if (end - len >= addr &&
21090- (!vma || addr + len <= vma->vm_start))
21091+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
21092 return addr;
21093 }
21094 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
21095@@ -106,7 +109,7 @@ full_search:
21096 }
21097 return -ENOMEM;
21098 }
21099- if (!vma || addr + len <= vma->vm_start) {
21100+ if (check_heap_stack_gap(vma, addr, len)) {
21101 /*
21102 * Remember the place where we stopped the search:
21103 */
21104@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21105 {
21106 struct vm_area_struct *vma;
21107 struct mm_struct *mm = current->mm;
21108- unsigned long addr = addr0;
21109+ unsigned long base = mm->mmap_base, addr = addr0;
21110
21111 /* requested length too big for entire address space */
21112 if (len > TASK_SIZE)
21113@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21114 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
21115 goto bottomup;
21116
21117+#ifdef CONFIG_PAX_RANDMMAP
21118+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
21119+#endif
21120+
21121 /* requesting a specific address */
21122 if (addr) {
21123 addr = PAGE_ALIGN(addr);
21124- vma = find_vma(mm, addr);
21125- if (TASK_SIZE - len >= addr &&
21126- (!vma || addr + len <= vma->vm_start))
21127- return addr;
21128+ if (TASK_SIZE - len >= addr) {
21129+ vma = find_vma(mm, addr);
21130+ if (check_heap_stack_gap(vma, addr, len))
21131+ return addr;
21132+ }
21133 }
21134
21135 /* check if free_area_cache is useful for us */
21136@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21137 /* make sure it can fit in the remaining address space */
21138 if (addr > len) {
21139 vma = find_vma(mm, addr-len);
21140- if (!vma || addr <= vma->vm_start)
21141+ if (check_heap_stack_gap(vma, addr - len, len))
21142 /* remember the address as a hint for next time */
21143 return mm->free_area_cache = addr-len;
21144 }
21145@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21146 * return with success:
21147 */
21148 vma = find_vma(mm, addr);
21149- if (!vma || addr+len <= vma->vm_start)
21150+ if (check_heap_stack_gap(vma, addr, len))
21151 /* remember the address as a hint for next time */
21152 return mm->free_area_cache = addr;
21153
21154@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
21155 mm->cached_hole_size = vma->vm_start - addr;
21156
21157 /* try just below the current vma->vm_start */
21158- addr = vma->vm_start-len;
21159- } while (len < vma->vm_start);
21160+ addr = skip_heap_stack_gap(vma, len);
21161+ } while (!IS_ERR_VALUE(addr));
21162
21163 bottomup:
21164 /*
21165@@ -198,13 +206,21 @@ bottomup:
21166 * can happen with large stack limits and large mmap()
21167 * allocations.
21168 */
21169+ mm->mmap_base = TASK_UNMAPPED_BASE;
21170+
21171+#ifdef CONFIG_PAX_RANDMMAP
21172+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21173+ mm->mmap_base += mm->delta_mmap;
21174+#endif
21175+
21176+ mm->free_area_cache = mm->mmap_base;
21177 mm->cached_hole_size = ~0UL;
21178- mm->free_area_cache = TASK_UNMAPPED_BASE;
21179 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
21180 /*
21181 * Restore the topdown base:
21182 */
21183- mm->free_area_cache = mm->mmap_base;
21184+ mm->mmap_base = base;
21185+ mm->free_area_cache = base;
21186 mm->cached_hole_size = ~0UL;
21187
21188 return addr;
21189diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
21190index 76d70a4..4c94a44 100644
21191--- a/arch/x86/kernel/syscall_table_32.S
21192+++ b/arch/x86/kernel/syscall_table_32.S
21193@@ -1,3 +1,4 @@
21194+.section .rodata,"a",@progbits
21195 ENTRY(sys_call_table)
21196 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
21197 .long sys_exit
21198diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
21199index 46b8277..3349d55 100644
21200--- a/arch/x86/kernel/tboot.c
21201+++ b/arch/x86/kernel/tboot.c
21202@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
21203
21204 void tboot_shutdown(u32 shutdown_type)
21205 {
21206- void (*shutdown)(void);
21207+ void (* __noreturn shutdown)(void);
21208
21209 if (!tboot_enabled())
21210 return;
21211@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
21212
21213 switch_to_tboot_pt();
21214
21215- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
21216+ shutdown = (void *)tboot->shutdown_entry;
21217 shutdown();
21218
21219 /* should not reach here */
21220@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
21221 tboot_shutdown(acpi_shutdown_map[sleep_state]);
21222 }
21223
21224-static atomic_t ap_wfs_count;
21225+static atomic_unchecked_t ap_wfs_count;
21226
21227 static int tboot_wait_for_aps(int num_aps)
21228 {
21229@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
21230 {
21231 switch (action) {
21232 case CPU_DYING:
21233- atomic_inc(&ap_wfs_count);
21234+ atomic_inc_unchecked(&ap_wfs_count);
21235 if (num_online_cpus() == 1)
21236- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
21237+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
21238 return NOTIFY_BAD;
21239 break;
21240 }
21241@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
21242
21243 tboot_create_trampoline();
21244
21245- atomic_set(&ap_wfs_count, 0);
21246+ atomic_set_unchecked(&ap_wfs_count, 0);
21247 register_hotcpu_notifier(&tboot_cpu_notifier);
21248 return 0;
21249 }
21250diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
21251index be25734..87fe232 100644
21252--- a/arch/x86/kernel/time.c
21253+++ b/arch/x86/kernel/time.c
21254@@ -26,17 +26,13 @@
21255 int timer_ack;
21256 #endif
21257
21258-#ifdef CONFIG_X86_64
21259-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
21260-#endif
21261-
21262 unsigned long profile_pc(struct pt_regs *regs)
21263 {
21264 unsigned long pc = instruction_pointer(regs);
21265
21266- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21267+ if (!user_mode(regs) && in_lock_functions(pc)) {
21268 #ifdef CONFIG_FRAME_POINTER
21269- return *(unsigned long *)(regs->bp + sizeof(long));
21270+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21271 #else
21272 unsigned long *sp =
21273 (unsigned long *)kernel_stack_pointer(regs);
21274@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21275 * or above a saved flags. Eflags has bits 22-31 zero,
21276 * kernel addresses don't.
21277 */
21278+
21279+#ifdef CONFIG_PAX_KERNEXEC
21280+ return ktla_ktva(sp[0]);
21281+#else
21282 if (sp[0] >> 22)
21283 return sp[0];
21284 if (sp[1] >> 22)
21285 return sp[1];
21286 #endif
21287+
21288+#endif
21289 }
21290 return pc;
21291 }
21292diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21293index 6bb7b85..dd853e1 100644
21294--- a/arch/x86/kernel/tls.c
21295+++ b/arch/x86/kernel/tls.c
21296@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21297 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21298 return -EINVAL;
21299
21300+#ifdef CONFIG_PAX_SEGMEXEC
21301+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21302+ return -EINVAL;
21303+#endif
21304+
21305 set_tls_desc(p, idx, &info, 1);
21306
21307 return 0;
21308diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
21309index 2f083a2..7d3fecc 100644
21310--- a/arch/x86/kernel/tls.h
21311+++ b/arch/x86/kernel/tls.h
21312@@ -16,6 +16,6 @@
21313
21314 extern user_regset_active_fn regset_tls_active;
21315 extern user_regset_get_fn regset_tls_get;
21316-extern user_regset_set_fn regset_tls_set;
21317+extern user_regset_set_fn regset_tls_set __size_overflow(4);
21318
21319 #endif /* _ARCH_X86_KERNEL_TLS_H */
21320diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
21321index 8508237..229b664 100644
21322--- a/arch/x86/kernel/trampoline_32.S
21323+++ b/arch/x86/kernel/trampoline_32.S
21324@@ -32,6 +32,12 @@
21325 #include <asm/segment.h>
21326 #include <asm/page_types.h>
21327
21328+#ifdef CONFIG_PAX_KERNEXEC
21329+#define ta(X) (X)
21330+#else
21331+#define ta(X) ((X) - __PAGE_OFFSET)
21332+#endif
21333+
21334 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
21335 __CPUINITRODATA
21336 .code16
21337@@ -60,7 +66,7 @@ r_base = .
21338 inc %ax # protected mode (PE) bit
21339 lmsw %ax # into protected mode
21340 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
21341- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
21342+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
21343
21344 # These need to be in the same 64K segment as the above;
21345 # hence we don't use the boot_gdt_descr defined in head.S
21346diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
21347index 3af2dff..ba8aa49 100644
21348--- a/arch/x86/kernel/trampoline_64.S
21349+++ b/arch/x86/kernel/trampoline_64.S
21350@@ -91,7 +91,7 @@ startup_32:
21351 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
21352 movl %eax, %ds
21353
21354- movl $X86_CR4_PAE, %eax
21355+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
21356 movl %eax, %cr4 # Enable PAE mode
21357
21358 # Setup trampoline 4 level pagetables
21359@@ -127,7 +127,7 @@ startup_64:
21360 no_longmode:
21361 hlt
21362 jmp no_longmode
21363-#include "verify_cpu_64.S"
21364+#include "verify_cpu.S"
21365
21366 # Careful these need to be in the same 64K segment as the above;
21367 tidt:
21368@@ -138,7 +138,7 @@ tidt:
21369 # so the kernel can live anywhere
21370 .balign 4
21371 tgdt:
21372- .short tgdt_end - tgdt # gdt limit
21373+ .short tgdt_end - tgdt - 1 # gdt limit
21374 .long tgdt - r_base
21375 .short 0
21376 .quad 0x00cf9b000000ffff # __KERNEL32_CS
21377diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21378index 7e37dce..ec3f8e5 100644
21379--- a/arch/x86/kernel/traps.c
21380+++ b/arch/x86/kernel/traps.c
21381@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
21382
21383 /* Do we ignore FPU interrupts ? */
21384 char ignore_fpu_irq;
21385-
21386-/*
21387- * The IDT has to be page-aligned to simplify the Pentium
21388- * F0 0F bug workaround.
21389- */
21390-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21391 #endif
21392
21393 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21394@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21395 static inline void
21396 die_if_kernel(const char *str, struct pt_regs *regs, long err)
21397 {
21398- if (!user_mode_vm(regs))
21399+ if (!user_mode(regs))
21400 die(str, regs, err);
21401 }
21402 #endif
21403
21404 static void __kprobes
21405-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21406+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21407 long error_code, siginfo_t *info)
21408 {
21409 struct task_struct *tsk = current;
21410
21411 #ifdef CONFIG_X86_32
21412- if (regs->flags & X86_VM_MASK) {
21413+ if (v8086_mode(regs)) {
21414 /*
21415 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21416 * On nmi (interrupt 2), do_trap should not be called.
21417@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21418 }
21419 #endif
21420
21421- if (!user_mode(regs))
21422+ if (!user_mode_novm(regs))
21423 goto kernel_trap;
21424
21425 #ifdef CONFIG_X86_32
21426@@ -158,7 +152,7 @@ trap_signal:
21427 printk_ratelimit()) {
21428 printk(KERN_INFO
21429 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21430- tsk->comm, tsk->pid, str,
21431+ tsk->comm, task_pid_nr(tsk), str,
21432 regs->ip, regs->sp, error_code);
21433 print_vma_addr(" in ", regs->ip);
21434 printk("\n");
21435@@ -175,8 +169,20 @@ kernel_trap:
21436 if (!fixup_exception(regs)) {
21437 tsk->thread.error_code = error_code;
21438 tsk->thread.trap_no = trapnr;
21439+
21440+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21441+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21442+ str = "PAX: suspicious stack segment fault";
21443+#endif
21444+
21445 die(str, regs, error_code);
21446 }
21447+
21448+#ifdef CONFIG_PAX_REFCOUNT
21449+ if (trapnr == 4)
21450+ pax_report_refcount_overflow(regs);
21451+#endif
21452+
21453 return;
21454
21455 #ifdef CONFIG_X86_32
21456@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
21457 conditional_sti(regs);
21458
21459 #ifdef CONFIG_X86_32
21460- if (regs->flags & X86_VM_MASK)
21461+ if (v8086_mode(regs))
21462 goto gp_in_vm86;
21463 #endif
21464
21465 tsk = current;
21466- if (!user_mode(regs))
21467+ if (!user_mode_novm(regs))
21468 goto gp_in_kernel;
21469
21470+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21471+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21472+ struct mm_struct *mm = tsk->mm;
21473+ unsigned long limit;
21474+
21475+ down_write(&mm->mmap_sem);
21476+ limit = mm->context.user_cs_limit;
21477+ if (limit < TASK_SIZE) {
21478+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21479+ up_write(&mm->mmap_sem);
21480+ return;
21481+ }
21482+ up_write(&mm->mmap_sem);
21483+ }
21484+#endif
21485+
21486 tsk->thread.error_code = error_code;
21487 tsk->thread.trap_no = 13;
21488
21489@@ -305,6 +327,13 @@ gp_in_kernel:
21490 if (notify_die(DIE_GPF, "general protection fault", regs,
21491 error_code, 13, SIGSEGV) == NOTIFY_STOP)
21492 return;
21493+
21494+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21495+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21496+ die("PAX: suspicious general protection fault", regs, error_code);
21497+ else
21498+#endif
21499+
21500 die("general protection fault", regs, error_code);
21501 }
21502
21503@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
21504 dotraplinkage notrace __kprobes void
21505 do_nmi(struct pt_regs *regs, long error_code)
21506 {
21507+
21508+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21509+ if (!user_mode(regs)) {
21510+ unsigned long cs = regs->cs & 0xFFFF;
21511+ unsigned long ip = ktva_ktla(regs->ip);
21512+
21513+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21514+ regs->ip = ip;
21515+ }
21516+#endif
21517+
21518 nmi_enter();
21519
21520 inc_irq_stat(__nmi_count);
21521@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21522 }
21523
21524 #ifdef CONFIG_X86_32
21525- if (regs->flags & X86_VM_MASK)
21526+ if (v8086_mode(regs))
21527 goto debug_vm86;
21528 #endif
21529
21530@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21531 * kernel space (but re-enable TF when returning to user mode).
21532 */
21533 if (condition & DR_STEP) {
21534- if (!user_mode(regs))
21535+ if (!user_mode_novm(regs))
21536 goto clear_TF_reenable;
21537 }
21538
21539@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
21540 * Handle strange cache flush from user space exception
21541 * in all other cases. This is undocumented behaviour.
21542 */
21543- if (regs->flags & X86_VM_MASK) {
21544+ if (v8086_mode(regs)) {
21545 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
21546 return;
21547 }
21548@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
21549 void __math_state_restore(void)
21550 {
21551 struct thread_info *thread = current_thread_info();
21552- struct task_struct *tsk = thread->task;
21553+ struct task_struct *tsk = current;
21554
21555 /*
21556 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
21557@@ -825,8 +865,7 @@ void __math_state_restore(void)
21558 */
21559 asmlinkage void math_state_restore(void)
21560 {
21561- struct thread_info *thread = current_thread_info();
21562- struct task_struct *tsk = thread->task;
21563+ struct task_struct *tsk = current;
21564
21565 if (!tsk_used_math(tsk)) {
21566 local_irq_enable();
21567diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21568new file mode 100644
21569index 0000000..50c5edd
21570--- /dev/null
21571+++ b/arch/x86/kernel/verify_cpu.S
21572@@ -0,0 +1,140 @@
21573+/*
21574+ *
21575+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
21576+ * code has been borrowed from boot/setup.S and was introduced by
21577+ * Andi Kleen.
21578+ *
21579+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21580+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21581+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21582+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
21583+ *
21584+ * This source code is licensed under the GNU General Public License,
21585+ * Version 2. See the file COPYING for more details.
21586+ *
21587+ * This is a common code for verification whether CPU supports
21588+ * long mode and SSE or not. It is not called directly instead this
21589+ * file is included at various places and compiled in that context.
21590+ * This file is expected to run in 32bit code. Currently:
21591+ *
21592+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21593+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
21594+ * arch/x86/kernel/head_32.S: processor startup
21595+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21596+ *
21597+ * verify_cpu, returns the status of longmode and SSE in register %eax.
21598+ * 0: Success 1: Failure
21599+ *
21600+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
21601+ *
21602+ * The caller needs to check for the error code and take the action
21603+ * appropriately. Either display a message or halt.
21604+ */
21605+
21606+#include <asm/cpufeature.h>
21607+#include <asm/msr-index.h>
21608+
21609+verify_cpu:
21610+ pushfl # Save caller passed flags
21611+ pushl $0 # Kill any dangerous flags
21612+ popfl
21613+
21614+ pushfl # standard way to check for cpuid
21615+ popl %eax
21616+ movl %eax,%ebx
21617+ xorl $0x200000,%eax
21618+ pushl %eax
21619+ popfl
21620+ pushfl
21621+ popl %eax
21622+ cmpl %eax,%ebx
21623+ jz verify_cpu_no_longmode # cpu has no cpuid
21624+
21625+ movl $0x0,%eax # See if cpuid 1 is implemented
21626+ cpuid
21627+ cmpl $0x1,%eax
21628+ jb verify_cpu_no_longmode # no cpuid 1
21629+
21630+ xor %di,%di
21631+ cmpl $0x68747541,%ebx # AuthenticAMD
21632+ jnz verify_cpu_noamd
21633+ cmpl $0x69746e65,%edx
21634+ jnz verify_cpu_noamd
21635+ cmpl $0x444d4163,%ecx
21636+ jnz verify_cpu_noamd
21637+ mov $1,%di # cpu is from AMD
21638+ jmp verify_cpu_check
21639+
21640+verify_cpu_noamd:
21641+ cmpl $0x756e6547,%ebx # GenuineIntel?
21642+ jnz verify_cpu_check
21643+ cmpl $0x49656e69,%edx
21644+ jnz verify_cpu_check
21645+ cmpl $0x6c65746e,%ecx
21646+ jnz verify_cpu_check
21647+
21648+ # only call IA32_MISC_ENABLE when:
21649+ # family > 6 || (family == 6 && model >= 0xd)
21650+ movl $0x1, %eax # check CPU family and model
21651+ cpuid
21652+ movl %eax, %ecx
21653+
21654+ andl $0x0ff00f00, %eax # mask family and extended family
21655+ shrl $8, %eax
21656+ cmpl $6, %eax
21657+ ja verify_cpu_clear_xd # family > 6, ok
21658+ jb verify_cpu_check # family < 6, skip
21659+
21660+ andl $0x000f00f0, %ecx # mask model and extended model
21661+ shrl $4, %ecx
21662+ cmpl $0xd, %ecx
21663+ jb verify_cpu_check # family == 6, model < 0xd, skip
21664+
21665+verify_cpu_clear_xd:
21666+ movl $MSR_IA32_MISC_ENABLE, %ecx
21667+ rdmsr
21668+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
21669+ jnc verify_cpu_check # only write MSR if bit was changed
21670+ wrmsr
21671+
21672+verify_cpu_check:
21673+ movl $0x1,%eax # Does the cpu have what it takes
21674+ cpuid
21675+ andl $REQUIRED_MASK0,%edx
21676+ xorl $REQUIRED_MASK0,%edx
21677+ jnz verify_cpu_no_longmode
21678+
21679+ movl $0x80000000,%eax # See if extended cpuid is implemented
21680+ cpuid
21681+ cmpl $0x80000001,%eax
21682+ jb verify_cpu_no_longmode # no extended cpuid
21683+
21684+ movl $0x80000001,%eax # Does the cpu have what it takes
21685+ cpuid
21686+ andl $REQUIRED_MASK1,%edx
21687+ xorl $REQUIRED_MASK1,%edx
21688+ jnz verify_cpu_no_longmode
21689+
21690+verify_cpu_sse_test:
21691+ movl $1,%eax
21692+ cpuid
21693+ andl $SSE_MASK,%edx
21694+ cmpl $SSE_MASK,%edx
21695+ je verify_cpu_sse_ok
21696+ test %di,%di
21697+ jz verify_cpu_no_longmode # only try to force SSE on AMD
21698+ movl $MSR_K7_HWCR,%ecx
21699+ rdmsr
21700+ btr $15,%eax # enable SSE
21701+ wrmsr
21702+ xor %di,%di # don't loop
21703+ jmp verify_cpu_sse_test # try again
21704+
21705+verify_cpu_no_longmode:
21706+ popfl # Restore caller passed flags
21707+ movl $1,%eax
21708+ ret
21709+verify_cpu_sse_ok:
21710+ popfl # Restore caller passed flags
21711+ xorl %eax, %eax
21712+ ret
21713diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
21714deleted file mode 100644
21715index 45b6f8a..0000000
21716--- a/arch/x86/kernel/verify_cpu_64.S
21717+++ /dev/null
21718@@ -1,105 +0,0 @@
21719-/*
21720- *
21721- * verify_cpu.S - Code for cpu long mode and SSE verification. This
21722- * code has been borrowed from boot/setup.S and was introduced by
21723- * Andi Kleen.
21724- *
21725- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21726- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21727- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21728- *
21729- * This source code is licensed under the GNU General Public License,
21730- * Version 2. See the file COPYING for more details.
21731- *
21732- * This is a common code for verification whether CPU supports
21733- * long mode and SSE or not. It is not called directly instead this
21734- * file is included at various places and compiled in that context.
21735- * Following are the current usage.
21736- *
21737- * This file is included by both 16bit and 32bit code.
21738- *
21739- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
21740- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
21741- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
21742- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
21743- *
21744- * verify_cpu, returns the status of cpu check in register %eax.
21745- * 0: Success 1: Failure
21746- *
21747- * The caller needs to check for the error code and take the action
21748- * appropriately. Either display a message or halt.
21749- */
21750-
21751-#include <asm/cpufeature.h>
21752-
21753-verify_cpu:
21754- pushfl # Save caller passed flags
21755- pushl $0 # Kill any dangerous flags
21756- popfl
21757-
21758- pushfl # standard way to check for cpuid
21759- popl %eax
21760- movl %eax,%ebx
21761- xorl $0x200000,%eax
21762- pushl %eax
21763- popfl
21764- pushfl
21765- popl %eax
21766- cmpl %eax,%ebx
21767- jz verify_cpu_no_longmode # cpu has no cpuid
21768-
21769- movl $0x0,%eax # See if cpuid 1 is implemented
21770- cpuid
21771- cmpl $0x1,%eax
21772- jb verify_cpu_no_longmode # no cpuid 1
21773-
21774- xor %di,%di
21775- cmpl $0x68747541,%ebx # AuthenticAMD
21776- jnz verify_cpu_noamd
21777- cmpl $0x69746e65,%edx
21778- jnz verify_cpu_noamd
21779- cmpl $0x444d4163,%ecx
21780- jnz verify_cpu_noamd
21781- mov $1,%di # cpu is from AMD
21782-
21783-verify_cpu_noamd:
21784- movl $0x1,%eax # Does the cpu have what it takes
21785- cpuid
21786- andl $REQUIRED_MASK0,%edx
21787- xorl $REQUIRED_MASK0,%edx
21788- jnz verify_cpu_no_longmode
21789-
21790- movl $0x80000000,%eax # See if extended cpuid is implemented
21791- cpuid
21792- cmpl $0x80000001,%eax
21793- jb verify_cpu_no_longmode # no extended cpuid
21794-
21795- movl $0x80000001,%eax # Does the cpu have what it takes
21796- cpuid
21797- andl $REQUIRED_MASK1,%edx
21798- xorl $REQUIRED_MASK1,%edx
21799- jnz verify_cpu_no_longmode
21800-
21801-verify_cpu_sse_test:
21802- movl $1,%eax
21803- cpuid
21804- andl $SSE_MASK,%edx
21805- cmpl $SSE_MASK,%edx
21806- je verify_cpu_sse_ok
21807- test %di,%di
21808- jz verify_cpu_no_longmode # only try to force SSE on AMD
21809- movl $0xc0010015,%ecx # HWCR
21810- rdmsr
21811- btr $15,%eax # enable SSE
21812- wrmsr
21813- xor %di,%di # don't loop
21814- jmp verify_cpu_sse_test # try again
21815-
21816-verify_cpu_no_longmode:
21817- popfl # Restore caller passed flags
21818- movl $1,%eax
21819- ret
21820-verify_cpu_sse_ok:
21821- popfl # Restore caller passed flags
21822- xorl %eax, %eax
21823- ret
21824diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21825index 9c4e625..e9bb4ed 100644
21826--- a/arch/x86/kernel/vm86_32.c
21827+++ b/arch/x86/kernel/vm86_32.c
21828@@ -41,6 +41,7 @@
21829 #include <linux/ptrace.h>
21830 #include <linux/audit.h>
21831 #include <linux/stddef.h>
21832+#include <linux/grsecurity.h>
21833
21834 #include <asm/uaccess.h>
21835 #include <asm/io.h>
21836@@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
21837 /* convert vm86_regs to kernel_vm86_regs */
21838 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21839 const struct vm86_regs __user *user,
21840+ unsigned extra) __size_overflow(3);
21841+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
21842+ const struct vm86_regs __user *user,
21843 unsigned extra)
21844 {
21845 int ret = 0;
21846@@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21847 do_exit(SIGSEGV);
21848 }
21849
21850- tss = &per_cpu(init_tss, get_cpu());
21851+ tss = init_tss + get_cpu();
21852 current->thread.sp0 = current->thread.saved_sp0;
21853 current->thread.sysenter_cs = __KERNEL_CS;
21854 load_sp0(tss, &current->thread);
21855@@ -208,6 +212,13 @@ int sys_vm86old(struct pt_regs *regs)
21856 struct task_struct *tsk;
21857 int tmp, ret = -EPERM;
21858
21859+#ifdef CONFIG_GRKERNSEC_VM86
21860+ if (!capable(CAP_SYS_RAWIO)) {
21861+ gr_handle_vm86();
21862+ goto out;
21863+ }
21864+#endif
21865+
21866 tsk = current;
21867 if (tsk->thread.saved_sp0)
21868 goto out;
21869@@ -238,6 +249,14 @@ int sys_vm86(struct pt_regs *regs)
21870 int tmp, ret;
21871 struct vm86plus_struct __user *v86;
21872
21873+#ifdef CONFIG_GRKERNSEC_VM86
21874+ if (!capable(CAP_SYS_RAWIO)) {
21875+ gr_handle_vm86();
21876+ ret = -EPERM;
21877+ goto out;
21878+ }
21879+#endif
21880+
21881 tsk = current;
21882 switch (regs->bx) {
21883 case VM86_REQUEST_IRQ:
21884@@ -324,7 +343,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21885 tsk->thread.saved_fs = info->regs32->fs;
21886 tsk->thread.saved_gs = get_user_gs(info->regs32);
21887
21888- tss = &per_cpu(init_tss, get_cpu());
21889+ tss = init_tss + get_cpu();
21890 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21891 if (cpu_has_sep)
21892 tsk->thread.sysenter_cs = 0;
21893@@ -529,7 +548,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21894 goto cannot_handle;
21895 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21896 goto cannot_handle;
21897- intr_ptr = (unsigned long __user *) (i << 2);
21898+ intr_ptr = (__force unsigned long __user *) (i << 2);
21899 if (get_user(segoffs, intr_ptr))
21900 goto cannot_handle;
21901 if ((segoffs >> 16) == BIOSSEG)
21902diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
21903index d430e4c..831f817 100644
21904--- a/arch/x86/kernel/vmi_32.c
21905+++ b/arch/x86/kernel/vmi_32.c
21906@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
21907 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
21908
21909 #define call_vrom_func(rom,func) \
21910- (((VROMFUNC *)(rom->func))())
21911+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
21912
21913 #define call_vrom_long_func(rom,func,arg) \
21914- (((VROMLONGFUNC *)(rom->func)) (arg))
21915+({\
21916+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
21917+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
21918+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
21919+ __reloc;\
21920+})
21921
21922-static struct vrom_header *vmi_rom;
21923+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
21924 static int disable_pge;
21925 static int disable_pse;
21926 static int disable_sep;
21927@@ -76,10 +81,10 @@ static struct {
21928 void (*set_initial_ap_state)(int, int);
21929 void (*halt)(void);
21930 void (*set_lazy_mode)(int mode);
21931-} vmi_ops;
21932+} __no_const vmi_ops __read_only;
21933
21934 /* Cached VMI operations */
21935-struct vmi_timer_ops vmi_timer_ops;
21936+struct vmi_timer_ops vmi_timer_ops __read_only;
21937
21938 /*
21939 * VMI patching routines.
21940@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
21941 static inline void patch_offset(void *insnbuf,
21942 unsigned long ip, unsigned long dest)
21943 {
21944- *(unsigned long *)(insnbuf+1) = dest-ip-5;
21945+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
21946 }
21947
21948 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21949@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21950 {
21951 u64 reloc;
21952 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
21953+
21954 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
21955 switch(rel->type) {
21956 case VMI_RELOCATION_CALL_REL:
21957@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
21958
21959 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
21960 {
21961- const pte_t pte = { .pte = 0 };
21962+ const pte_t pte = __pte(0ULL);
21963 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
21964 }
21965
21966 static void vmi_pmd_clear(pmd_t *pmd)
21967 {
21968- const pte_t pte = { .pte = 0 };
21969+ const pte_t pte = __pte(0ULL);
21970 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
21971 }
21972 #endif
21973@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
21974 ap.ss = __KERNEL_DS;
21975 ap.esp = (unsigned long) start_esp;
21976
21977- ap.ds = __USER_DS;
21978- ap.es = __USER_DS;
21979+ ap.ds = __KERNEL_DS;
21980+ ap.es = __KERNEL_DS;
21981 ap.fs = __KERNEL_PERCPU;
21982- ap.gs = __KERNEL_STACK_CANARY;
21983+ savesegment(gs, ap.gs);
21984
21985 ap.eflags = 0;
21986
21987@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
21988 paravirt_leave_lazy_mmu();
21989 }
21990
21991+#ifdef CONFIG_PAX_KERNEXEC
21992+static unsigned long vmi_pax_open_kernel(void)
21993+{
21994+ return 0;
21995+}
21996+
21997+static unsigned long vmi_pax_close_kernel(void)
21998+{
21999+ return 0;
22000+}
22001+#endif
22002+
22003 static inline int __init check_vmi_rom(struct vrom_header *rom)
22004 {
22005 struct pci_header *pci;
22006@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
22007 return 0;
22008 if (rom->vrom_signature != VMI_SIGNATURE)
22009 return 0;
22010+ if (rom->rom_length * 512 > sizeof(*rom)) {
22011+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
22012+ return 0;
22013+ }
22014 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
22015 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
22016 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
22017@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
22018 struct vrom_header *romstart;
22019 romstart = (struct vrom_header *)isa_bus_to_virt(base);
22020 if (check_vmi_rom(romstart)) {
22021- vmi_rom = romstart;
22022+ vmi_rom = *romstart;
22023 return 1;
22024 }
22025 }
22026@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
22027
22028 para_fill(pv_irq_ops.safe_halt, Halt);
22029
22030+#ifdef CONFIG_PAX_KERNEXEC
22031+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
22032+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
22033+#endif
22034+
22035 /*
22036 * Alternative instruction rewriting doesn't happen soon enough
22037 * to convert VMI_IRET to a call instead of a jump; so we have
22038@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
22039
22040 void __init vmi_init(void)
22041 {
22042- if (!vmi_rom)
22043+ if (!vmi_rom.rom_signature)
22044 probe_vmi_rom();
22045 else
22046- check_vmi_rom(vmi_rom);
22047+ check_vmi_rom(&vmi_rom);
22048
22049 /* In case probing for or validating the ROM failed, basil */
22050- if (!vmi_rom)
22051+ if (!vmi_rom.rom_signature)
22052 return;
22053
22054- reserve_top_address(-vmi_rom->virtual_top);
22055+ reserve_top_address(-vmi_rom.virtual_top);
22056
22057 #ifdef CONFIG_X86_IO_APIC
22058 /* This is virtual hardware; timer routing is wired correctly */
22059@@ -874,7 +901,7 @@ void __init vmi_activate(void)
22060 {
22061 unsigned long flags;
22062
22063- if (!vmi_rom)
22064+ if (!vmi_rom.rom_signature)
22065 return;
22066
22067 local_irq_save(flags);
22068diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
22069index 3c68fe2..12c8280 100644
22070--- a/arch/x86/kernel/vmlinux.lds.S
22071+++ b/arch/x86/kernel/vmlinux.lds.S
22072@@ -26,6 +26,13 @@
22073 #include <asm/page_types.h>
22074 #include <asm/cache.h>
22075 #include <asm/boot.h>
22076+#include <asm/segment.h>
22077+
22078+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22079+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
22080+#else
22081+#define __KERNEL_TEXT_OFFSET 0
22082+#endif
22083
22084 #undef i386 /* in case the preprocessor is a 32bit one */
22085
22086@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
22087 #ifdef CONFIG_X86_32
22088 OUTPUT_ARCH(i386)
22089 ENTRY(phys_startup_32)
22090-jiffies = jiffies_64;
22091 #else
22092 OUTPUT_ARCH(i386:x86-64)
22093 ENTRY(phys_startup_64)
22094-jiffies_64 = jiffies;
22095 #endif
22096
22097 PHDRS {
22098 text PT_LOAD FLAGS(5); /* R_E */
22099- data PT_LOAD FLAGS(7); /* RWE */
22100+#ifdef CONFIG_X86_32
22101+ module PT_LOAD FLAGS(5); /* R_E */
22102+#endif
22103+#ifdef CONFIG_XEN
22104+ rodata PT_LOAD FLAGS(5); /* R_E */
22105+#else
22106+ rodata PT_LOAD FLAGS(4); /* R__ */
22107+#endif
22108+ data PT_LOAD FLAGS(6); /* RW_ */
22109 #ifdef CONFIG_X86_64
22110 user PT_LOAD FLAGS(5); /* R_E */
22111+#endif
22112+ init.begin PT_LOAD FLAGS(6); /* RW_ */
22113 #ifdef CONFIG_SMP
22114 percpu PT_LOAD FLAGS(6); /* RW_ */
22115 #endif
22116+ text.init PT_LOAD FLAGS(5); /* R_E */
22117+ text.exit PT_LOAD FLAGS(5); /* R_E */
22118 init PT_LOAD FLAGS(7); /* RWE */
22119-#endif
22120 note PT_NOTE FLAGS(0); /* ___ */
22121 }
22122
22123 SECTIONS
22124 {
22125 #ifdef CONFIG_X86_32
22126- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
22127- phys_startup_32 = startup_32 - LOAD_OFFSET;
22128+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
22129 #else
22130- . = __START_KERNEL;
22131- phys_startup_64 = startup_64 - LOAD_OFFSET;
22132+ . = __START_KERNEL;
22133 #endif
22134
22135 /* Text and read-only data */
22136- .text : AT(ADDR(.text) - LOAD_OFFSET) {
22137- _text = .;
22138+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22139 /* bootstrapping code */
22140+#ifdef CONFIG_X86_32
22141+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22142+#else
22143+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22144+#endif
22145+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
22146+ _text = .;
22147 HEAD_TEXT
22148 #ifdef CONFIG_X86_32
22149 . = ALIGN(PAGE_SIZE);
22150@@ -82,28 +102,71 @@ SECTIONS
22151 IRQENTRY_TEXT
22152 *(.fixup)
22153 *(.gnu.warning)
22154- /* End of text section */
22155- _etext = .;
22156 } :text = 0x9090
22157
22158- NOTES :text :note
22159+ . += __KERNEL_TEXT_OFFSET;
22160
22161- EXCEPTION_TABLE(16) :text = 0x9090
22162+#ifdef CONFIG_X86_32
22163+ . = ALIGN(PAGE_SIZE);
22164+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
22165+ *(.vmi.rom)
22166+ } :module
22167+
22168+ . = ALIGN(PAGE_SIZE);
22169+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
22170+
22171+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
22172+ MODULES_EXEC_VADDR = .;
22173+ BYTE(0)
22174+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
22175+ . = ALIGN(HPAGE_SIZE);
22176+ MODULES_EXEC_END = . - 1;
22177+#endif
22178+
22179+ } :module
22180+#endif
22181+
22182+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
22183+ /* End of text section */
22184+ _etext = . - __KERNEL_TEXT_OFFSET;
22185+ }
22186+
22187+#ifdef CONFIG_X86_32
22188+ . = ALIGN(PAGE_SIZE);
22189+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
22190+ *(.idt)
22191+ . = ALIGN(PAGE_SIZE);
22192+ *(.empty_zero_page)
22193+ *(.swapper_pg_fixmap)
22194+ *(.swapper_pg_pmd)
22195+ *(.swapper_pg_dir)
22196+ *(.trampoline_pg_dir)
22197+ } :rodata
22198+#endif
22199+
22200+ . = ALIGN(PAGE_SIZE);
22201+ NOTES :rodata :note
22202+
22203+ EXCEPTION_TABLE(16) :rodata
22204
22205 RO_DATA(PAGE_SIZE)
22206
22207 /* Data */
22208 .data : AT(ADDR(.data) - LOAD_OFFSET) {
22209+
22210+#ifdef CONFIG_PAX_KERNEXEC
22211+ . = ALIGN(HPAGE_SIZE);
22212+#else
22213+ . = ALIGN(PAGE_SIZE);
22214+#endif
22215+
22216 /* Start of data section */
22217 _sdata = .;
22218
22219 /* init_task */
22220 INIT_TASK_DATA(THREAD_SIZE)
22221
22222-#ifdef CONFIG_X86_32
22223- /* 32 bit has nosave before _edata */
22224 NOSAVE_DATA
22225-#endif
22226
22227 PAGE_ALIGNED_DATA(PAGE_SIZE)
22228
22229@@ -112,6 +175,8 @@ SECTIONS
22230 DATA_DATA
22231 CONSTRUCTORS
22232
22233+ jiffies = jiffies_64;
22234+
22235 /* rarely changed data like cpu maps */
22236 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
22237
22238@@ -166,12 +231,6 @@ SECTIONS
22239 }
22240 vgetcpu_mode = VVIRT(.vgetcpu_mode);
22241
22242- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
22243- .jiffies : AT(VLOAD(.jiffies)) {
22244- *(.jiffies)
22245- }
22246- jiffies = VVIRT(.jiffies);
22247-
22248 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
22249 *(.vsyscall_3)
22250 }
22251@@ -187,12 +246,19 @@ SECTIONS
22252 #endif /* CONFIG_X86_64 */
22253
22254 /* Init code and data - will be freed after init */
22255- . = ALIGN(PAGE_SIZE);
22256 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
22257+ BYTE(0)
22258+
22259+#ifdef CONFIG_PAX_KERNEXEC
22260+ . = ALIGN(HPAGE_SIZE);
22261+#else
22262+ . = ALIGN(PAGE_SIZE);
22263+#endif
22264+
22265 __init_begin = .; /* paired with __init_end */
22266- }
22267+ } :init.begin
22268
22269-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
22270+#ifdef CONFIG_SMP
22271 /*
22272 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
22273 * output PHDR, so the next output section - .init.text - should
22274@@ -201,12 +267,27 @@ SECTIONS
22275 PERCPU_VADDR(0, :percpu)
22276 #endif
22277
22278- INIT_TEXT_SECTION(PAGE_SIZE)
22279-#ifdef CONFIG_X86_64
22280- :init
22281-#endif
22282+ . = ALIGN(PAGE_SIZE);
22283+ init_begin = .;
22284+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
22285+ VMLINUX_SYMBOL(_sinittext) = .;
22286+ INIT_TEXT
22287+ VMLINUX_SYMBOL(_einittext) = .;
22288+ . = ALIGN(PAGE_SIZE);
22289+ } :text.init
22290
22291- INIT_DATA_SECTION(16)
22292+ /*
22293+ * .exit.text is discard at runtime, not link time, to deal with
22294+ * references from .altinstructions and .eh_frame
22295+ */
22296+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22297+ EXIT_TEXT
22298+ . = ALIGN(16);
22299+ } :text.exit
22300+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
22301+
22302+ . = ALIGN(PAGE_SIZE);
22303+ INIT_DATA_SECTION(16) :init
22304
22305 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
22306 __x86_cpu_dev_start = .;
22307@@ -232,19 +313,11 @@ SECTIONS
22308 *(.altinstr_replacement)
22309 }
22310
22311- /*
22312- * .exit.text is discard at runtime, not link time, to deal with
22313- * references from .altinstructions and .eh_frame
22314- */
22315- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
22316- EXIT_TEXT
22317- }
22318-
22319 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
22320 EXIT_DATA
22321 }
22322
22323-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
22324+#ifndef CONFIG_SMP
22325 PERCPU(PAGE_SIZE)
22326 #endif
22327
22328@@ -267,12 +340,6 @@ SECTIONS
22329 . = ALIGN(PAGE_SIZE);
22330 }
22331
22332-#ifdef CONFIG_X86_64
22333- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
22334- NOSAVE_DATA
22335- }
22336-#endif
22337-
22338 /* BSS */
22339 . = ALIGN(PAGE_SIZE);
22340 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
22341@@ -288,6 +355,7 @@ SECTIONS
22342 __brk_base = .;
22343 . += 64 * 1024; /* 64k alignment slop space */
22344 *(.brk_reservation) /* areas brk users have reserved */
22345+ . = ALIGN(HPAGE_SIZE);
22346 __brk_limit = .;
22347 }
22348
22349@@ -316,13 +384,12 @@ SECTIONS
22350 * for the boot processor.
22351 */
22352 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
22353-INIT_PER_CPU(gdt_page);
22354 INIT_PER_CPU(irq_stack_union);
22355
22356 /*
22357 * Build-time check on the image size:
22358 */
22359-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
22360+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
22361 "kernel image bigger than KERNEL_IMAGE_SIZE");
22362
22363 #ifdef CONFIG_SMP
22364diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
22365index 62f39d7..3bc46a1 100644
22366--- a/arch/x86/kernel/vsyscall_64.c
22367+++ b/arch/x86/kernel/vsyscall_64.c
22368@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
22369
22370 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
22371 /* copy vsyscall data */
22372+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
22373 vsyscall_gtod_data.clock.vread = clock->vread;
22374 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
22375 vsyscall_gtod_data.clock.mask = clock->mask;
22376@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
22377 We do this here because otherwise user space would do it on
22378 its own in a likely inferior way (no access to jiffies).
22379 If you don't like it pass NULL. */
22380- if (tcache && tcache->blob[0] == (j = __jiffies)) {
22381+ if (tcache && tcache->blob[0] == (j = jiffies)) {
22382 p = tcache->blob[1];
22383 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
22384 /* Load per CPU data from RDTSCP */
22385diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
22386index 3909e3b..5433a97 100644
22387--- a/arch/x86/kernel/x8664_ksyms_64.c
22388+++ b/arch/x86/kernel/x8664_ksyms_64.c
22389@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
22390
22391 EXPORT_SYMBOL(copy_user_generic);
22392 EXPORT_SYMBOL(__copy_user_nocache);
22393-EXPORT_SYMBOL(copy_from_user);
22394-EXPORT_SYMBOL(copy_to_user);
22395 EXPORT_SYMBOL(__copy_from_user_inatomic);
22396
22397 EXPORT_SYMBOL(copy_page);
22398diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
22399index c5ee17e..d63218f 100644
22400--- a/arch/x86/kernel/xsave.c
22401+++ b/arch/x86/kernel/xsave.c
22402@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
22403 fx_sw_user->xstate_size > fx_sw_user->extended_size)
22404 return -1;
22405
22406- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
22407+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
22408 fx_sw_user->extended_size -
22409 FP_XSTATE_MAGIC2_SIZE));
22410 /*
22411@@ -196,7 +196,7 @@ fx_only:
22412 * the other extended state.
22413 */
22414 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
22415- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
22416+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
22417 }
22418
22419 /*
22420@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
22421 if (task_thread_info(tsk)->status & TS_XSAVE)
22422 err = restore_user_xstate(buf);
22423 else
22424- err = fxrstor_checking((__force struct i387_fxsave_struct *)
22425+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
22426 buf);
22427 if (unlikely(err)) {
22428 /*
22429diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
22430index 1350e43..a94b011 100644
22431--- a/arch/x86/kvm/emulate.c
22432+++ b/arch/x86/kvm/emulate.c
22433@@ -81,8 +81,8 @@
22434 #define Src2CL (1<<29)
22435 #define Src2ImmByte (2<<29)
22436 #define Src2One (3<<29)
22437-#define Src2Imm16 (4<<29)
22438-#define Src2Mask (7<<29)
22439+#define Src2Imm16 (4U<<29)
22440+#define Src2Mask (7U<<29)
22441
22442 enum {
22443 Group1_80, Group1_81, Group1_82, Group1_83,
22444@@ -411,6 +411,7 @@ static u32 group2_table[] = {
22445
22446 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
22447 do { \
22448+ unsigned long _tmp; \
22449 __asm__ __volatile__ ( \
22450 _PRE_EFLAGS("0", "4", "2") \
22451 _op _suffix " %"_x"3,%1; " \
22452@@ -424,8 +425,6 @@ static u32 group2_table[] = {
22453 /* Raw emulation: instruction has two explicit operands. */
22454 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
22455 do { \
22456- unsigned long _tmp; \
22457- \
22458 switch ((_dst).bytes) { \
22459 case 2: \
22460 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
22461@@ -441,7 +440,6 @@ static u32 group2_table[] = {
22462
22463 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
22464 do { \
22465- unsigned long _tmp; \
22466 switch ((_dst).bytes) { \
22467 case 1: \
22468 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
22469diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
22470index 8dfeaaa..4daa395 100644
22471--- a/arch/x86/kvm/lapic.c
22472+++ b/arch/x86/kvm/lapic.c
22473@@ -52,7 +52,7 @@
22474 #define APIC_BUS_CYCLE_NS 1
22475
22476 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
22477-#define apic_debug(fmt, arg...)
22478+#define apic_debug(fmt, arg...) do {} while (0)
22479
22480 #define APIC_LVT_NUM 6
22481 /* 14 is the version for Xeon and Pentium 8.4.8*/
22482diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
22483index 3bc2707..dd157e2 100644
22484--- a/arch/x86/kvm/paging_tmpl.h
22485+++ b/arch/x86/kvm/paging_tmpl.h
22486@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22487 int level = PT_PAGE_TABLE_LEVEL;
22488 unsigned long mmu_seq;
22489
22490+ pax_track_stack();
22491+
22492 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
22493 kvm_mmu_audit(vcpu, "pre page fault");
22494
22495@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22496 kvm_mmu_free_some_pages(vcpu);
22497 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
22498 level, &write_pt, pfn);
22499+ (void)sptep;
22500 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
22501 sptep, *sptep, write_pt);
22502
22503diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
22504index 7c6e63e..1b7dac1 100644
22505--- a/arch/x86/kvm/svm.c
22506+++ b/arch/x86/kvm/svm.c
22507@@ -2240,6 +2240,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
22508 return 1;
22509 }
22510
22511+static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) __size_overflow(3);
22512 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
22513 {
22514 struct vcpu_svm *svm = to_svm(vcpu);
22515@@ -2486,7 +2487,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
22516 int cpu = raw_smp_processor_id();
22517
22518 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
22519+
22520+ pax_open_kernel();
22521 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
22522+ pax_close_kernel();
22523+
22524 load_TR_desc();
22525 }
22526
22527@@ -2947,7 +2952,7 @@ static bool svm_gb_page_enable(void)
22528 return true;
22529 }
22530
22531-static struct kvm_x86_ops svm_x86_ops = {
22532+static const struct kvm_x86_ops svm_x86_ops = {
22533 .cpu_has_kvm_support = has_svm,
22534 .disabled_by_bios = is_disabled,
22535 .hardware_setup = svm_hardware_setup,
22536diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
22537index e6d925f..8cdd779 100644
22538--- a/arch/x86/kvm/vmx.c
22539+++ b/arch/x86/kvm/vmx.c
22540@@ -570,7 +570,11 @@ static void reload_tss(void)
22541
22542 kvm_get_gdt(&gdt);
22543 descs = (void *)gdt.base;
22544+
22545+ pax_open_kernel();
22546 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
22547+ pax_close_kernel();
22548+
22549 load_TR_desc();
22550 }
22551
22552@@ -1035,6 +1039,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
22553 * Returns 0 on success, non-0 otherwise.
22554 * Assumes vcpu_load() was already called.
22555 */
22556+static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) __size_overflow(3);
22557 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
22558 {
22559 struct vcpu_vmx *vmx = to_vmx(vcpu);
22560@@ -1410,8 +1415,11 @@ static __init int hardware_setup(void)
22561 if (!cpu_has_vmx_flexpriority())
22562 flexpriority_enabled = 0;
22563
22564- if (!cpu_has_vmx_tpr_shadow())
22565- kvm_x86_ops->update_cr8_intercept = NULL;
22566+ if (!cpu_has_vmx_tpr_shadow()) {
22567+ pax_open_kernel();
22568+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
22569+ pax_close_kernel();
22570+ }
22571
22572 if (enable_ept && !cpu_has_vmx_ept_2m_page())
22573 kvm_disable_largepages();
22574@@ -2362,7 +2370,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
22575 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
22576
22577 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
22578- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
22579+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
22580 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
22581 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
22582 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
22583@@ -3718,6 +3726,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22584 "jmp .Lkvm_vmx_return \n\t"
22585 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
22586 ".Lkvm_vmx_return: "
22587+
22588+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22589+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
22590+ ".Lkvm_vmx_return2: "
22591+#endif
22592+
22593 /* Save guest registers, load host registers, keep flags */
22594 "xchg %0, (%%"R"sp) \n\t"
22595 "mov %%"R"ax, %c[rax](%0) \n\t"
22596@@ -3764,8 +3778,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22597 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
22598 #endif
22599 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
22600+
22601+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22602+ ,[cs]"i"(__KERNEL_CS)
22603+#endif
22604+
22605 : "cc", "memory"
22606- , R"bx", R"di", R"si"
22607+ , R"ax", R"bx", R"di", R"si"
22608 #ifdef CONFIG_X86_64
22609 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
22610 #endif
22611@@ -3782,7 +3801,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22612 if (vmx->rmode.irq.pending)
22613 fixup_rmode_irq(vmx);
22614
22615- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
22616+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
22617+
22618+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22619+ loadsegment(fs, __KERNEL_PERCPU);
22620+#endif
22621+
22622+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22623+ __set_fs(current_thread_info()->addr_limit);
22624+#endif
22625+
22626 vmx->launched = 1;
22627
22628 vmx_complete_interrupts(vmx);
22629@@ -3957,7 +3985,7 @@ static bool vmx_gb_page_enable(void)
22630 return false;
22631 }
22632
22633-static struct kvm_x86_ops vmx_x86_ops = {
22634+static const struct kvm_x86_ops vmx_x86_ops = {
22635 .cpu_has_kvm_support = cpu_has_kvm_support,
22636 .disabled_by_bios = vmx_disabled_by_bios,
22637 .hardware_setup = hardware_setup,
22638diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
22639index df1cefb..31447ca 100644
22640--- a/arch/x86/kvm/x86.c
22641+++ b/arch/x86/kvm/x86.c
22642@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
22643 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
22644 struct kvm_cpuid_entry2 __user *entries);
22645
22646-struct kvm_x86_ops *kvm_x86_ops;
22647+const struct kvm_x86_ops *kvm_x86_ops;
22648 EXPORT_SYMBOL_GPL(kvm_x86_ops);
22649
22650 int ignore_msrs = 0;
22651@@ -547,6 +547,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
22652 return kvm_set_msr(vcpu, index, *data);
22653 }
22654
22655+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
22656 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
22657 {
22658 int version;
22659@@ -1430,15 +1431,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
22660 struct kvm_cpuid2 *cpuid,
22661 struct kvm_cpuid_entry2 __user *entries)
22662 {
22663- int r;
22664+ int r, i;
22665
22666 r = -E2BIG;
22667 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
22668 goto out;
22669 r = -EFAULT;
22670- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
22671- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22672+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22673 goto out;
22674+ for (i = 0; i < cpuid->nent; ++i) {
22675+ struct kvm_cpuid_entry2 cpuid_entry;
22676+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
22677+ goto out;
22678+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
22679+ }
22680 vcpu->arch.cpuid_nent = cpuid->nent;
22681 kvm_apic_set_version(vcpu);
22682 return 0;
22683@@ -1451,16 +1457,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
22684 struct kvm_cpuid2 *cpuid,
22685 struct kvm_cpuid_entry2 __user *entries)
22686 {
22687- int r;
22688+ int r, i;
22689
22690 vcpu_load(vcpu);
22691 r = -E2BIG;
22692 if (cpuid->nent < vcpu->arch.cpuid_nent)
22693 goto out;
22694 r = -EFAULT;
22695- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
22696- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22697+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22698 goto out;
22699+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
22700+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
22701+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
22702+ goto out;
22703+ }
22704 return 0;
22705
22706 out:
22707@@ -1678,7 +1688,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
22708 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
22709 struct kvm_interrupt *irq)
22710 {
22711- if (irq->irq < 0 || irq->irq >= 256)
22712+ if (irq->irq >= 256)
22713 return -EINVAL;
22714 if (irqchip_in_kernel(vcpu->kvm))
22715 return -ENXIO;
22716@@ -2768,6 +2778,11 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
22717 const void *old,
22718 const void *new,
22719 unsigned int bytes,
22720+ struct kvm_vcpu *vcpu) __size_overflow(5);
22721+static int emulator_cmpxchg_emulated(unsigned long addr,
22722+ const void *old,
22723+ const void *new,
22724+ unsigned int bytes,
22725 struct kvm_vcpu *vcpu)
22726 {
22727 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
22728@@ -3260,10 +3275,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
22729 .notifier_call = kvmclock_cpufreq_notifier
22730 };
22731
22732-int kvm_arch_init(void *opaque)
22733+int kvm_arch_init(const void *opaque)
22734 {
22735 int r, cpu;
22736- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
22737+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
22738
22739 if (kvm_x86_ops) {
22740 printk(KERN_ERR "kvm: already loaded the other module\n");
22741diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22742index 7e59dc1..b88c98f 100644
22743--- a/arch/x86/lguest/boot.c
22744+++ b/arch/x86/lguest/boot.c
22745@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22746 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22747 * Launcher to reboot us.
22748 */
22749-static void lguest_restart(char *reason)
22750+static __noreturn void lguest_restart(char *reason)
22751 {
22752 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
22753+ BUG();
22754 }
22755
22756 /*G:050
22757diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
22758index 824fa0b..c619e96 100644
22759--- a/arch/x86/lib/atomic64_32.c
22760+++ b/arch/x86/lib/atomic64_32.c
22761@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
22762 }
22763 EXPORT_SYMBOL(atomic64_cmpxchg);
22764
22765+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
22766+{
22767+ return cmpxchg8b(&ptr->counter, old_val, new_val);
22768+}
22769+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
22770+
22771 /**
22772 * atomic64_xchg - xchg atomic64 variable
22773 * @ptr: pointer to type atomic64_t
22774@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
22775 EXPORT_SYMBOL(atomic64_xchg);
22776
22777 /**
22778+ * atomic64_xchg_unchecked - xchg atomic64 variable
22779+ * @ptr: pointer to type atomic64_unchecked_t
22780+ * @new_val: value to assign
22781+ *
22782+ * Atomically xchgs the value of @ptr to @new_val and returns
22783+ * the old value.
22784+ */
22785+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22786+{
22787+ /*
22788+ * Try first with a (possibly incorrect) assumption about
22789+ * what we have there. We'll do two loops most likely,
22790+ * but we'll get an ownership MESI transaction straight away
22791+ * instead of a read transaction followed by a
22792+ * flush-for-ownership transaction:
22793+ */
22794+ u64 old_val, real_val = 0;
22795+
22796+ do {
22797+ old_val = real_val;
22798+
22799+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22800+
22801+ } while (real_val != old_val);
22802+
22803+ return old_val;
22804+}
22805+EXPORT_SYMBOL(atomic64_xchg_unchecked);
22806+
22807+/**
22808 * atomic64_set - set atomic64 variable
22809 * @ptr: pointer to type atomic64_t
22810 * @new_val: value to assign
22811@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
22812 EXPORT_SYMBOL(atomic64_set);
22813
22814 /**
22815-EXPORT_SYMBOL(atomic64_read);
22816+ * atomic64_unchecked_set - set atomic64 variable
22817+ * @ptr: pointer to type atomic64_unchecked_t
22818+ * @new_val: value to assign
22819+ *
22820+ * Atomically sets the value of @ptr to @new_val.
22821+ */
22822+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22823+{
22824+ atomic64_xchg_unchecked(ptr, new_val);
22825+}
22826+EXPORT_SYMBOL(atomic64_set_unchecked);
22827+
22828+/**
22829 * atomic64_add_return - add and return
22830 * @delta: integer value to add
22831 * @ptr: pointer to type atomic64_t
22832@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
22833 }
22834 EXPORT_SYMBOL(atomic64_add_return);
22835
22836+/**
22837+ * atomic64_add_return_unchecked - add and return
22838+ * @delta: integer value to add
22839+ * @ptr: pointer to type atomic64_unchecked_t
22840+ *
22841+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
22842+ */
22843+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22844+{
22845+ /*
22846+ * Try first with a (possibly incorrect) assumption about
22847+ * what we have there. We'll do two loops most likely,
22848+ * but we'll get an ownership MESI transaction straight away
22849+ * instead of a read transaction followed by a
22850+ * flush-for-ownership transaction:
22851+ */
22852+ u64 old_val, new_val, real_val = 0;
22853+
22854+ do {
22855+ old_val = real_val;
22856+ new_val = old_val + delta;
22857+
22858+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22859+
22860+ } while (real_val != old_val);
22861+
22862+ return new_val;
22863+}
22864+EXPORT_SYMBOL(atomic64_add_return_unchecked);
22865+
22866 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
22867 {
22868 return atomic64_add_return(-delta, ptr);
22869 }
22870 EXPORT_SYMBOL(atomic64_sub_return);
22871
22872+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22873+{
22874+ return atomic64_add_return_unchecked(-delta, ptr);
22875+}
22876+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
22877+
22878 u64 atomic64_inc_return(atomic64_t *ptr)
22879 {
22880 return atomic64_add_return(1, ptr);
22881 }
22882 EXPORT_SYMBOL(atomic64_inc_return);
22883
22884+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
22885+{
22886+ return atomic64_add_return_unchecked(1, ptr);
22887+}
22888+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
22889+
22890 u64 atomic64_dec_return(atomic64_t *ptr)
22891 {
22892 return atomic64_sub_return(1, ptr);
22893 }
22894 EXPORT_SYMBOL(atomic64_dec_return);
22895
22896+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
22897+{
22898+ return atomic64_sub_return_unchecked(1, ptr);
22899+}
22900+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
22901+
22902 /**
22903 * atomic64_add - add integer to atomic64 variable
22904 * @delta: integer value to add
22905@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
22906 EXPORT_SYMBOL(atomic64_add);
22907
22908 /**
22909+ * atomic64_add_unchecked - add integer to atomic64 variable
22910+ * @delta: integer value to add
22911+ * @ptr: pointer to type atomic64_unchecked_t
22912+ *
22913+ * Atomically adds @delta to @ptr.
22914+ */
22915+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22916+{
22917+ atomic64_add_return_unchecked(delta, ptr);
22918+}
22919+EXPORT_SYMBOL(atomic64_add_unchecked);
22920+
22921+/**
22922 * atomic64_sub - subtract the atomic64 variable
22923 * @delta: integer value to subtract
22924 * @ptr: pointer to type atomic64_t
22925@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
22926 EXPORT_SYMBOL(atomic64_sub);
22927
22928 /**
22929+ * atomic64_sub_unchecked - subtract the atomic64 variable
22930+ * @delta: integer value to subtract
22931+ * @ptr: pointer to type atomic64_unchecked_t
22932+ *
22933+ * Atomically subtracts @delta from @ptr.
22934+ */
22935+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22936+{
22937+ atomic64_add_unchecked(-delta, ptr);
22938+}
22939+EXPORT_SYMBOL(atomic64_sub_unchecked);
22940+
22941+/**
22942 * atomic64_sub_and_test - subtract value from variable and test result
22943 * @delta: integer value to subtract
22944 * @ptr: pointer to type atomic64_t
22945@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
22946 EXPORT_SYMBOL(atomic64_inc);
22947
22948 /**
22949+ * atomic64_inc_unchecked - increment atomic64 variable
22950+ * @ptr: pointer to type atomic64_unchecked_t
22951+ *
22952+ * Atomically increments @ptr by 1.
22953+ */
22954+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
22955+{
22956+ atomic64_add_unchecked(1, ptr);
22957+}
22958+EXPORT_SYMBOL(atomic64_inc_unchecked);
22959+
22960+/**
22961 * atomic64_dec - decrement atomic64 variable
22962 * @ptr: pointer to type atomic64_t
22963 *
22964@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
22965 EXPORT_SYMBOL(atomic64_dec);
22966
22967 /**
22968+ * atomic64_dec_unchecked - decrement atomic64 variable
22969+ * @ptr: pointer to type atomic64_unchecked_t
22970+ *
22971+ * Atomically decrements @ptr by 1.
22972+ */
22973+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
22974+{
22975+ atomic64_sub_unchecked(1, ptr);
22976+}
22977+EXPORT_SYMBOL(atomic64_dec_unchecked);
22978+
22979+/**
22980 * atomic64_dec_and_test - decrement and test
22981 * @ptr: pointer to type atomic64_t
22982 *
22983diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22984index adbccd0..98f96c8 100644
22985--- a/arch/x86/lib/checksum_32.S
22986+++ b/arch/x86/lib/checksum_32.S
22987@@ -28,7 +28,8 @@
22988 #include <linux/linkage.h>
22989 #include <asm/dwarf2.h>
22990 #include <asm/errno.h>
22991-
22992+#include <asm/segment.h>
22993+
22994 /*
22995 * computes a partial checksum, e.g. for TCP/UDP fragments
22996 */
22997@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22998
22999 #define ARGBASE 16
23000 #define FP 12
23001-
23002-ENTRY(csum_partial_copy_generic)
23003+
23004+ENTRY(csum_partial_copy_generic_to_user)
23005 CFI_STARTPROC
23006+
23007+#ifdef CONFIG_PAX_MEMORY_UDEREF
23008+ pushl %gs
23009+ CFI_ADJUST_CFA_OFFSET 4
23010+ popl %es
23011+ CFI_ADJUST_CFA_OFFSET -4
23012+ jmp csum_partial_copy_generic
23013+#endif
23014+
23015+ENTRY(csum_partial_copy_generic_from_user)
23016+
23017+#ifdef CONFIG_PAX_MEMORY_UDEREF
23018+ pushl %gs
23019+ CFI_ADJUST_CFA_OFFSET 4
23020+ popl %ds
23021+ CFI_ADJUST_CFA_OFFSET -4
23022+#endif
23023+
23024+ENTRY(csum_partial_copy_generic)
23025 subl $4,%esp
23026 CFI_ADJUST_CFA_OFFSET 4
23027 pushl %edi
23028@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
23029 jmp 4f
23030 SRC(1: movw (%esi), %bx )
23031 addl $2, %esi
23032-DST( movw %bx, (%edi) )
23033+DST( movw %bx, %es:(%edi) )
23034 addl $2, %edi
23035 addw %bx, %ax
23036 adcl $0, %eax
23037@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
23038 SRC(1: movl (%esi), %ebx )
23039 SRC( movl 4(%esi), %edx )
23040 adcl %ebx, %eax
23041-DST( movl %ebx, (%edi) )
23042+DST( movl %ebx, %es:(%edi) )
23043 adcl %edx, %eax
23044-DST( movl %edx, 4(%edi) )
23045+DST( movl %edx, %es:4(%edi) )
23046
23047 SRC( movl 8(%esi), %ebx )
23048 SRC( movl 12(%esi), %edx )
23049 adcl %ebx, %eax
23050-DST( movl %ebx, 8(%edi) )
23051+DST( movl %ebx, %es:8(%edi) )
23052 adcl %edx, %eax
23053-DST( movl %edx, 12(%edi) )
23054+DST( movl %edx, %es:12(%edi) )
23055
23056 SRC( movl 16(%esi), %ebx )
23057 SRC( movl 20(%esi), %edx )
23058 adcl %ebx, %eax
23059-DST( movl %ebx, 16(%edi) )
23060+DST( movl %ebx, %es:16(%edi) )
23061 adcl %edx, %eax
23062-DST( movl %edx, 20(%edi) )
23063+DST( movl %edx, %es:20(%edi) )
23064
23065 SRC( movl 24(%esi), %ebx )
23066 SRC( movl 28(%esi), %edx )
23067 adcl %ebx, %eax
23068-DST( movl %ebx, 24(%edi) )
23069+DST( movl %ebx, %es:24(%edi) )
23070 adcl %edx, %eax
23071-DST( movl %edx, 28(%edi) )
23072+DST( movl %edx, %es:28(%edi) )
23073
23074 lea 32(%esi), %esi
23075 lea 32(%edi), %edi
23076@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
23077 shrl $2, %edx # This clears CF
23078 SRC(3: movl (%esi), %ebx )
23079 adcl %ebx, %eax
23080-DST( movl %ebx, (%edi) )
23081+DST( movl %ebx, %es:(%edi) )
23082 lea 4(%esi), %esi
23083 lea 4(%edi), %edi
23084 dec %edx
23085@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
23086 jb 5f
23087 SRC( movw (%esi), %cx )
23088 leal 2(%esi), %esi
23089-DST( movw %cx, (%edi) )
23090+DST( movw %cx, %es:(%edi) )
23091 leal 2(%edi), %edi
23092 je 6f
23093 shll $16,%ecx
23094 SRC(5: movb (%esi), %cl )
23095-DST( movb %cl, (%edi) )
23096+DST( movb %cl, %es:(%edi) )
23097 6: addl %ecx, %eax
23098 adcl $0, %eax
23099 7:
23100@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
23101
23102 6001:
23103 movl ARGBASE+20(%esp), %ebx # src_err_ptr
23104- movl $-EFAULT, (%ebx)
23105+ movl $-EFAULT, %ss:(%ebx)
23106
23107 # zero the complete destination - computing the rest
23108 # is too much work
23109@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
23110
23111 6002:
23112 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23113- movl $-EFAULT,(%ebx)
23114+ movl $-EFAULT,%ss:(%ebx)
23115 jmp 5000b
23116
23117 .previous
23118
23119+ pushl %ss
23120+ CFI_ADJUST_CFA_OFFSET 4
23121+ popl %ds
23122+ CFI_ADJUST_CFA_OFFSET -4
23123+ pushl %ss
23124+ CFI_ADJUST_CFA_OFFSET 4
23125+ popl %es
23126+ CFI_ADJUST_CFA_OFFSET -4
23127 popl %ebx
23128 CFI_ADJUST_CFA_OFFSET -4
23129 CFI_RESTORE ebx
23130@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
23131 CFI_ADJUST_CFA_OFFSET -4
23132 ret
23133 CFI_ENDPROC
23134-ENDPROC(csum_partial_copy_generic)
23135+ENDPROC(csum_partial_copy_generic_to_user)
23136
23137 #else
23138
23139 /* Version for PentiumII/PPro */
23140
23141 #define ROUND1(x) \
23142+ nop; nop; nop; \
23143 SRC(movl x(%esi), %ebx ) ; \
23144 addl %ebx, %eax ; \
23145- DST(movl %ebx, x(%edi) ) ;
23146+ DST(movl %ebx, %es:x(%edi)) ;
23147
23148 #define ROUND(x) \
23149+ nop; nop; nop; \
23150 SRC(movl x(%esi), %ebx ) ; \
23151 adcl %ebx, %eax ; \
23152- DST(movl %ebx, x(%edi) ) ;
23153+ DST(movl %ebx, %es:x(%edi)) ;
23154
23155 #define ARGBASE 12
23156-
23157-ENTRY(csum_partial_copy_generic)
23158+
23159+ENTRY(csum_partial_copy_generic_to_user)
23160 CFI_STARTPROC
23161+
23162+#ifdef CONFIG_PAX_MEMORY_UDEREF
23163+ pushl %gs
23164+ CFI_ADJUST_CFA_OFFSET 4
23165+ popl %es
23166+ CFI_ADJUST_CFA_OFFSET -4
23167+ jmp csum_partial_copy_generic
23168+#endif
23169+
23170+ENTRY(csum_partial_copy_generic_from_user)
23171+
23172+#ifdef CONFIG_PAX_MEMORY_UDEREF
23173+ pushl %gs
23174+ CFI_ADJUST_CFA_OFFSET 4
23175+ popl %ds
23176+ CFI_ADJUST_CFA_OFFSET -4
23177+#endif
23178+
23179+ENTRY(csum_partial_copy_generic)
23180 pushl %ebx
23181 CFI_ADJUST_CFA_OFFSET 4
23182 CFI_REL_OFFSET ebx, 0
23183@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
23184 subl %ebx, %edi
23185 lea -1(%esi),%edx
23186 andl $-32,%edx
23187- lea 3f(%ebx,%ebx), %ebx
23188+ lea 3f(%ebx,%ebx,2), %ebx
23189 testl %esi, %esi
23190 jmp *%ebx
23191 1: addl $64,%esi
23192@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
23193 jb 5f
23194 SRC( movw (%esi), %dx )
23195 leal 2(%esi), %esi
23196-DST( movw %dx, (%edi) )
23197+DST( movw %dx, %es:(%edi) )
23198 leal 2(%edi), %edi
23199 je 6f
23200 shll $16,%edx
23201 5:
23202 SRC( movb (%esi), %dl )
23203-DST( movb %dl, (%edi) )
23204+DST( movb %dl, %es:(%edi) )
23205 6: addl %edx, %eax
23206 adcl $0, %eax
23207 7:
23208 .section .fixup, "ax"
23209 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
23210- movl $-EFAULT, (%ebx)
23211+ movl $-EFAULT, %ss:(%ebx)
23212 # zero the complete destination (computing the rest is too much work)
23213 movl ARGBASE+8(%esp),%edi # dst
23214 movl ARGBASE+12(%esp),%ecx # len
23215@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
23216 rep; stosb
23217 jmp 7b
23218 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
23219- movl $-EFAULT, (%ebx)
23220+ movl $-EFAULT, %ss:(%ebx)
23221 jmp 7b
23222 .previous
23223
23224+#ifdef CONFIG_PAX_MEMORY_UDEREF
23225+ pushl %ss
23226+ CFI_ADJUST_CFA_OFFSET 4
23227+ popl %ds
23228+ CFI_ADJUST_CFA_OFFSET -4
23229+ pushl %ss
23230+ CFI_ADJUST_CFA_OFFSET 4
23231+ popl %es
23232+ CFI_ADJUST_CFA_OFFSET -4
23233+#endif
23234+
23235 popl %esi
23236 CFI_ADJUST_CFA_OFFSET -4
23237 CFI_RESTORE esi
23238@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
23239 CFI_RESTORE ebx
23240 ret
23241 CFI_ENDPROC
23242-ENDPROC(csum_partial_copy_generic)
23243+ENDPROC(csum_partial_copy_generic_to_user)
23244
23245 #undef ROUND
23246 #undef ROUND1
23247diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
23248index ebeafcc..1e3a402 100644
23249--- a/arch/x86/lib/clear_page_64.S
23250+++ b/arch/x86/lib/clear_page_64.S
23251@@ -1,5 +1,6 @@
23252 #include <linux/linkage.h>
23253 #include <asm/dwarf2.h>
23254+#include <asm/alternative-asm.h>
23255
23256 /*
23257 * Zero a page.
23258@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
23259 movl $4096/8,%ecx
23260 xorl %eax,%eax
23261 rep stosq
23262+ pax_force_retaddr
23263 ret
23264 CFI_ENDPROC
23265 ENDPROC(clear_page_c)
23266@@ -33,6 +35,7 @@ ENTRY(clear_page)
23267 leaq 64(%rdi),%rdi
23268 jnz .Lloop
23269 nop
23270+ pax_force_retaddr
23271 ret
23272 CFI_ENDPROC
23273 .Lclear_page_end:
23274@@ -43,7 +46,7 @@ ENDPROC(clear_page)
23275
23276 #include <asm/cpufeature.h>
23277
23278- .section .altinstr_replacement,"ax"
23279+ .section .altinstr_replacement,"a"
23280 1: .byte 0xeb /* jmp <disp8> */
23281 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
23282 2:
23283diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
23284index 727a5d4..333818a 100644
23285--- a/arch/x86/lib/copy_page_64.S
23286+++ b/arch/x86/lib/copy_page_64.S
23287@@ -2,12 +2,14 @@
23288
23289 #include <linux/linkage.h>
23290 #include <asm/dwarf2.h>
23291+#include <asm/alternative-asm.h>
23292
23293 ALIGN
23294 copy_page_c:
23295 CFI_STARTPROC
23296 movl $4096/8,%ecx
23297 rep movsq
23298+ pax_force_retaddr
23299 ret
23300 CFI_ENDPROC
23301 ENDPROC(copy_page_c)
23302@@ -38,7 +40,7 @@ ENTRY(copy_page)
23303 movq 16 (%rsi), %rdx
23304 movq 24 (%rsi), %r8
23305 movq 32 (%rsi), %r9
23306- movq 40 (%rsi), %r10
23307+ movq 40 (%rsi), %r13
23308 movq 48 (%rsi), %r11
23309 movq 56 (%rsi), %r12
23310
23311@@ -49,7 +51,7 @@ ENTRY(copy_page)
23312 movq %rdx, 16 (%rdi)
23313 movq %r8, 24 (%rdi)
23314 movq %r9, 32 (%rdi)
23315- movq %r10, 40 (%rdi)
23316+ movq %r13, 40 (%rdi)
23317 movq %r11, 48 (%rdi)
23318 movq %r12, 56 (%rdi)
23319
23320@@ -68,7 +70,7 @@ ENTRY(copy_page)
23321 movq 16 (%rsi), %rdx
23322 movq 24 (%rsi), %r8
23323 movq 32 (%rsi), %r9
23324- movq 40 (%rsi), %r10
23325+ movq 40 (%rsi), %r13
23326 movq 48 (%rsi), %r11
23327 movq 56 (%rsi), %r12
23328
23329@@ -77,7 +79,7 @@ ENTRY(copy_page)
23330 movq %rdx, 16 (%rdi)
23331 movq %r8, 24 (%rdi)
23332 movq %r9, 32 (%rdi)
23333- movq %r10, 40 (%rdi)
23334+ movq %r13, 40 (%rdi)
23335 movq %r11, 48 (%rdi)
23336 movq %r12, 56 (%rdi)
23337
23338@@ -94,6 +96,7 @@ ENTRY(copy_page)
23339 CFI_RESTORE r13
23340 addq $3*8,%rsp
23341 CFI_ADJUST_CFA_OFFSET -3*8
23342+ pax_force_retaddr
23343 ret
23344 .Lcopy_page_end:
23345 CFI_ENDPROC
23346@@ -104,7 +107,7 @@ ENDPROC(copy_page)
23347
23348 #include <asm/cpufeature.h>
23349
23350- .section .altinstr_replacement,"ax"
23351+ .section .altinstr_replacement,"a"
23352 1: .byte 0xeb /* jmp <disp8> */
23353 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
23354 2:
23355diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
23356index af8debd..40c75f3 100644
23357--- a/arch/x86/lib/copy_user_64.S
23358+++ b/arch/x86/lib/copy_user_64.S
23359@@ -15,13 +15,15 @@
23360 #include <asm/asm-offsets.h>
23361 #include <asm/thread_info.h>
23362 #include <asm/cpufeature.h>
23363+#include <asm/pgtable.h>
23364+#include <asm/alternative-asm.h>
23365
23366 .macro ALTERNATIVE_JUMP feature,orig,alt
23367 0:
23368 .byte 0xe9 /* 32bit jump */
23369 .long \orig-1f /* by default jump to orig */
23370 1:
23371- .section .altinstr_replacement,"ax"
23372+ .section .altinstr_replacement,"a"
23373 2: .byte 0xe9 /* near jump with 32bit immediate */
23374 .long \alt-1b /* offset */ /* or alternatively to alt */
23375 .previous
23376@@ -64,55 +66,26 @@
23377 #endif
23378 .endm
23379
23380-/* Standard copy_to_user with segment limit checking */
23381-ENTRY(copy_to_user)
23382- CFI_STARTPROC
23383- GET_THREAD_INFO(%rax)
23384- movq %rdi,%rcx
23385- addq %rdx,%rcx
23386- jc bad_to_user
23387- cmpq TI_addr_limit(%rax),%rcx
23388- ja bad_to_user
23389- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23390- CFI_ENDPROC
23391-ENDPROC(copy_to_user)
23392-
23393-/* Standard copy_from_user with segment limit checking */
23394-ENTRY(copy_from_user)
23395- CFI_STARTPROC
23396- GET_THREAD_INFO(%rax)
23397- movq %rsi,%rcx
23398- addq %rdx,%rcx
23399- jc bad_from_user
23400- cmpq TI_addr_limit(%rax),%rcx
23401- ja bad_from_user
23402- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23403- CFI_ENDPROC
23404-ENDPROC(copy_from_user)
23405-
23406 ENTRY(copy_user_generic)
23407 CFI_STARTPROC
23408 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23409 CFI_ENDPROC
23410 ENDPROC(copy_user_generic)
23411
23412-ENTRY(__copy_from_user_inatomic)
23413- CFI_STARTPROC
23414- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23415- CFI_ENDPROC
23416-ENDPROC(__copy_from_user_inatomic)
23417-
23418 .section .fixup,"ax"
23419 /* must zero dest */
23420 ENTRY(bad_from_user)
23421 bad_from_user:
23422 CFI_STARTPROC
23423+ testl %edx,%edx
23424+ js bad_to_user
23425 movl %edx,%ecx
23426 xorl %eax,%eax
23427 rep
23428 stosb
23429 bad_to_user:
23430 movl %edx,%eax
23431+ pax_force_retaddr
23432 ret
23433 CFI_ENDPROC
23434 ENDPROC(bad_from_user)
23435@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23436 jz 17f
23437 1: movq (%rsi),%r8
23438 2: movq 1*8(%rsi),%r9
23439-3: movq 2*8(%rsi),%r10
23440+3: movq 2*8(%rsi),%rax
23441 4: movq 3*8(%rsi),%r11
23442 5: movq %r8,(%rdi)
23443 6: movq %r9,1*8(%rdi)
23444-7: movq %r10,2*8(%rdi)
23445+7: movq %rax,2*8(%rdi)
23446 8: movq %r11,3*8(%rdi)
23447 9: movq 4*8(%rsi),%r8
23448 10: movq 5*8(%rsi),%r9
23449-11: movq 6*8(%rsi),%r10
23450+11: movq 6*8(%rsi),%rax
23451 12: movq 7*8(%rsi),%r11
23452 13: movq %r8,4*8(%rdi)
23453 14: movq %r9,5*8(%rdi)
23454-15: movq %r10,6*8(%rdi)
23455+15: movq %rax,6*8(%rdi)
23456 16: movq %r11,7*8(%rdi)
23457 leaq 64(%rsi),%rsi
23458 leaq 64(%rdi),%rdi
23459@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
23460 decl %ecx
23461 jnz 21b
23462 23: xor %eax,%eax
23463+ pax_force_retaddr
23464 ret
23465
23466 .section .fixup,"ax"
23467@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
23468 3: rep
23469 movsb
23470 4: xorl %eax,%eax
23471+ pax_force_retaddr
23472 ret
23473
23474 .section .fixup,"ax"
23475diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23476index cb0c112..e3a6895 100644
23477--- a/arch/x86/lib/copy_user_nocache_64.S
23478+++ b/arch/x86/lib/copy_user_nocache_64.S
23479@@ -8,12 +8,14 @@
23480
23481 #include <linux/linkage.h>
23482 #include <asm/dwarf2.h>
23483+#include <asm/alternative-asm.h>
23484
23485 #define FIX_ALIGNMENT 1
23486
23487 #include <asm/current.h>
23488 #include <asm/asm-offsets.h>
23489 #include <asm/thread_info.h>
23490+#include <asm/pgtable.h>
23491
23492 .macro ALIGN_DESTINATION
23493 #ifdef FIX_ALIGNMENT
23494@@ -50,6 +52,15 @@
23495 */
23496 ENTRY(__copy_user_nocache)
23497 CFI_STARTPROC
23498+
23499+#ifdef CONFIG_PAX_MEMORY_UDEREF
23500+ mov $PAX_USER_SHADOW_BASE,%rcx
23501+ cmp %rcx,%rsi
23502+ jae 1f
23503+ add %rcx,%rsi
23504+1:
23505+#endif
23506+
23507 cmpl $8,%edx
23508 jb 20f /* less then 8 bytes, go to byte copy loop */
23509 ALIGN_DESTINATION
23510@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23511 jz 17f
23512 1: movq (%rsi),%r8
23513 2: movq 1*8(%rsi),%r9
23514-3: movq 2*8(%rsi),%r10
23515+3: movq 2*8(%rsi),%rax
23516 4: movq 3*8(%rsi),%r11
23517 5: movnti %r8,(%rdi)
23518 6: movnti %r9,1*8(%rdi)
23519-7: movnti %r10,2*8(%rdi)
23520+7: movnti %rax,2*8(%rdi)
23521 8: movnti %r11,3*8(%rdi)
23522 9: movq 4*8(%rsi),%r8
23523 10: movq 5*8(%rsi),%r9
23524-11: movq 6*8(%rsi),%r10
23525+11: movq 6*8(%rsi),%rax
23526 12: movq 7*8(%rsi),%r11
23527 13: movnti %r8,4*8(%rdi)
23528 14: movnti %r9,5*8(%rdi)
23529-15: movnti %r10,6*8(%rdi)
23530+15: movnti %rax,6*8(%rdi)
23531 16: movnti %r11,7*8(%rdi)
23532 leaq 64(%rsi),%rsi
23533 leaq 64(%rdi),%rdi
23534@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
23535 jnz 21b
23536 23: xorl %eax,%eax
23537 sfence
23538+ pax_force_retaddr
23539 ret
23540
23541 .section .fixup,"ax"
23542diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23543index f0dba36..48cb4d6 100644
23544--- a/arch/x86/lib/csum-copy_64.S
23545+++ b/arch/x86/lib/csum-copy_64.S
23546@@ -8,6 +8,7 @@
23547 #include <linux/linkage.h>
23548 #include <asm/dwarf2.h>
23549 #include <asm/errno.h>
23550+#include <asm/alternative-asm.h>
23551
23552 /*
23553 * Checksum copy with exception handling.
23554@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
23555 CFI_RESTORE rbp
23556 addq $7*8,%rsp
23557 CFI_ADJUST_CFA_OFFSET -7*8
23558+ pax_force_retaddr 0, 1
23559 ret
23560 CFI_RESTORE_STATE
23561
23562diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23563index 459b58a..9570bc7 100644
23564--- a/arch/x86/lib/csum-wrappers_64.c
23565+++ b/arch/x86/lib/csum-wrappers_64.c
23566@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23567 len -= 2;
23568 }
23569 }
23570- isum = csum_partial_copy_generic((__force const void *)src,
23571+
23572+#ifdef CONFIG_PAX_MEMORY_UDEREF
23573+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23574+ src += PAX_USER_SHADOW_BASE;
23575+#endif
23576+
23577+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
23578 dst, len, isum, errp, NULL);
23579 if (unlikely(*errp))
23580 goto out_err;
23581@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23582 }
23583
23584 *errp = 0;
23585- return csum_partial_copy_generic(src, (void __force *)dst,
23586+
23587+#ifdef CONFIG_PAX_MEMORY_UDEREF
23588+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
23589+ dst += PAX_USER_SHADOW_BASE;
23590+#endif
23591+
23592+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
23593 len, isum, NULL, errp);
23594 }
23595 EXPORT_SYMBOL(csum_partial_copy_to_user);
23596diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
23597index ff485d3..b6372ce 100644
23598--- a/arch/x86/lib/delay.c
23599+++ b/arch/x86/lib/delay.c
23600@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
23601 }
23602
23603 /* TSC based delay: */
23604-static void delay_tsc(unsigned long loops)
23605+static void delay_tsc(unsigned long __loops)
23606 {
23607- unsigned long bclock, now;
23608+ u32 bclock, now, loops = __loops;
23609 int cpu;
23610
23611 preempt_disable();
23612diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23613index 51f1504..ddac4c1 100644
23614--- a/arch/x86/lib/getuser.S
23615+++ b/arch/x86/lib/getuser.S
23616@@ -33,15 +33,38 @@
23617 #include <asm/asm-offsets.h>
23618 #include <asm/thread_info.h>
23619 #include <asm/asm.h>
23620+#include <asm/segment.h>
23621+#include <asm/pgtable.h>
23622+#include <asm/alternative-asm.h>
23623+
23624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23625+#define __copyuser_seg gs;
23626+#else
23627+#define __copyuser_seg
23628+#endif
23629
23630 .text
23631 ENTRY(__get_user_1)
23632 CFI_STARTPROC
23633+
23634+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23635 GET_THREAD_INFO(%_ASM_DX)
23636 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23637 jae bad_get_user
23638-1: movzb (%_ASM_AX),%edx
23639+
23640+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23641+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23642+ cmp %_ASM_DX,%_ASM_AX
23643+ jae 1234f
23644+ add %_ASM_DX,%_ASM_AX
23645+1234:
23646+#endif
23647+
23648+#endif
23649+
23650+1: __copyuser_seg movzb (%_ASM_AX),%edx
23651 xor %eax,%eax
23652+ pax_force_retaddr
23653 ret
23654 CFI_ENDPROC
23655 ENDPROC(__get_user_1)
23656@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
23657 ENTRY(__get_user_2)
23658 CFI_STARTPROC
23659 add $1,%_ASM_AX
23660+
23661+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23662 jc bad_get_user
23663 GET_THREAD_INFO(%_ASM_DX)
23664 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23665 jae bad_get_user
23666-2: movzwl -1(%_ASM_AX),%edx
23667+
23668+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23669+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23670+ cmp %_ASM_DX,%_ASM_AX
23671+ jae 1234f
23672+ add %_ASM_DX,%_ASM_AX
23673+1234:
23674+#endif
23675+
23676+#endif
23677+
23678+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23679 xor %eax,%eax
23680+ pax_force_retaddr
23681 ret
23682 CFI_ENDPROC
23683 ENDPROC(__get_user_2)
23684@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
23685 ENTRY(__get_user_4)
23686 CFI_STARTPROC
23687 add $3,%_ASM_AX
23688+
23689+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23690 jc bad_get_user
23691 GET_THREAD_INFO(%_ASM_DX)
23692 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23693 jae bad_get_user
23694-3: mov -3(%_ASM_AX),%edx
23695+
23696+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23697+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23698+ cmp %_ASM_DX,%_ASM_AX
23699+ jae 1234f
23700+ add %_ASM_DX,%_ASM_AX
23701+1234:
23702+#endif
23703+
23704+#endif
23705+
23706+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23707 xor %eax,%eax
23708+ pax_force_retaddr
23709 ret
23710 CFI_ENDPROC
23711 ENDPROC(__get_user_4)
23712@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
23713 GET_THREAD_INFO(%_ASM_DX)
23714 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23715 jae bad_get_user
23716+
23717+#ifdef CONFIG_PAX_MEMORY_UDEREF
23718+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23719+ cmp %_ASM_DX,%_ASM_AX
23720+ jae 1234f
23721+ add %_ASM_DX,%_ASM_AX
23722+1234:
23723+#endif
23724+
23725 4: movq -7(%_ASM_AX),%_ASM_DX
23726 xor %eax,%eax
23727+ pax_force_retaddr
23728 ret
23729 CFI_ENDPROC
23730 ENDPROC(__get_user_8)
23731@@ -91,6 +152,7 @@ bad_get_user:
23732 CFI_STARTPROC
23733 xor %edx,%edx
23734 mov $(-EFAULT),%_ASM_AX
23735+ pax_force_retaddr
23736 ret
23737 CFI_ENDPROC
23738 END(bad_get_user)
23739diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23740index 05a95e7..326f2fa 100644
23741--- a/arch/x86/lib/iomap_copy_64.S
23742+++ b/arch/x86/lib/iomap_copy_64.S
23743@@ -17,6 +17,7 @@
23744
23745 #include <linux/linkage.h>
23746 #include <asm/dwarf2.h>
23747+#include <asm/alternative-asm.h>
23748
23749 /*
23750 * override generic version in lib/iomap_copy.c
23751@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23752 CFI_STARTPROC
23753 movl %edx,%ecx
23754 rep movsd
23755+ pax_force_retaddr
23756 ret
23757 CFI_ENDPROC
23758 ENDPROC(__iowrite32_copy)
23759diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23760index ad5441e..610e351 100644
23761--- a/arch/x86/lib/memcpy_64.S
23762+++ b/arch/x86/lib/memcpy_64.S
23763@@ -4,6 +4,7 @@
23764
23765 #include <asm/cpufeature.h>
23766 #include <asm/dwarf2.h>
23767+#include <asm/alternative-asm.h>
23768
23769 /*
23770 * memcpy - Copy a memory block.
23771@@ -34,6 +35,7 @@ memcpy_c:
23772 rep movsq
23773 movl %edx, %ecx
23774 rep movsb
23775+ pax_force_retaddr
23776 ret
23777 CFI_ENDPROC
23778 ENDPROC(memcpy_c)
23779@@ -118,6 +120,7 @@ ENTRY(memcpy)
23780 jnz .Lloop_1
23781
23782 .Lend:
23783+ pax_force_retaddr 0, 1
23784 ret
23785 CFI_ENDPROC
23786 ENDPROC(memcpy)
23787@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
23788 * It is also a lot simpler. Use this when possible:
23789 */
23790
23791- .section .altinstr_replacement, "ax"
23792+ .section .altinstr_replacement, "a"
23793 1: .byte 0xeb /* jmp <disp8> */
23794 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
23795 2:
23796diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23797index 2c59481..7e9ba4e 100644
23798--- a/arch/x86/lib/memset_64.S
23799+++ b/arch/x86/lib/memset_64.S
23800@@ -2,6 +2,7 @@
23801
23802 #include <linux/linkage.h>
23803 #include <asm/dwarf2.h>
23804+#include <asm/alternative-asm.h>
23805
23806 /*
23807 * ISO C memset - set a memory block to a byte value.
23808@@ -28,6 +29,7 @@ memset_c:
23809 movl %r8d,%ecx
23810 rep stosb
23811 movq %r9,%rax
23812+ pax_force_retaddr
23813 ret
23814 CFI_ENDPROC
23815 ENDPROC(memset_c)
23816@@ -35,13 +37,13 @@ ENDPROC(memset_c)
23817 ENTRY(memset)
23818 ENTRY(__memset)
23819 CFI_STARTPROC
23820- movq %rdi,%r10
23821 movq %rdx,%r11
23822
23823 /* expand byte value */
23824 movzbl %sil,%ecx
23825 movabs $0x0101010101010101,%rax
23826 mul %rcx /* with rax, clobbers rdx */
23827+ movq %rdi,%rdx
23828
23829 /* align dst */
23830 movl %edi,%r9d
23831@@ -95,7 +97,8 @@ ENTRY(__memset)
23832 jnz .Lloop_1
23833
23834 .Lende:
23835- movq %r10,%rax
23836+ movq %rdx,%rax
23837+ pax_force_retaddr
23838 ret
23839
23840 CFI_RESTORE_STATE
23841@@ -118,7 +121,7 @@ ENDPROC(__memset)
23842
23843 #include <asm/cpufeature.h>
23844
23845- .section .altinstr_replacement,"ax"
23846+ .section .altinstr_replacement,"a"
23847 1: .byte 0xeb /* jmp <disp8> */
23848 .byte (memset_c - memset) - (2f - 1b) /* offset */
23849 2:
23850diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23851index c9f2d9b..e7fd2c0 100644
23852--- a/arch/x86/lib/mmx_32.c
23853+++ b/arch/x86/lib/mmx_32.c
23854@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23855 {
23856 void *p;
23857 int i;
23858+ unsigned long cr0;
23859
23860 if (unlikely(in_interrupt()))
23861 return __memcpy(to, from, len);
23862@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23863 kernel_fpu_begin();
23864
23865 __asm__ __volatile__ (
23866- "1: prefetch (%0)\n" /* This set is 28 bytes */
23867- " prefetch 64(%0)\n"
23868- " prefetch 128(%0)\n"
23869- " prefetch 192(%0)\n"
23870- " prefetch 256(%0)\n"
23871+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23872+ " prefetch 64(%1)\n"
23873+ " prefetch 128(%1)\n"
23874+ " prefetch 192(%1)\n"
23875+ " prefetch 256(%1)\n"
23876 "2: \n"
23877 ".section .fixup, \"ax\"\n"
23878- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23879+ "3: \n"
23880+
23881+#ifdef CONFIG_PAX_KERNEXEC
23882+ " movl %%cr0, %0\n"
23883+ " movl %0, %%eax\n"
23884+ " andl $0xFFFEFFFF, %%eax\n"
23885+ " movl %%eax, %%cr0\n"
23886+#endif
23887+
23888+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23889+
23890+#ifdef CONFIG_PAX_KERNEXEC
23891+ " movl %0, %%cr0\n"
23892+#endif
23893+
23894 " jmp 2b\n"
23895 ".previous\n"
23896 _ASM_EXTABLE(1b, 3b)
23897- : : "r" (from));
23898+ : "=&r" (cr0) : "r" (from) : "ax");
23899
23900 for ( ; i > 5; i--) {
23901 __asm__ __volatile__ (
23902- "1: prefetch 320(%0)\n"
23903- "2: movq (%0), %%mm0\n"
23904- " movq 8(%0), %%mm1\n"
23905- " movq 16(%0), %%mm2\n"
23906- " movq 24(%0), %%mm3\n"
23907- " movq %%mm0, (%1)\n"
23908- " movq %%mm1, 8(%1)\n"
23909- " movq %%mm2, 16(%1)\n"
23910- " movq %%mm3, 24(%1)\n"
23911- " movq 32(%0), %%mm0\n"
23912- " movq 40(%0), %%mm1\n"
23913- " movq 48(%0), %%mm2\n"
23914- " movq 56(%0), %%mm3\n"
23915- " movq %%mm0, 32(%1)\n"
23916- " movq %%mm1, 40(%1)\n"
23917- " movq %%mm2, 48(%1)\n"
23918- " movq %%mm3, 56(%1)\n"
23919+ "1: prefetch 320(%1)\n"
23920+ "2: movq (%1), %%mm0\n"
23921+ " movq 8(%1), %%mm1\n"
23922+ " movq 16(%1), %%mm2\n"
23923+ " movq 24(%1), %%mm3\n"
23924+ " movq %%mm0, (%2)\n"
23925+ " movq %%mm1, 8(%2)\n"
23926+ " movq %%mm2, 16(%2)\n"
23927+ " movq %%mm3, 24(%2)\n"
23928+ " movq 32(%1), %%mm0\n"
23929+ " movq 40(%1), %%mm1\n"
23930+ " movq 48(%1), %%mm2\n"
23931+ " movq 56(%1), %%mm3\n"
23932+ " movq %%mm0, 32(%2)\n"
23933+ " movq %%mm1, 40(%2)\n"
23934+ " movq %%mm2, 48(%2)\n"
23935+ " movq %%mm3, 56(%2)\n"
23936 ".section .fixup, \"ax\"\n"
23937- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23938+ "3:\n"
23939+
23940+#ifdef CONFIG_PAX_KERNEXEC
23941+ " movl %%cr0, %0\n"
23942+ " movl %0, %%eax\n"
23943+ " andl $0xFFFEFFFF, %%eax\n"
23944+ " movl %%eax, %%cr0\n"
23945+#endif
23946+
23947+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23948+
23949+#ifdef CONFIG_PAX_KERNEXEC
23950+ " movl %0, %%cr0\n"
23951+#endif
23952+
23953 " jmp 2b\n"
23954 ".previous\n"
23955 _ASM_EXTABLE(1b, 3b)
23956- : : "r" (from), "r" (to) : "memory");
23957+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23958
23959 from += 64;
23960 to += 64;
23961@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23962 static void fast_copy_page(void *to, void *from)
23963 {
23964 int i;
23965+ unsigned long cr0;
23966
23967 kernel_fpu_begin();
23968
23969@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23970 * but that is for later. -AV
23971 */
23972 __asm__ __volatile__(
23973- "1: prefetch (%0)\n"
23974- " prefetch 64(%0)\n"
23975- " prefetch 128(%0)\n"
23976- " prefetch 192(%0)\n"
23977- " prefetch 256(%0)\n"
23978+ "1: prefetch (%1)\n"
23979+ " prefetch 64(%1)\n"
23980+ " prefetch 128(%1)\n"
23981+ " prefetch 192(%1)\n"
23982+ " prefetch 256(%1)\n"
23983 "2: \n"
23984 ".section .fixup, \"ax\"\n"
23985- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23986+ "3: \n"
23987+
23988+#ifdef CONFIG_PAX_KERNEXEC
23989+ " movl %%cr0, %0\n"
23990+ " movl %0, %%eax\n"
23991+ " andl $0xFFFEFFFF, %%eax\n"
23992+ " movl %%eax, %%cr0\n"
23993+#endif
23994+
23995+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23996+
23997+#ifdef CONFIG_PAX_KERNEXEC
23998+ " movl %0, %%cr0\n"
23999+#endif
24000+
24001 " jmp 2b\n"
24002 ".previous\n"
24003- _ASM_EXTABLE(1b, 3b) : : "r" (from));
24004+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24005
24006 for (i = 0; i < (4096-320)/64; i++) {
24007 __asm__ __volatile__ (
24008- "1: prefetch 320(%0)\n"
24009- "2: movq (%0), %%mm0\n"
24010- " movntq %%mm0, (%1)\n"
24011- " movq 8(%0), %%mm1\n"
24012- " movntq %%mm1, 8(%1)\n"
24013- " movq 16(%0), %%mm2\n"
24014- " movntq %%mm2, 16(%1)\n"
24015- " movq 24(%0), %%mm3\n"
24016- " movntq %%mm3, 24(%1)\n"
24017- " movq 32(%0), %%mm4\n"
24018- " movntq %%mm4, 32(%1)\n"
24019- " movq 40(%0), %%mm5\n"
24020- " movntq %%mm5, 40(%1)\n"
24021- " movq 48(%0), %%mm6\n"
24022- " movntq %%mm6, 48(%1)\n"
24023- " movq 56(%0), %%mm7\n"
24024- " movntq %%mm7, 56(%1)\n"
24025+ "1: prefetch 320(%1)\n"
24026+ "2: movq (%1), %%mm0\n"
24027+ " movntq %%mm0, (%2)\n"
24028+ " movq 8(%1), %%mm1\n"
24029+ " movntq %%mm1, 8(%2)\n"
24030+ " movq 16(%1), %%mm2\n"
24031+ " movntq %%mm2, 16(%2)\n"
24032+ " movq 24(%1), %%mm3\n"
24033+ " movntq %%mm3, 24(%2)\n"
24034+ " movq 32(%1), %%mm4\n"
24035+ " movntq %%mm4, 32(%2)\n"
24036+ " movq 40(%1), %%mm5\n"
24037+ " movntq %%mm5, 40(%2)\n"
24038+ " movq 48(%1), %%mm6\n"
24039+ " movntq %%mm6, 48(%2)\n"
24040+ " movq 56(%1), %%mm7\n"
24041+ " movntq %%mm7, 56(%2)\n"
24042 ".section .fixup, \"ax\"\n"
24043- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24044+ "3:\n"
24045+
24046+#ifdef CONFIG_PAX_KERNEXEC
24047+ " movl %%cr0, %0\n"
24048+ " movl %0, %%eax\n"
24049+ " andl $0xFFFEFFFF, %%eax\n"
24050+ " movl %%eax, %%cr0\n"
24051+#endif
24052+
24053+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24054+
24055+#ifdef CONFIG_PAX_KERNEXEC
24056+ " movl %0, %%cr0\n"
24057+#endif
24058+
24059 " jmp 2b\n"
24060 ".previous\n"
24061- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
24062+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24063
24064 from += 64;
24065 to += 64;
24066@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
24067 static void fast_copy_page(void *to, void *from)
24068 {
24069 int i;
24070+ unsigned long cr0;
24071
24072 kernel_fpu_begin();
24073
24074 __asm__ __volatile__ (
24075- "1: prefetch (%0)\n"
24076- " prefetch 64(%0)\n"
24077- " prefetch 128(%0)\n"
24078- " prefetch 192(%0)\n"
24079- " prefetch 256(%0)\n"
24080+ "1: prefetch (%1)\n"
24081+ " prefetch 64(%1)\n"
24082+ " prefetch 128(%1)\n"
24083+ " prefetch 192(%1)\n"
24084+ " prefetch 256(%1)\n"
24085 "2: \n"
24086 ".section .fixup, \"ax\"\n"
24087- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24088+ "3: \n"
24089+
24090+#ifdef CONFIG_PAX_KERNEXEC
24091+ " movl %%cr0, %0\n"
24092+ " movl %0, %%eax\n"
24093+ " andl $0xFFFEFFFF, %%eax\n"
24094+ " movl %%eax, %%cr0\n"
24095+#endif
24096+
24097+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
24098+
24099+#ifdef CONFIG_PAX_KERNEXEC
24100+ " movl %0, %%cr0\n"
24101+#endif
24102+
24103 " jmp 2b\n"
24104 ".previous\n"
24105- _ASM_EXTABLE(1b, 3b) : : "r" (from));
24106+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
24107
24108 for (i = 0; i < 4096/64; i++) {
24109 __asm__ __volatile__ (
24110- "1: prefetch 320(%0)\n"
24111- "2: movq (%0), %%mm0\n"
24112- " movq 8(%0), %%mm1\n"
24113- " movq 16(%0), %%mm2\n"
24114- " movq 24(%0), %%mm3\n"
24115- " movq %%mm0, (%1)\n"
24116- " movq %%mm1, 8(%1)\n"
24117- " movq %%mm2, 16(%1)\n"
24118- " movq %%mm3, 24(%1)\n"
24119- " movq 32(%0), %%mm0\n"
24120- " movq 40(%0), %%mm1\n"
24121- " movq 48(%0), %%mm2\n"
24122- " movq 56(%0), %%mm3\n"
24123- " movq %%mm0, 32(%1)\n"
24124- " movq %%mm1, 40(%1)\n"
24125- " movq %%mm2, 48(%1)\n"
24126- " movq %%mm3, 56(%1)\n"
24127+ "1: prefetch 320(%1)\n"
24128+ "2: movq (%1), %%mm0\n"
24129+ " movq 8(%1), %%mm1\n"
24130+ " movq 16(%1), %%mm2\n"
24131+ " movq 24(%1), %%mm3\n"
24132+ " movq %%mm0, (%2)\n"
24133+ " movq %%mm1, 8(%2)\n"
24134+ " movq %%mm2, 16(%2)\n"
24135+ " movq %%mm3, 24(%2)\n"
24136+ " movq 32(%1), %%mm0\n"
24137+ " movq 40(%1), %%mm1\n"
24138+ " movq 48(%1), %%mm2\n"
24139+ " movq 56(%1), %%mm3\n"
24140+ " movq %%mm0, 32(%2)\n"
24141+ " movq %%mm1, 40(%2)\n"
24142+ " movq %%mm2, 48(%2)\n"
24143+ " movq %%mm3, 56(%2)\n"
24144 ".section .fixup, \"ax\"\n"
24145- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24146+ "3:\n"
24147+
24148+#ifdef CONFIG_PAX_KERNEXEC
24149+ " movl %%cr0, %0\n"
24150+ " movl %0, %%eax\n"
24151+ " andl $0xFFFEFFFF, %%eax\n"
24152+ " movl %%eax, %%cr0\n"
24153+#endif
24154+
24155+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
24156+
24157+#ifdef CONFIG_PAX_KERNEXEC
24158+ " movl %0, %%cr0\n"
24159+#endif
24160+
24161 " jmp 2b\n"
24162 ".previous\n"
24163 _ASM_EXTABLE(1b, 3b)
24164- : : "r" (from), "r" (to) : "memory");
24165+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
24166
24167 from += 64;
24168 to += 64;
24169diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
24170index 69fa106..adda88b 100644
24171--- a/arch/x86/lib/msr-reg.S
24172+++ b/arch/x86/lib/msr-reg.S
24173@@ -3,6 +3,7 @@
24174 #include <asm/dwarf2.h>
24175 #include <asm/asm.h>
24176 #include <asm/msr.h>
24177+#include <asm/alternative-asm.h>
24178
24179 #ifdef CONFIG_X86_64
24180 /*
24181@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
24182 CFI_STARTPROC
24183 pushq_cfi %rbx
24184 pushq_cfi %rbp
24185- movq %rdi, %r10 /* Save pointer */
24186+ movq %rdi, %r9 /* Save pointer */
24187 xorl %r11d, %r11d /* Return value */
24188 movl (%rdi), %eax
24189 movl 4(%rdi), %ecx
24190@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
24191 movl 28(%rdi), %edi
24192 CFI_REMEMBER_STATE
24193 1: \op
24194-2: movl %eax, (%r10)
24195+2: movl %eax, (%r9)
24196 movl %r11d, %eax /* Return value */
24197- movl %ecx, 4(%r10)
24198- movl %edx, 8(%r10)
24199- movl %ebx, 12(%r10)
24200- movl %ebp, 20(%r10)
24201- movl %esi, 24(%r10)
24202- movl %edi, 28(%r10)
24203+ movl %ecx, 4(%r9)
24204+ movl %edx, 8(%r9)
24205+ movl %ebx, 12(%r9)
24206+ movl %ebp, 20(%r9)
24207+ movl %esi, 24(%r9)
24208+ movl %edi, 28(%r9)
24209 popq_cfi %rbp
24210 popq_cfi %rbx
24211+ pax_force_retaddr
24212 ret
24213 3:
24214 CFI_RESTORE_STATE
24215diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
24216index 36b0d15..d381858 100644
24217--- a/arch/x86/lib/putuser.S
24218+++ b/arch/x86/lib/putuser.S
24219@@ -15,7 +15,9 @@
24220 #include <asm/thread_info.h>
24221 #include <asm/errno.h>
24222 #include <asm/asm.h>
24223-
24224+#include <asm/segment.h>
24225+#include <asm/pgtable.h>
24226+#include <asm/alternative-asm.h>
24227
24228 /*
24229 * __put_user_X
24230@@ -29,52 +31,119 @@
24231 * as they get called from within inline assembly.
24232 */
24233
24234-#define ENTER CFI_STARTPROC ; \
24235- GET_THREAD_INFO(%_ASM_BX)
24236-#define EXIT ret ; \
24237+#define ENTER CFI_STARTPROC
24238+#define EXIT pax_force_retaddr; ret ; \
24239 CFI_ENDPROC
24240
24241+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24242+#define _DEST %_ASM_CX,%_ASM_BX
24243+#else
24244+#define _DEST %_ASM_CX
24245+#endif
24246+
24247+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24248+#define __copyuser_seg gs;
24249+#else
24250+#define __copyuser_seg
24251+#endif
24252+
24253 .text
24254 ENTRY(__put_user_1)
24255 ENTER
24256+
24257+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24258+ GET_THREAD_INFO(%_ASM_BX)
24259 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
24260 jae bad_put_user
24261-1: movb %al,(%_ASM_CX)
24262+
24263+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24264+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24265+ cmp %_ASM_BX,%_ASM_CX
24266+ jb 1234f
24267+ xor %ebx,%ebx
24268+1234:
24269+#endif
24270+
24271+#endif
24272+
24273+1: __copyuser_seg movb %al,(_DEST)
24274 xor %eax,%eax
24275 EXIT
24276 ENDPROC(__put_user_1)
24277
24278 ENTRY(__put_user_2)
24279 ENTER
24280+
24281+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24282+ GET_THREAD_INFO(%_ASM_BX)
24283 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24284 sub $1,%_ASM_BX
24285 cmp %_ASM_BX,%_ASM_CX
24286 jae bad_put_user
24287-2: movw %ax,(%_ASM_CX)
24288+
24289+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24290+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24291+ cmp %_ASM_BX,%_ASM_CX
24292+ jb 1234f
24293+ xor %ebx,%ebx
24294+1234:
24295+#endif
24296+
24297+#endif
24298+
24299+2: __copyuser_seg movw %ax,(_DEST)
24300 xor %eax,%eax
24301 EXIT
24302 ENDPROC(__put_user_2)
24303
24304 ENTRY(__put_user_4)
24305 ENTER
24306+
24307+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24308+ GET_THREAD_INFO(%_ASM_BX)
24309 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24310 sub $3,%_ASM_BX
24311 cmp %_ASM_BX,%_ASM_CX
24312 jae bad_put_user
24313-3: movl %eax,(%_ASM_CX)
24314+
24315+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24316+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24317+ cmp %_ASM_BX,%_ASM_CX
24318+ jb 1234f
24319+ xor %ebx,%ebx
24320+1234:
24321+#endif
24322+
24323+#endif
24324+
24325+3: __copyuser_seg movl %eax,(_DEST)
24326 xor %eax,%eax
24327 EXIT
24328 ENDPROC(__put_user_4)
24329
24330 ENTRY(__put_user_8)
24331 ENTER
24332+
24333+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24334+ GET_THREAD_INFO(%_ASM_BX)
24335 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24336 sub $7,%_ASM_BX
24337 cmp %_ASM_BX,%_ASM_CX
24338 jae bad_put_user
24339-4: mov %_ASM_AX,(%_ASM_CX)
24340+
24341+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24342+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24343+ cmp %_ASM_BX,%_ASM_CX
24344+ jb 1234f
24345+ xor %ebx,%ebx
24346+1234:
24347+#endif
24348+
24349+#endif
24350+
24351+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24352 #ifdef CONFIG_X86_32
24353-5: movl %edx,4(%_ASM_CX)
24354+5: __copyuser_seg movl %edx,4(_DEST)
24355 #endif
24356 xor %eax,%eax
24357 EXIT
24358diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
24359index 05ea55f..6345b9a 100644
24360--- a/arch/x86/lib/rwlock_64.S
24361+++ b/arch/x86/lib/rwlock_64.S
24362@@ -2,6 +2,7 @@
24363
24364 #include <linux/linkage.h>
24365 #include <asm/rwlock.h>
24366+#include <asm/asm.h>
24367 #include <asm/alternative-asm.h>
24368 #include <asm/dwarf2.h>
24369
24370@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
24371 CFI_STARTPROC
24372 LOCK_PREFIX
24373 addl $RW_LOCK_BIAS,(%rdi)
24374+
24375+#ifdef CONFIG_PAX_REFCOUNT
24376+ jno 1234f
24377+ LOCK_PREFIX
24378+ subl $RW_LOCK_BIAS,(%rdi)
24379+ int $4
24380+1234:
24381+ _ASM_EXTABLE(1234b, 1234b)
24382+#endif
24383+
24384 1: rep
24385 nop
24386 cmpl $RW_LOCK_BIAS,(%rdi)
24387 jne 1b
24388 LOCK_PREFIX
24389 subl $RW_LOCK_BIAS,(%rdi)
24390+
24391+#ifdef CONFIG_PAX_REFCOUNT
24392+ jno 1234f
24393+ LOCK_PREFIX
24394+ addl $RW_LOCK_BIAS,(%rdi)
24395+ int $4
24396+1234:
24397+ _ASM_EXTABLE(1234b, 1234b)
24398+#endif
24399+
24400 jnz __write_lock_failed
24401+ pax_force_retaddr
24402 ret
24403 CFI_ENDPROC
24404 END(__write_lock_failed)
24405@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
24406 CFI_STARTPROC
24407 LOCK_PREFIX
24408 incl (%rdi)
24409+
24410+#ifdef CONFIG_PAX_REFCOUNT
24411+ jno 1234f
24412+ LOCK_PREFIX
24413+ decl (%rdi)
24414+ int $4
24415+1234:
24416+ _ASM_EXTABLE(1234b, 1234b)
24417+#endif
24418+
24419 1: rep
24420 nop
24421 cmpl $1,(%rdi)
24422 js 1b
24423 LOCK_PREFIX
24424 decl (%rdi)
24425+
24426+#ifdef CONFIG_PAX_REFCOUNT
24427+ jno 1234f
24428+ LOCK_PREFIX
24429+ incl (%rdi)
24430+ int $4
24431+1234:
24432+ _ASM_EXTABLE(1234b, 1234b)
24433+#endif
24434+
24435 js __read_lock_failed
24436+ pax_force_retaddr
24437 ret
24438 CFI_ENDPROC
24439 END(__read_lock_failed)
24440diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
24441index 15acecf..f768b10 100644
24442--- a/arch/x86/lib/rwsem_64.S
24443+++ b/arch/x86/lib/rwsem_64.S
24444@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
24445 call rwsem_down_read_failed
24446 popq %rdx
24447 restore_common_regs
24448+ pax_force_retaddr
24449 ret
24450 ENDPROC(call_rwsem_down_read_failed)
24451
24452@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
24453 movq %rax,%rdi
24454 call rwsem_down_write_failed
24455 restore_common_regs
24456+ pax_force_retaddr
24457 ret
24458 ENDPROC(call_rwsem_down_write_failed)
24459
24460@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
24461 movq %rax,%rdi
24462 call rwsem_wake
24463 restore_common_regs
24464-1: ret
24465+1: pax_force_retaddr
24466+ ret
24467 ENDPROC(call_rwsem_wake)
24468
24469 /* Fix up special calling conventions */
24470@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
24471 call rwsem_downgrade_wake
24472 popq %rdx
24473 restore_common_regs
24474+ pax_force_retaddr
24475 ret
24476 ENDPROC(call_rwsem_downgrade_wake)
24477diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24478index bf9a7d5..fb06ab5 100644
24479--- a/arch/x86/lib/thunk_64.S
24480+++ b/arch/x86/lib/thunk_64.S
24481@@ -10,7 +10,8 @@
24482 #include <asm/dwarf2.h>
24483 #include <asm/calling.h>
24484 #include <asm/rwlock.h>
24485-
24486+ #include <asm/alternative-asm.h>
24487+
24488 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24489 .macro thunk name,func
24490 .globl \name
24491@@ -70,6 +71,7 @@
24492 SAVE_ARGS
24493 restore:
24494 RESTORE_ARGS
24495+ pax_force_retaddr
24496 ret
24497 CFI_ENDPROC
24498
24499@@ -77,5 +79,6 @@ restore:
24500 SAVE_ARGS
24501 restore_norax:
24502 RESTORE_ARGS 1
24503+ pax_force_retaddr
24504 ret
24505 CFI_ENDPROC
24506diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24507index 1f118d4..fc661b0 100644
24508--- a/arch/x86/lib/usercopy_32.c
24509+++ b/arch/x86/lib/usercopy_32.c
24510@@ -43,7 +43,7 @@ do { \
24511 __asm__ __volatile__( \
24512 " testl %1,%1\n" \
24513 " jz 2f\n" \
24514- "0: lodsb\n" \
24515+ "0: "__copyuser_seg"lodsb\n" \
24516 " stosb\n" \
24517 " testb %%al,%%al\n" \
24518 " jz 1f\n" \
24519@@ -128,10 +128,12 @@ do { \
24520 int __d0; \
24521 might_fault(); \
24522 __asm__ __volatile__( \
24523+ __COPYUSER_SET_ES \
24524 "0: rep; stosl\n" \
24525 " movl %2,%0\n" \
24526 "1: rep; stosb\n" \
24527 "2:\n" \
24528+ __COPYUSER_RESTORE_ES \
24529 ".section .fixup,\"ax\"\n" \
24530 "3: lea 0(%2,%0,4),%0\n" \
24531 " jmp 2b\n" \
24532@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
24533 might_fault();
24534
24535 __asm__ __volatile__(
24536+ __COPYUSER_SET_ES
24537 " testl %0, %0\n"
24538 " jz 3f\n"
24539 " andl %0,%%ecx\n"
24540@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
24541 " subl %%ecx,%0\n"
24542 " addl %0,%%eax\n"
24543 "1:\n"
24544+ __COPYUSER_RESTORE_ES
24545 ".section .fixup,\"ax\"\n"
24546 "2: xorl %%eax,%%eax\n"
24547 " jmp 1b\n"
24548@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
24549
24550 #ifdef CONFIG_X86_INTEL_USERCOPY
24551 static unsigned long
24552-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24553+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24554 {
24555 int d0, d1;
24556 __asm__ __volatile__(
24557@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24558 " .align 2,0x90\n"
24559 "3: movl 0(%4), %%eax\n"
24560 "4: movl 4(%4), %%edx\n"
24561- "5: movl %%eax, 0(%3)\n"
24562- "6: movl %%edx, 4(%3)\n"
24563+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24564+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24565 "7: movl 8(%4), %%eax\n"
24566 "8: movl 12(%4),%%edx\n"
24567- "9: movl %%eax, 8(%3)\n"
24568- "10: movl %%edx, 12(%3)\n"
24569+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24570+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24571 "11: movl 16(%4), %%eax\n"
24572 "12: movl 20(%4), %%edx\n"
24573- "13: movl %%eax, 16(%3)\n"
24574- "14: movl %%edx, 20(%3)\n"
24575+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24576+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24577 "15: movl 24(%4), %%eax\n"
24578 "16: movl 28(%4), %%edx\n"
24579- "17: movl %%eax, 24(%3)\n"
24580- "18: movl %%edx, 28(%3)\n"
24581+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24582+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24583 "19: movl 32(%4), %%eax\n"
24584 "20: movl 36(%4), %%edx\n"
24585- "21: movl %%eax, 32(%3)\n"
24586- "22: movl %%edx, 36(%3)\n"
24587+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24588+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24589 "23: movl 40(%4), %%eax\n"
24590 "24: movl 44(%4), %%edx\n"
24591- "25: movl %%eax, 40(%3)\n"
24592- "26: movl %%edx, 44(%3)\n"
24593+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24594+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24595 "27: movl 48(%4), %%eax\n"
24596 "28: movl 52(%4), %%edx\n"
24597- "29: movl %%eax, 48(%3)\n"
24598- "30: movl %%edx, 52(%3)\n"
24599+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24600+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24601 "31: movl 56(%4), %%eax\n"
24602 "32: movl 60(%4), %%edx\n"
24603- "33: movl %%eax, 56(%3)\n"
24604- "34: movl %%edx, 60(%3)\n"
24605+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24606+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24607 " addl $-64, %0\n"
24608 " addl $64, %4\n"
24609 " addl $64, %3\n"
24610@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24611 " shrl $2, %0\n"
24612 " andl $3, %%eax\n"
24613 " cld\n"
24614+ __COPYUSER_SET_ES
24615 "99: rep; movsl\n"
24616 "36: movl %%eax, %0\n"
24617 "37: rep; movsb\n"
24618 "100:\n"
24619+ __COPYUSER_RESTORE_ES
24620 ".section .fixup,\"ax\"\n"
24621 "101: lea 0(%%eax,%0,4),%0\n"
24622 " jmp 100b\n"
24623@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24624 }
24625
24626 static unsigned long
24627+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24628+{
24629+ int d0, d1;
24630+ __asm__ __volatile__(
24631+ " .align 2,0x90\n"
24632+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24633+ " cmpl $67, %0\n"
24634+ " jbe 3f\n"
24635+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24636+ " .align 2,0x90\n"
24637+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24638+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24639+ "5: movl %%eax, 0(%3)\n"
24640+ "6: movl %%edx, 4(%3)\n"
24641+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24642+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24643+ "9: movl %%eax, 8(%3)\n"
24644+ "10: movl %%edx, 12(%3)\n"
24645+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24646+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24647+ "13: movl %%eax, 16(%3)\n"
24648+ "14: movl %%edx, 20(%3)\n"
24649+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24650+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24651+ "17: movl %%eax, 24(%3)\n"
24652+ "18: movl %%edx, 28(%3)\n"
24653+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24654+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24655+ "21: movl %%eax, 32(%3)\n"
24656+ "22: movl %%edx, 36(%3)\n"
24657+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24658+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24659+ "25: movl %%eax, 40(%3)\n"
24660+ "26: movl %%edx, 44(%3)\n"
24661+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24662+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24663+ "29: movl %%eax, 48(%3)\n"
24664+ "30: movl %%edx, 52(%3)\n"
24665+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24666+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24667+ "33: movl %%eax, 56(%3)\n"
24668+ "34: movl %%edx, 60(%3)\n"
24669+ " addl $-64, %0\n"
24670+ " addl $64, %4\n"
24671+ " addl $64, %3\n"
24672+ " cmpl $63, %0\n"
24673+ " ja 1b\n"
24674+ "35: movl %0, %%eax\n"
24675+ " shrl $2, %0\n"
24676+ " andl $3, %%eax\n"
24677+ " cld\n"
24678+ "99: rep; "__copyuser_seg" movsl\n"
24679+ "36: movl %%eax, %0\n"
24680+ "37: rep; "__copyuser_seg" movsb\n"
24681+ "100:\n"
24682+ ".section .fixup,\"ax\"\n"
24683+ "101: lea 0(%%eax,%0,4),%0\n"
24684+ " jmp 100b\n"
24685+ ".previous\n"
24686+ ".section __ex_table,\"a\"\n"
24687+ " .align 4\n"
24688+ " .long 1b,100b\n"
24689+ " .long 2b,100b\n"
24690+ " .long 3b,100b\n"
24691+ " .long 4b,100b\n"
24692+ " .long 5b,100b\n"
24693+ " .long 6b,100b\n"
24694+ " .long 7b,100b\n"
24695+ " .long 8b,100b\n"
24696+ " .long 9b,100b\n"
24697+ " .long 10b,100b\n"
24698+ " .long 11b,100b\n"
24699+ " .long 12b,100b\n"
24700+ " .long 13b,100b\n"
24701+ " .long 14b,100b\n"
24702+ " .long 15b,100b\n"
24703+ " .long 16b,100b\n"
24704+ " .long 17b,100b\n"
24705+ " .long 18b,100b\n"
24706+ " .long 19b,100b\n"
24707+ " .long 20b,100b\n"
24708+ " .long 21b,100b\n"
24709+ " .long 22b,100b\n"
24710+ " .long 23b,100b\n"
24711+ " .long 24b,100b\n"
24712+ " .long 25b,100b\n"
24713+ " .long 26b,100b\n"
24714+ " .long 27b,100b\n"
24715+ " .long 28b,100b\n"
24716+ " .long 29b,100b\n"
24717+ " .long 30b,100b\n"
24718+ " .long 31b,100b\n"
24719+ " .long 32b,100b\n"
24720+ " .long 33b,100b\n"
24721+ " .long 34b,100b\n"
24722+ " .long 35b,100b\n"
24723+ " .long 36b,100b\n"
24724+ " .long 37b,100b\n"
24725+ " .long 99b,101b\n"
24726+ ".previous"
24727+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24728+ : "1"(to), "2"(from), "0"(size)
24729+ : "eax", "edx", "memory");
24730+ return size;
24731+}
24732+
24733+static unsigned long
24734+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
24735+static unsigned long
24736 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24737 {
24738 int d0, d1;
24739 __asm__ __volatile__(
24740 " .align 2,0x90\n"
24741- "0: movl 32(%4), %%eax\n"
24742+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24743 " cmpl $67, %0\n"
24744 " jbe 2f\n"
24745- "1: movl 64(%4), %%eax\n"
24746+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24747 " .align 2,0x90\n"
24748- "2: movl 0(%4), %%eax\n"
24749- "21: movl 4(%4), %%edx\n"
24750+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24751+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24752 " movl %%eax, 0(%3)\n"
24753 " movl %%edx, 4(%3)\n"
24754- "3: movl 8(%4), %%eax\n"
24755- "31: movl 12(%4),%%edx\n"
24756+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24757+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24758 " movl %%eax, 8(%3)\n"
24759 " movl %%edx, 12(%3)\n"
24760- "4: movl 16(%4), %%eax\n"
24761- "41: movl 20(%4), %%edx\n"
24762+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24763+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24764 " movl %%eax, 16(%3)\n"
24765 " movl %%edx, 20(%3)\n"
24766- "10: movl 24(%4), %%eax\n"
24767- "51: movl 28(%4), %%edx\n"
24768+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24769+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24770 " movl %%eax, 24(%3)\n"
24771 " movl %%edx, 28(%3)\n"
24772- "11: movl 32(%4), %%eax\n"
24773- "61: movl 36(%4), %%edx\n"
24774+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24775+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24776 " movl %%eax, 32(%3)\n"
24777 " movl %%edx, 36(%3)\n"
24778- "12: movl 40(%4), %%eax\n"
24779- "71: movl 44(%4), %%edx\n"
24780+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24781+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24782 " movl %%eax, 40(%3)\n"
24783 " movl %%edx, 44(%3)\n"
24784- "13: movl 48(%4), %%eax\n"
24785- "81: movl 52(%4), %%edx\n"
24786+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24787+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24788 " movl %%eax, 48(%3)\n"
24789 " movl %%edx, 52(%3)\n"
24790- "14: movl 56(%4), %%eax\n"
24791- "91: movl 60(%4), %%edx\n"
24792+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24793+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24794 " movl %%eax, 56(%3)\n"
24795 " movl %%edx, 60(%3)\n"
24796 " addl $-64, %0\n"
24797@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24798 " shrl $2, %0\n"
24799 " andl $3, %%eax\n"
24800 " cld\n"
24801- "6: rep; movsl\n"
24802+ "6: rep; "__copyuser_seg" movsl\n"
24803 " movl %%eax,%0\n"
24804- "7: rep; movsb\n"
24805+ "7: rep; "__copyuser_seg" movsb\n"
24806 "8:\n"
24807 ".section .fixup,\"ax\"\n"
24808 "9: lea 0(%%eax,%0,4),%0\n"
24809@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24810 */
24811
24812 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24813+ const void __user *from, unsigned long size) __size_overflow(3);
24814+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24815 const void __user *from, unsigned long size)
24816 {
24817 int d0, d1;
24818
24819 __asm__ __volatile__(
24820 " .align 2,0x90\n"
24821- "0: movl 32(%4), %%eax\n"
24822+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24823 " cmpl $67, %0\n"
24824 " jbe 2f\n"
24825- "1: movl 64(%4), %%eax\n"
24826+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24827 " .align 2,0x90\n"
24828- "2: movl 0(%4), %%eax\n"
24829- "21: movl 4(%4), %%edx\n"
24830+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24831+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24832 " movnti %%eax, 0(%3)\n"
24833 " movnti %%edx, 4(%3)\n"
24834- "3: movl 8(%4), %%eax\n"
24835- "31: movl 12(%4),%%edx\n"
24836+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24837+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24838 " movnti %%eax, 8(%3)\n"
24839 " movnti %%edx, 12(%3)\n"
24840- "4: movl 16(%4), %%eax\n"
24841- "41: movl 20(%4), %%edx\n"
24842+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24843+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24844 " movnti %%eax, 16(%3)\n"
24845 " movnti %%edx, 20(%3)\n"
24846- "10: movl 24(%4), %%eax\n"
24847- "51: movl 28(%4), %%edx\n"
24848+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24849+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24850 " movnti %%eax, 24(%3)\n"
24851 " movnti %%edx, 28(%3)\n"
24852- "11: movl 32(%4), %%eax\n"
24853- "61: movl 36(%4), %%edx\n"
24854+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24855+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24856 " movnti %%eax, 32(%3)\n"
24857 " movnti %%edx, 36(%3)\n"
24858- "12: movl 40(%4), %%eax\n"
24859- "71: movl 44(%4), %%edx\n"
24860+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24861+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24862 " movnti %%eax, 40(%3)\n"
24863 " movnti %%edx, 44(%3)\n"
24864- "13: movl 48(%4), %%eax\n"
24865- "81: movl 52(%4), %%edx\n"
24866+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24867+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24868 " movnti %%eax, 48(%3)\n"
24869 " movnti %%edx, 52(%3)\n"
24870- "14: movl 56(%4), %%eax\n"
24871- "91: movl 60(%4), %%edx\n"
24872+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24873+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24874 " movnti %%eax, 56(%3)\n"
24875 " movnti %%edx, 60(%3)\n"
24876 " addl $-64, %0\n"
24877@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24878 " shrl $2, %0\n"
24879 " andl $3, %%eax\n"
24880 " cld\n"
24881- "6: rep; movsl\n"
24882+ "6: rep; "__copyuser_seg" movsl\n"
24883 " movl %%eax,%0\n"
24884- "7: rep; movsb\n"
24885+ "7: rep; "__copyuser_seg" movsb\n"
24886 "8:\n"
24887 ".section .fixup,\"ax\"\n"
24888 "9: lea 0(%%eax,%0,4),%0\n"
24889@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24890 }
24891
24892 static unsigned long __copy_user_intel_nocache(void *to,
24893+ const void __user *from, unsigned long size) __size_overflow(3);
24894+static unsigned long __copy_user_intel_nocache(void *to,
24895 const void __user *from, unsigned long size)
24896 {
24897 int d0, d1;
24898
24899 __asm__ __volatile__(
24900 " .align 2,0x90\n"
24901- "0: movl 32(%4), %%eax\n"
24902+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24903 " cmpl $67, %0\n"
24904 " jbe 2f\n"
24905- "1: movl 64(%4), %%eax\n"
24906+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24907 " .align 2,0x90\n"
24908- "2: movl 0(%4), %%eax\n"
24909- "21: movl 4(%4), %%edx\n"
24910+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24911+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24912 " movnti %%eax, 0(%3)\n"
24913 " movnti %%edx, 4(%3)\n"
24914- "3: movl 8(%4), %%eax\n"
24915- "31: movl 12(%4),%%edx\n"
24916+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24917+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24918 " movnti %%eax, 8(%3)\n"
24919 " movnti %%edx, 12(%3)\n"
24920- "4: movl 16(%4), %%eax\n"
24921- "41: movl 20(%4), %%edx\n"
24922+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24923+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24924 " movnti %%eax, 16(%3)\n"
24925 " movnti %%edx, 20(%3)\n"
24926- "10: movl 24(%4), %%eax\n"
24927- "51: movl 28(%4), %%edx\n"
24928+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24929+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24930 " movnti %%eax, 24(%3)\n"
24931 " movnti %%edx, 28(%3)\n"
24932- "11: movl 32(%4), %%eax\n"
24933- "61: movl 36(%4), %%edx\n"
24934+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24935+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24936 " movnti %%eax, 32(%3)\n"
24937 " movnti %%edx, 36(%3)\n"
24938- "12: movl 40(%4), %%eax\n"
24939- "71: movl 44(%4), %%edx\n"
24940+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24941+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24942 " movnti %%eax, 40(%3)\n"
24943 " movnti %%edx, 44(%3)\n"
24944- "13: movl 48(%4), %%eax\n"
24945- "81: movl 52(%4), %%edx\n"
24946+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24947+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24948 " movnti %%eax, 48(%3)\n"
24949 " movnti %%edx, 52(%3)\n"
24950- "14: movl 56(%4), %%eax\n"
24951- "91: movl 60(%4), %%edx\n"
24952+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24953+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24954 " movnti %%eax, 56(%3)\n"
24955 " movnti %%edx, 60(%3)\n"
24956 " addl $-64, %0\n"
24957@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24958 " shrl $2, %0\n"
24959 " andl $3, %%eax\n"
24960 " cld\n"
24961- "6: rep; movsl\n"
24962+ "6: rep; "__copyuser_seg" movsl\n"
24963 " movl %%eax,%0\n"
24964- "7: rep; movsb\n"
24965+ "7: rep; "__copyuser_seg" movsb\n"
24966 "8:\n"
24967 ".section .fixup,\"ax\"\n"
24968 "9: lea 0(%%eax,%0,4),%0\n"
24969@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24970 */
24971 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24972 unsigned long size);
24973-unsigned long __copy_user_intel(void __user *to, const void *from,
24974+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24975+ unsigned long size);
24976+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24977 unsigned long size);
24978 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24979 const void __user *from, unsigned long size);
24980 #endif /* CONFIG_X86_INTEL_USERCOPY */
24981
24982 /* Generic arbitrary sized copy. */
24983-#define __copy_user(to, from, size) \
24984+#define __copy_user(to, from, size, prefix, set, restore) \
24985 do { \
24986 int __d0, __d1, __d2; \
24987 __asm__ __volatile__( \
24988+ set \
24989 " cmp $7,%0\n" \
24990 " jbe 1f\n" \
24991 " movl %1,%0\n" \
24992 " negl %0\n" \
24993 " andl $7,%0\n" \
24994 " subl %0,%3\n" \
24995- "4: rep; movsb\n" \
24996+ "4: rep; "prefix"movsb\n" \
24997 " movl %3,%0\n" \
24998 " shrl $2,%0\n" \
24999 " andl $3,%3\n" \
25000 " .align 2,0x90\n" \
25001- "0: rep; movsl\n" \
25002+ "0: rep; "prefix"movsl\n" \
25003 " movl %3,%0\n" \
25004- "1: rep; movsb\n" \
25005+ "1: rep; "prefix"movsb\n" \
25006 "2:\n" \
25007+ restore \
25008 ".section .fixup,\"ax\"\n" \
25009 "5: addl %3,%0\n" \
25010 " jmp 2b\n" \
25011@@ -682,14 +805,14 @@ do { \
25012 " negl %0\n" \
25013 " andl $7,%0\n" \
25014 " subl %0,%3\n" \
25015- "4: rep; movsb\n" \
25016+ "4: rep; "__copyuser_seg"movsb\n" \
25017 " movl %3,%0\n" \
25018 " shrl $2,%0\n" \
25019 " andl $3,%3\n" \
25020 " .align 2,0x90\n" \
25021- "0: rep; movsl\n" \
25022+ "0: rep; "__copyuser_seg"movsl\n" \
25023 " movl %3,%0\n" \
25024- "1: rep; movsb\n" \
25025+ "1: rep; "__copyuser_seg"movsb\n" \
25026 "2:\n" \
25027 ".section .fixup,\"ax\"\n" \
25028 "5: addl %3,%0\n" \
25029@@ -775,9 +898,9 @@ survive:
25030 }
25031 #endif
25032 if (movsl_is_ok(to, from, n))
25033- __copy_user(to, from, n);
25034+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
25035 else
25036- n = __copy_user_intel(to, from, n);
25037+ n = __generic_copy_to_user_intel(to, from, n);
25038 return n;
25039 }
25040 EXPORT_SYMBOL(__copy_to_user_ll);
25041@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
25042 unsigned long n)
25043 {
25044 if (movsl_is_ok(to, from, n))
25045- __copy_user(to, from, n);
25046+ __copy_user(to, from, n, __copyuser_seg, "", "");
25047 else
25048- n = __copy_user_intel((void __user *)to,
25049- (const void *)from, n);
25050+ n = __generic_copy_from_user_intel(to, from, n);
25051 return n;
25052 }
25053 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
25054@@ -827,59 +949,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
25055 if (n > 64 && cpu_has_xmm2)
25056 n = __copy_user_intel_nocache(to, from, n);
25057 else
25058- __copy_user(to, from, n);
25059+ __copy_user(to, from, n, __copyuser_seg, "", "");
25060 #else
25061- __copy_user(to, from, n);
25062+ __copy_user(to, from, n, __copyuser_seg, "", "");
25063 #endif
25064 return n;
25065 }
25066 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
25067
25068-/**
25069- * copy_to_user: - Copy a block of data into user space.
25070- * @to: Destination address, in user space.
25071- * @from: Source address, in kernel space.
25072- * @n: Number of bytes to copy.
25073- *
25074- * Context: User context only. This function may sleep.
25075- *
25076- * Copy data from kernel space to user space.
25077- *
25078- * Returns number of bytes that could not be copied.
25079- * On success, this will be zero.
25080- */
25081-unsigned long
25082-copy_to_user(void __user *to, const void *from, unsigned long n)
25083+#ifdef CONFIG_PAX_MEMORY_UDEREF
25084+void __set_fs(mm_segment_t x)
25085 {
25086- if (access_ok(VERIFY_WRITE, to, n))
25087- n = __copy_to_user(to, from, n);
25088- return n;
25089+ switch (x.seg) {
25090+ case 0:
25091+ loadsegment(gs, 0);
25092+ break;
25093+ case TASK_SIZE_MAX:
25094+ loadsegment(gs, __USER_DS);
25095+ break;
25096+ case -1UL:
25097+ loadsegment(gs, __KERNEL_DS);
25098+ break;
25099+ default:
25100+ BUG();
25101+ }
25102+ return;
25103 }
25104-EXPORT_SYMBOL(copy_to_user);
25105+EXPORT_SYMBOL(__set_fs);
25106
25107-/**
25108- * copy_from_user: - Copy a block of data from user space.
25109- * @to: Destination address, in kernel space.
25110- * @from: Source address, in user space.
25111- * @n: Number of bytes to copy.
25112- *
25113- * Context: User context only. This function may sleep.
25114- *
25115- * Copy data from user space to kernel space.
25116- *
25117- * Returns number of bytes that could not be copied.
25118- * On success, this will be zero.
25119- *
25120- * If some data could not be copied, this function will pad the copied
25121- * data to the requested size using zero bytes.
25122- */
25123-unsigned long
25124-copy_from_user(void *to, const void __user *from, unsigned long n)
25125+void set_fs(mm_segment_t x)
25126 {
25127- if (access_ok(VERIFY_READ, from, n))
25128- n = __copy_from_user(to, from, n);
25129- else
25130- memset(to, 0, n);
25131- return n;
25132+ current_thread_info()->addr_limit = x;
25133+ __set_fs(x);
25134 }
25135-EXPORT_SYMBOL(copy_from_user);
25136+EXPORT_SYMBOL(set_fs);
25137+#endif
25138diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
25139index b7c2849..8633ad8 100644
25140--- a/arch/x86/lib/usercopy_64.c
25141+++ b/arch/x86/lib/usercopy_64.c
25142@@ -42,6 +42,12 @@ long
25143 __strncpy_from_user(char *dst, const char __user *src, long count)
25144 {
25145 long res;
25146+
25147+#ifdef CONFIG_PAX_MEMORY_UDEREF
25148+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
25149+ src += PAX_USER_SHADOW_BASE;
25150+#endif
25151+
25152 __do_strncpy_from_user(dst, src, count, res);
25153 return res;
25154 }
25155@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
25156 {
25157 long __d0;
25158 might_fault();
25159+
25160+#ifdef CONFIG_PAX_MEMORY_UDEREF
25161+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
25162+ addr += PAX_USER_SHADOW_BASE;
25163+#endif
25164+
25165 /* no memory constraint because it doesn't change any memory gcc knows
25166 about */
25167 asm volatile(
25168@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
25169 }
25170 EXPORT_SYMBOL(strlen_user);
25171
25172-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
25173+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
25174 {
25175- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25176- return copy_user_generic((__force void *)to, (__force void *)from, len);
25177- }
25178- return len;
25179+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
25180+
25181+#ifdef CONFIG_PAX_MEMORY_UDEREF
25182+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
25183+ to += PAX_USER_SHADOW_BASE;
25184+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
25185+ from += PAX_USER_SHADOW_BASE;
25186+#endif
25187+
25188+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
25189+ }
25190+ return len;
25191 }
25192 EXPORT_SYMBOL(copy_in_user);
25193
25194@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
25195 * it is not necessary to optimize tail handling.
25196 */
25197 unsigned long
25198-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
25199+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
25200 {
25201 char c;
25202 unsigned zero_len;
25203diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
25204index 61b41ca..5fef66a 100644
25205--- a/arch/x86/mm/extable.c
25206+++ b/arch/x86/mm/extable.c
25207@@ -1,14 +1,71 @@
25208 #include <linux/module.h>
25209 #include <linux/spinlock.h>
25210+#include <linux/sort.h>
25211 #include <asm/uaccess.h>
25212+#include <asm/pgtable.h>
25213
25214+/*
25215+ * The exception table needs to be sorted so that the binary
25216+ * search that we use to find entries in it works properly.
25217+ * This is used both for the kernel exception table and for
25218+ * the exception tables of modules that get loaded.
25219+ */
25220+static int cmp_ex(const void *a, const void *b)
25221+{
25222+ const struct exception_table_entry *x = a, *y = b;
25223+
25224+ /* avoid overflow */
25225+ if (x->insn > y->insn)
25226+ return 1;
25227+ if (x->insn < y->insn)
25228+ return -1;
25229+ return 0;
25230+}
25231+
25232+static void swap_ex(void *a, void *b, int size)
25233+{
25234+ struct exception_table_entry t, *x = a, *y = b;
25235+
25236+ t = *x;
25237+
25238+ pax_open_kernel();
25239+ *x = *y;
25240+ *y = t;
25241+ pax_close_kernel();
25242+}
25243+
25244+void sort_extable(struct exception_table_entry *start,
25245+ struct exception_table_entry *finish)
25246+{
25247+ sort(start, finish - start, sizeof(struct exception_table_entry),
25248+ cmp_ex, swap_ex);
25249+}
25250+
25251+#ifdef CONFIG_MODULES
25252+/*
25253+ * If the exception table is sorted, any referring to the module init
25254+ * will be at the beginning or the end.
25255+ */
25256+void trim_init_extable(struct module *m)
25257+{
25258+ /*trim the beginning*/
25259+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
25260+ m->extable++;
25261+ m->num_exentries--;
25262+ }
25263+ /*trim the end*/
25264+ while (m->num_exentries &&
25265+ within_module_init(m->extable[m->num_exentries-1].insn, m))
25266+ m->num_exentries--;
25267+}
25268+#endif /* CONFIG_MODULES */
25269
25270 int fixup_exception(struct pt_regs *regs)
25271 {
25272 const struct exception_table_entry *fixup;
25273
25274 #ifdef CONFIG_PNPBIOS
25275- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
25276+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
25277 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
25278 extern u32 pnp_bios_is_utter_crap;
25279 pnp_bios_is_utter_crap = 1;
25280diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
25281index 8ac0d76..ca501e2 100644
25282--- a/arch/x86/mm/fault.c
25283+++ b/arch/x86/mm/fault.c
25284@@ -11,10 +11,19 @@
25285 #include <linux/kprobes.h> /* __kprobes, ... */
25286 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
25287 #include <linux/perf_event.h> /* perf_sw_event */
25288+#include <linux/unistd.h>
25289+#include <linux/compiler.h>
25290
25291 #include <asm/traps.h> /* dotraplinkage, ... */
25292 #include <asm/pgalloc.h> /* pgd_*(), ... */
25293 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
25294+#include <asm/vsyscall.h>
25295+#include <asm/tlbflush.h>
25296+
25297+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25298+#include <asm/stacktrace.h>
25299+#include "../kernel/dumpstack.h"
25300+#endif
25301
25302 /*
25303 * Page fault error code bits:
25304@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
25305 int ret = 0;
25306
25307 /* kprobe_running() needs smp_processor_id() */
25308- if (kprobes_built_in() && !user_mode_vm(regs)) {
25309+ if (kprobes_built_in() && !user_mode(regs)) {
25310 preempt_disable();
25311 if (kprobe_running() && kprobe_fault_handler(regs, 14))
25312 ret = 1;
25313@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
25314 return !instr_lo || (instr_lo>>1) == 1;
25315 case 0x00:
25316 /* Prefetch instruction is 0x0F0D or 0x0F18 */
25317- if (probe_kernel_address(instr, opcode))
25318+ if (user_mode(regs)) {
25319+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25320+ return 0;
25321+ } else if (probe_kernel_address(instr, opcode))
25322 return 0;
25323
25324 *prefetch = (instr_lo == 0xF) &&
25325@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
25326 while (instr < max_instr) {
25327 unsigned char opcode;
25328
25329- if (probe_kernel_address(instr, opcode))
25330+ if (user_mode(regs)) {
25331+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
25332+ break;
25333+ } else if (probe_kernel_address(instr, opcode))
25334 break;
25335
25336 instr++;
25337@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
25338 force_sig_info(si_signo, &info, tsk);
25339 }
25340
25341+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25342+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
25343+#endif
25344+
25345+#ifdef CONFIG_PAX_EMUTRAMP
25346+static int pax_handle_fetch_fault(struct pt_regs *regs);
25347+#endif
25348+
25349+#ifdef CONFIG_PAX_PAGEEXEC
25350+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25351+{
25352+ pgd_t *pgd;
25353+ pud_t *pud;
25354+ pmd_t *pmd;
25355+
25356+ pgd = pgd_offset(mm, address);
25357+ if (!pgd_present(*pgd))
25358+ return NULL;
25359+ pud = pud_offset(pgd, address);
25360+ if (!pud_present(*pud))
25361+ return NULL;
25362+ pmd = pmd_offset(pud, address);
25363+ if (!pmd_present(*pmd))
25364+ return NULL;
25365+ return pmd;
25366+}
25367+#endif
25368+
25369 DEFINE_SPINLOCK(pgd_lock);
25370 LIST_HEAD(pgd_list);
25371
25372@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
25373 address += PMD_SIZE) {
25374
25375 unsigned long flags;
25376+
25377+#ifdef CONFIG_PAX_PER_CPU_PGD
25378+ unsigned long cpu;
25379+#else
25380 struct page *page;
25381+#endif
25382
25383 spin_lock_irqsave(&pgd_lock, flags);
25384+
25385+#ifdef CONFIG_PAX_PER_CPU_PGD
25386+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25387+ pgd_t *pgd = get_cpu_pgd(cpu);
25388+#else
25389 list_for_each_entry(page, &pgd_list, lru) {
25390- if (!vmalloc_sync_one(page_address(page), address))
25391+ pgd_t *pgd = page_address(page);
25392+#endif
25393+
25394+ if (!vmalloc_sync_one(pgd, address))
25395 break;
25396 }
25397 spin_unlock_irqrestore(&pgd_lock, flags);
25398@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
25399 * an interrupt in the middle of a task switch..
25400 */
25401 pgd_paddr = read_cr3();
25402+
25403+#ifdef CONFIG_PAX_PER_CPU_PGD
25404+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25405+#endif
25406+
25407 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25408 if (!pmd_k)
25409 return -1;
25410@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
25411
25412 const pgd_t *pgd_ref = pgd_offset_k(address);
25413 unsigned long flags;
25414+
25415+#ifdef CONFIG_PAX_PER_CPU_PGD
25416+ unsigned long cpu;
25417+#else
25418 struct page *page;
25419+#endif
25420
25421 if (pgd_none(*pgd_ref))
25422 continue;
25423
25424 spin_lock_irqsave(&pgd_lock, flags);
25425+
25426+#ifdef CONFIG_PAX_PER_CPU_PGD
25427+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25428+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25429+#else
25430 list_for_each_entry(page, &pgd_list, lru) {
25431 pgd_t *pgd;
25432 pgd = (pgd_t *)page_address(page) + pgd_index(address);
25433+#endif
25434+
25435 if (pgd_none(*pgd))
25436 set_pgd(pgd, *pgd_ref);
25437 else
25438@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
25439 * happen within a race in page table update. In the later
25440 * case just flush:
25441 */
25442+
25443+#ifdef CONFIG_PAX_PER_CPU_PGD
25444+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25445+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25446+#else
25447 pgd = pgd_offset(current->active_mm, address);
25448+#endif
25449+
25450 pgd_ref = pgd_offset_k(address);
25451 if (pgd_none(*pgd_ref))
25452 return -1;
25453@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25454 static int is_errata100(struct pt_regs *regs, unsigned long address)
25455 {
25456 #ifdef CONFIG_X86_64
25457- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25458+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25459 return 1;
25460 #endif
25461 return 0;
25462@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25463 }
25464
25465 static const char nx_warning[] = KERN_CRIT
25466-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25467+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25468
25469 static void
25470 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25471@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25472 if (!oops_may_print())
25473 return;
25474
25475- if (error_code & PF_INSTR) {
25476+ if (nx_enabled && (error_code & PF_INSTR)) {
25477 unsigned int level;
25478
25479 pte_t *pte = lookup_address(address, &level);
25480
25481 if (pte && pte_present(*pte) && !pte_exec(*pte))
25482- printk(nx_warning, current_uid());
25483+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
25484 }
25485
25486+#ifdef CONFIG_PAX_KERNEXEC
25487+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25488+ if (current->signal->curr_ip)
25489+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25490+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25491+ else
25492+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25493+ current->comm, task_pid_nr(current), current_uid(), current_euid());
25494+ }
25495+#endif
25496+
25497 printk(KERN_ALERT "BUG: unable to handle kernel ");
25498 if (address < PAGE_SIZE)
25499 printk(KERN_CONT "NULL pointer dereference");
25500@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25501 {
25502 struct task_struct *tsk = current;
25503
25504+#ifdef CONFIG_X86_64
25505+ struct mm_struct *mm = tsk->mm;
25506+
25507+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
25508+ if (regs->ip == (unsigned long)vgettimeofday) {
25509+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
25510+ return;
25511+ } else if (regs->ip == (unsigned long)vtime) {
25512+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
25513+ return;
25514+ } else if (regs->ip == (unsigned long)vgetcpu) {
25515+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
25516+ return;
25517+ }
25518+ }
25519+#endif
25520+
25521 /* User mode accesses just cause a SIGSEGV */
25522 if (error_code & PF_USER) {
25523 /*
25524@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25525 if (is_errata100(regs, address))
25526 return;
25527
25528+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25529+ if (pax_is_fetch_fault(regs, error_code, address)) {
25530+
25531+#ifdef CONFIG_PAX_EMUTRAMP
25532+ switch (pax_handle_fetch_fault(regs)) {
25533+ case 2:
25534+ return;
25535+ }
25536+#endif
25537+
25538+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25539+ do_group_exit(SIGKILL);
25540+ }
25541+#endif
25542+
25543 if (unlikely(show_unhandled_signals))
25544 show_signal_msg(regs, error_code, address, tsk);
25545
25546@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25547 if (fault & VM_FAULT_HWPOISON) {
25548 printk(KERN_ERR
25549 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25550- tsk->comm, tsk->pid, address);
25551+ tsk->comm, task_pid_nr(tsk), address);
25552 code = BUS_MCEERR_AR;
25553 }
25554 #endif
25555@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25556 return 1;
25557 }
25558
25559+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25560+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25561+{
25562+ pte_t *pte;
25563+ pmd_t *pmd;
25564+ spinlock_t *ptl;
25565+ unsigned char pte_mask;
25566+
25567+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25568+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25569+ return 0;
25570+
25571+ /* PaX: it's our fault, let's handle it if we can */
25572+
25573+ /* PaX: take a look at read faults before acquiring any locks */
25574+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25575+ /* instruction fetch attempt from a protected page in user mode */
25576+ up_read(&mm->mmap_sem);
25577+
25578+#ifdef CONFIG_PAX_EMUTRAMP
25579+ switch (pax_handle_fetch_fault(regs)) {
25580+ case 2:
25581+ return 1;
25582+ }
25583+#endif
25584+
25585+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25586+ do_group_exit(SIGKILL);
25587+ }
25588+
25589+ pmd = pax_get_pmd(mm, address);
25590+ if (unlikely(!pmd))
25591+ return 0;
25592+
25593+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25594+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25595+ pte_unmap_unlock(pte, ptl);
25596+ return 0;
25597+ }
25598+
25599+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25600+ /* write attempt to a protected page in user mode */
25601+ pte_unmap_unlock(pte, ptl);
25602+ return 0;
25603+ }
25604+
25605+#ifdef CONFIG_SMP
25606+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25607+#else
25608+ if (likely(address > get_limit(regs->cs)))
25609+#endif
25610+ {
25611+ set_pte(pte, pte_mkread(*pte));
25612+ __flush_tlb_one(address);
25613+ pte_unmap_unlock(pte, ptl);
25614+ up_read(&mm->mmap_sem);
25615+ return 1;
25616+ }
25617+
25618+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25619+
25620+ /*
25621+ * PaX: fill DTLB with user rights and retry
25622+ */
25623+ __asm__ __volatile__ (
25624+ "orb %2,(%1)\n"
25625+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25626+/*
25627+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
25628+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25629+ * page fault when examined during a TLB load attempt. this is true not only
25630+ * for PTEs holding a non-present entry but also present entries that will
25631+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25632+ * mechanism). in effect it means that we do *not* need to flush the TLBs
25633+ * for our target pages since their PTEs are simply not in the TLBs at all.
25634+
25635+ * the best thing in omitting it is that we gain around 15-20% speed in the
25636+ * fast path of the page fault handler and can get rid of tracing since we
25637+ * can no longer flush unintended entries.
25638+ */
25639+ "invlpg (%0)\n"
25640+#endif
25641+ __copyuser_seg"testb $0,(%0)\n"
25642+ "xorb %3,(%1)\n"
25643+ :
25644+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25645+ : "memory", "cc");
25646+ pte_unmap_unlock(pte, ptl);
25647+ up_read(&mm->mmap_sem);
25648+ return 1;
25649+}
25650+#endif
25651+
25652 /*
25653 * Handle a spurious fault caused by a stale TLB entry.
25654 *
25655@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
25656 static inline int
25657 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
25658 {
25659+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25660+ return 1;
25661+
25662 if (write) {
25663 /* write, present and write, not present: */
25664 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25665@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25666 {
25667 struct vm_area_struct *vma;
25668 struct task_struct *tsk;
25669- unsigned long address;
25670 struct mm_struct *mm;
25671 int write;
25672 int fault;
25673
25674- tsk = current;
25675- mm = tsk->mm;
25676-
25677 /* Get the faulting address: */
25678- address = read_cr2();
25679+ unsigned long address = read_cr2();
25680+
25681+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25682+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25683+ if (!search_exception_tables(regs->ip)) {
25684+ bad_area_nosemaphore(regs, error_code, address);
25685+ return;
25686+ }
25687+ if (address < PAX_USER_SHADOW_BASE) {
25688+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25689+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
25690+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25691+ } else
25692+ address -= PAX_USER_SHADOW_BASE;
25693+ }
25694+#endif
25695+
25696+ tsk = current;
25697+ mm = tsk->mm;
25698
25699 /*
25700 * Detect and handle instructions that would cause a page fault for
25701@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25702 * User-mode registers count as a user access even for any
25703 * potential system fault or CPU buglet:
25704 */
25705- if (user_mode_vm(regs)) {
25706+ if (user_mode(regs)) {
25707 local_irq_enable();
25708 error_code |= PF_USER;
25709 } else {
25710@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25711 might_sleep();
25712 }
25713
25714+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25715+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25716+ return;
25717+#endif
25718+
25719 vma = find_vma(mm, address);
25720 if (unlikely(!vma)) {
25721 bad_area(regs, error_code, address);
25722@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25723 bad_area(regs, error_code, address);
25724 return;
25725 }
25726- if (error_code & PF_USER) {
25727- /*
25728- * Accessing the stack below %sp is always a bug.
25729- * The large cushion allows instructions like enter
25730- * and pusha to work. ("enter $65535, $31" pushes
25731- * 32 pointers and then decrements %sp by 65535.)
25732- */
25733- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25734- bad_area(regs, error_code, address);
25735- return;
25736- }
25737+ /*
25738+ * Accessing the stack below %sp is always a bug.
25739+ * The large cushion allows instructions like enter
25740+ * and pusha to work. ("enter $65535, $31" pushes
25741+ * 32 pointers and then decrements %sp by 65535.)
25742+ */
25743+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25744+ bad_area(regs, error_code, address);
25745+ return;
25746 }
25747+
25748+#ifdef CONFIG_PAX_SEGMEXEC
25749+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25750+ bad_area(regs, error_code, address);
25751+ return;
25752+ }
25753+#endif
25754+
25755 if (unlikely(expand_stack(vma, address))) {
25756 bad_area(regs, error_code, address);
25757 return;
25758@@ -1146,3 +1390,292 @@ good_area:
25759
25760 up_read(&mm->mmap_sem);
25761 }
25762+
25763+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25764+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25765+{
25766+ struct mm_struct *mm = current->mm;
25767+ unsigned long ip = regs->ip;
25768+
25769+ if (v8086_mode(regs))
25770+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25771+
25772+#ifdef CONFIG_PAX_PAGEEXEC
25773+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25774+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25775+ return true;
25776+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25777+ return true;
25778+ return false;
25779+ }
25780+#endif
25781+
25782+#ifdef CONFIG_PAX_SEGMEXEC
25783+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25784+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25785+ return true;
25786+ return false;
25787+ }
25788+#endif
25789+
25790+ return false;
25791+}
25792+#endif
25793+
25794+#ifdef CONFIG_PAX_EMUTRAMP
25795+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25796+{
25797+ int err;
25798+
25799+ do { /* PaX: libffi trampoline emulation */
25800+ unsigned char mov, jmp;
25801+ unsigned int addr1, addr2;
25802+
25803+#ifdef CONFIG_X86_64
25804+ if ((regs->ip + 9) >> 32)
25805+ break;
25806+#endif
25807+
25808+ err = get_user(mov, (unsigned char __user *)regs->ip);
25809+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25810+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25811+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25812+
25813+ if (err)
25814+ break;
25815+
25816+ if (mov == 0xB8 && jmp == 0xE9) {
25817+ regs->ax = addr1;
25818+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25819+ return 2;
25820+ }
25821+ } while (0);
25822+
25823+ do { /* PaX: gcc trampoline emulation #1 */
25824+ unsigned char mov1, mov2;
25825+ unsigned short jmp;
25826+ unsigned int addr1, addr2;
25827+
25828+#ifdef CONFIG_X86_64
25829+ if ((regs->ip + 11) >> 32)
25830+ break;
25831+#endif
25832+
25833+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25834+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25835+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25836+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25837+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25838+
25839+ if (err)
25840+ break;
25841+
25842+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25843+ regs->cx = addr1;
25844+ regs->ax = addr2;
25845+ regs->ip = addr2;
25846+ return 2;
25847+ }
25848+ } while (0);
25849+
25850+ do { /* PaX: gcc trampoline emulation #2 */
25851+ unsigned char mov, jmp;
25852+ unsigned int addr1, addr2;
25853+
25854+#ifdef CONFIG_X86_64
25855+ if ((regs->ip + 9) >> 32)
25856+ break;
25857+#endif
25858+
25859+ err = get_user(mov, (unsigned char __user *)regs->ip);
25860+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25861+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25862+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25863+
25864+ if (err)
25865+ break;
25866+
25867+ if (mov == 0xB9 && jmp == 0xE9) {
25868+ regs->cx = addr1;
25869+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25870+ return 2;
25871+ }
25872+ } while (0);
25873+
25874+ return 1; /* PaX in action */
25875+}
25876+
25877+#ifdef CONFIG_X86_64
25878+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25879+{
25880+ int err;
25881+
25882+ do { /* PaX: libffi trampoline emulation */
25883+ unsigned short mov1, mov2, jmp1;
25884+ unsigned char stcclc, jmp2;
25885+ unsigned long addr1, addr2;
25886+
25887+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25888+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25889+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25890+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25891+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25892+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25893+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25894+
25895+ if (err)
25896+ break;
25897+
25898+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25899+ regs->r11 = addr1;
25900+ regs->r10 = addr2;
25901+ if (stcclc == 0xF8)
25902+ regs->flags &= ~X86_EFLAGS_CF;
25903+ else
25904+ regs->flags |= X86_EFLAGS_CF;
25905+ regs->ip = addr1;
25906+ return 2;
25907+ }
25908+ } while (0);
25909+
25910+ do { /* PaX: gcc trampoline emulation #1 */
25911+ unsigned short mov1, mov2, jmp1;
25912+ unsigned char jmp2;
25913+ unsigned int addr1;
25914+ unsigned long addr2;
25915+
25916+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25917+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25918+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25919+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25920+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25921+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25922+
25923+ if (err)
25924+ break;
25925+
25926+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25927+ regs->r11 = addr1;
25928+ regs->r10 = addr2;
25929+ regs->ip = addr1;
25930+ return 2;
25931+ }
25932+ } while (0);
25933+
25934+ do { /* PaX: gcc trampoline emulation #2 */
25935+ unsigned short mov1, mov2, jmp1;
25936+ unsigned char jmp2;
25937+ unsigned long addr1, addr2;
25938+
25939+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25940+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25941+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25942+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25943+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25944+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25945+
25946+ if (err)
25947+ break;
25948+
25949+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25950+ regs->r11 = addr1;
25951+ regs->r10 = addr2;
25952+ regs->ip = addr1;
25953+ return 2;
25954+ }
25955+ } while (0);
25956+
25957+ return 1; /* PaX in action */
25958+}
25959+#endif
25960+
25961+/*
25962+ * PaX: decide what to do with offenders (regs->ip = fault address)
25963+ *
25964+ * returns 1 when task should be killed
25965+ * 2 when gcc trampoline was detected
25966+ */
25967+static int pax_handle_fetch_fault(struct pt_regs *regs)
25968+{
25969+ if (v8086_mode(regs))
25970+ return 1;
25971+
25972+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25973+ return 1;
25974+
25975+#ifdef CONFIG_X86_32
25976+ return pax_handle_fetch_fault_32(regs);
25977+#else
25978+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25979+ return pax_handle_fetch_fault_32(regs);
25980+ else
25981+ return pax_handle_fetch_fault_64(regs);
25982+#endif
25983+}
25984+#endif
25985+
25986+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25987+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25988+{
25989+ long i;
25990+
25991+ printk(KERN_ERR "PAX: bytes at PC: ");
25992+ for (i = 0; i < 20; i++) {
25993+ unsigned char c;
25994+ if (get_user(c, (unsigned char __force_user *)pc+i))
25995+ printk(KERN_CONT "?? ");
25996+ else
25997+ printk(KERN_CONT "%02x ", c);
25998+ }
25999+ printk("\n");
26000+
26001+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
26002+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
26003+ unsigned long c;
26004+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
26005+#ifdef CONFIG_X86_32
26006+ printk(KERN_CONT "???????? ");
26007+#else
26008+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
26009+ printk(KERN_CONT "???????? ???????? ");
26010+ else
26011+ printk(KERN_CONT "???????????????? ");
26012+#endif
26013+ } else {
26014+#ifdef CONFIG_X86_64
26015+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
26016+ printk(KERN_CONT "%08x ", (unsigned int)c);
26017+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
26018+ } else
26019+#endif
26020+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
26021+ }
26022+ }
26023+ printk("\n");
26024+}
26025+#endif
26026+
26027+/**
26028+ * probe_kernel_write(): safely attempt to write to a location
26029+ * @dst: address to write to
26030+ * @src: pointer to the data that shall be written
26031+ * @size: size of the data chunk
26032+ *
26033+ * Safely write to address @dst from the buffer at @src. If a kernel fault
26034+ * happens, handle that and return -EFAULT.
26035+ */
26036+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
26037+{
26038+ long ret;
26039+ mm_segment_t old_fs = get_fs();
26040+
26041+ set_fs(KERNEL_DS);
26042+ pagefault_disable();
26043+ pax_open_kernel();
26044+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
26045+ pax_close_kernel();
26046+ pagefault_enable();
26047+ set_fs(old_fs);
26048+
26049+ return ret ? -EFAULT : 0;
26050+}
26051diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
26052index 71da1bc..7a16bf4 100644
26053--- a/arch/x86/mm/gup.c
26054+++ b/arch/x86/mm/gup.c
26055@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
26056 addr = start;
26057 len = (unsigned long) nr_pages << PAGE_SHIFT;
26058 end = start + len;
26059- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26060+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
26061 (void __user *)start, len)))
26062 return 0;
26063
26064diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
26065index 63a6ba6..79abd7a 100644
26066--- a/arch/x86/mm/highmem_32.c
26067+++ b/arch/x86/mm/highmem_32.c
26068@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
26069 idx = type + KM_TYPE_NR*smp_processor_id();
26070 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26071 BUG_ON(!pte_none(*(kmap_pte-idx)));
26072+
26073+ pax_open_kernel();
26074 set_pte(kmap_pte-idx, mk_pte(page, prot));
26075+ pax_close_kernel();
26076
26077 return (void *)vaddr;
26078 }
26079diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
26080index f46c3407..6ff9a26 100644
26081--- a/arch/x86/mm/hugetlbpage.c
26082+++ b/arch/x86/mm/hugetlbpage.c
26083@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
26084 struct hstate *h = hstate_file(file);
26085 struct mm_struct *mm = current->mm;
26086 struct vm_area_struct *vma;
26087- unsigned long start_addr;
26088+ unsigned long start_addr, pax_task_size = TASK_SIZE;
26089+
26090+#ifdef CONFIG_PAX_SEGMEXEC
26091+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26092+ pax_task_size = SEGMEXEC_TASK_SIZE;
26093+#endif
26094+
26095+ pax_task_size -= PAGE_SIZE;
26096
26097 if (len > mm->cached_hole_size) {
26098- start_addr = mm->free_area_cache;
26099+ start_addr = mm->free_area_cache;
26100 } else {
26101- start_addr = TASK_UNMAPPED_BASE;
26102- mm->cached_hole_size = 0;
26103+ start_addr = mm->mmap_base;
26104+ mm->cached_hole_size = 0;
26105 }
26106
26107 full_search:
26108@@ -281,26 +288,27 @@ full_search:
26109
26110 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
26111 /* At this point: (!vma || addr < vma->vm_end). */
26112- if (TASK_SIZE - len < addr) {
26113+ if (pax_task_size - len < addr) {
26114 /*
26115 * Start a new search - just in case we missed
26116 * some holes.
26117 */
26118- if (start_addr != TASK_UNMAPPED_BASE) {
26119- start_addr = TASK_UNMAPPED_BASE;
26120+ if (start_addr != mm->mmap_base) {
26121+ start_addr = mm->mmap_base;
26122 mm->cached_hole_size = 0;
26123 goto full_search;
26124 }
26125 return -ENOMEM;
26126 }
26127- if (!vma || addr + len <= vma->vm_start) {
26128- mm->free_area_cache = addr + len;
26129- return addr;
26130- }
26131+ if (check_heap_stack_gap(vma, addr, len))
26132+ break;
26133 if (addr + mm->cached_hole_size < vma->vm_start)
26134 mm->cached_hole_size = vma->vm_start - addr;
26135 addr = ALIGN(vma->vm_end, huge_page_size(h));
26136 }
26137+
26138+ mm->free_area_cache = addr + len;
26139+ return addr;
26140 }
26141
26142 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26143@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26144 {
26145 struct hstate *h = hstate_file(file);
26146 struct mm_struct *mm = current->mm;
26147- struct vm_area_struct *vma, *prev_vma;
26148- unsigned long base = mm->mmap_base, addr = addr0;
26149+ struct vm_area_struct *vma;
26150+ unsigned long base = mm->mmap_base, addr;
26151 unsigned long largest_hole = mm->cached_hole_size;
26152- int first_time = 1;
26153
26154 /* don't allow allocations above current base */
26155 if (mm->free_area_cache > base)
26156@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
26157 largest_hole = 0;
26158 mm->free_area_cache = base;
26159 }
26160-try_again:
26161+
26162 /* make sure it can fit in the remaining address space */
26163 if (mm->free_area_cache < len)
26164 goto fail;
26165
26166 /* either no address requested or cant fit in requested address hole */
26167- addr = (mm->free_area_cache - len) & huge_page_mask(h);
26168+ addr = (mm->free_area_cache - len);
26169 do {
26170+ addr &= huge_page_mask(h);
26171+ vma = find_vma(mm, addr);
26172 /*
26173 * Lookup failure means no vma is above this address,
26174 * i.e. return with success:
26175- */
26176- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
26177- return addr;
26178-
26179- /*
26180 * new region fits between prev_vma->vm_end and
26181 * vma->vm_start, use it:
26182 */
26183- if (addr + len <= vma->vm_start &&
26184- (!prev_vma || (addr >= prev_vma->vm_end))) {
26185+ if (check_heap_stack_gap(vma, addr, len)) {
26186 /* remember the address as a hint for next time */
26187- mm->cached_hole_size = largest_hole;
26188- return (mm->free_area_cache = addr);
26189- } else {
26190- /* pull free_area_cache down to the first hole */
26191- if (mm->free_area_cache == vma->vm_end) {
26192- mm->free_area_cache = vma->vm_start;
26193- mm->cached_hole_size = largest_hole;
26194- }
26195+ mm->cached_hole_size = largest_hole;
26196+ return (mm->free_area_cache = addr);
26197+ }
26198+ /* pull free_area_cache down to the first hole */
26199+ if (mm->free_area_cache == vma->vm_end) {
26200+ mm->free_area_cache = vma->vm_start;
26201+ mm->cached_hole_size = largest_hole;
26202 }
26203
26204 /* remember the largest hole we saw so far */
26205 if (addr + largest_hole < vma->vm_start)
26206- largest_hole = vma->vm_start - addr;
26207+ largest_hole = vma->vm_start - addr;
26208
26209 /* try just below the current vma->vm_start */
26210- addr = (vma->vm_start - len) & huge_page_mask(h);
26211- } while (len <= vma->vm_start);
26212+ addr = skip_heap_stack_gap(vma, len);
26213+ } while (!IS_ERR_VALUE(addr));
26214
26215 fail:
26216 /*
26217- * if hint left us with no space for the requested
26218- * mapping then try again:
26219- */
26220- if (first_time) {
26221- mm->free_area_cache = base;
26222- largest_hole = 0;
26223- first_time = 0;
26224- goto try_again;
26225- }
26226- /*
26227 * A failed mmap() very likely causes application failure,
26228 * so fall back to the bottom-up function here. This scenario
26229 * can happen with large stack limits and large mmap()
26230 * allocations.
26231 */
26232- mm->free_area_cache = TASK_UNMAPPED_BASE;
26233+
26234+#ifdef CONFIG_PAX_SEGMEXEC
26235+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26236+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
26237+ else
26238+#endif
26239+
26240+ mm->mmap_base = TASK_UNMAPPED_BASE;
26241+
26242+#ifdef CONFIG_PAX_RANDMMAP
26243+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26244+ mm->mmap_base += mm->delta_mmap;
26245+#endif
26246+
26247+ mm->free_area_cache = mm->mmap_base;
26248 mm->cached_hole_size = ~0UL;
26249 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
26250 len, pgoff, flags);
26251@@ -387,6 +393,7 @@ fail:
26252 /*
26253 * Restore the topdown base:
26254 */
26255+ mm->mmap_base = base;
26256 mm->free_area_cache = base;
26257 mm->cached_hole_size = ~0UL;
26258
26259@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26260 struct hstate *h = hstate_file(file);
26261 struct mm_struct *mm = current->mm;
26262 struct vm_area_struct *vma;
26263+ unsigned long pax_task_size = TASK_SIZE;
26264
26265 if (len & ~huge_page_mask(h))
26266 return -EINVAL;
26267- if (len > TASK_SIZE)
26268+
26269+#ifdef CONFIG_PAX_SEGMEXEC
26270+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26271+ pax_task_size = SEGMEXEC_TASK_SIZE;
26272+#endif
26273+
26274+ pax_task_size -= PAGE_SIZE;
26275+
26276+ if (len > pax_task_size)
26277 return -ENOMEM;
26278
26279 if (flags & MAP_FIXED) {
26280@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
26281 if (addr) {
26282 addr = ALIGN(addr, huge_page_size(h));
26283 vma = find_vma(mm, addr);
26284- if (TASK_SIZE - len >= addr &&
26285- (!vma || addr + len <= vma->vm_start))
26286+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
26287 return addr;
26288 }
26289 if (mm->get_unmapped_area == arch_get_unmapped_area)
26290diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
26291index 73ffd55..f61c2a7 100644
26292--- a/arch/x86/mm/init.c
26293+++ b/arch/x86/mm/init.c
26294@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
26295 * cause a hotspot and fill up ZONE_DMA. The page tables
26296 * need roughly 0.5KB per GB.
26297 */
26298-#ifdef CONFIG_X86_32
26299- start = 0x7000;
26300-#else
26301- start = 0x8000;
26302-#endif
26303+ start = 0x100000;
26304 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
26305 tables, PAGE_SIZE);
26306 if (e820_table_start == -1UL)
26307@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26308 #endif
26309
26310 set_nx();
26311- if (nx_enabled)
26312+ if (nx_enabled && cpu_has_nx)
26313 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
26314
26315 /* Enable PSE if available */
26316@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
26317 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
26318 * mmio resources as well as potential bios/acpi data regions.
26319 */
26320+
26321 int devmem_is_allowed(unsigned long pagenr)
26322 {
26323+#ifdef CONFIG_GRKERNSEC_KMEM
26324+ /* allow BDA */
26325+ if (!pagenr)
26326+ return 1;
26327+ /* allow EBDA */
26328+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
26329+ return 1;
26330+ /* allow ISA/video mem */
26331+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26332+ return 1;
26333+ /* throw out everything else below 1MB */
26334+ if (pagenr <= 256)
26335+ return 0;
26336+#else
26337 if (pagenr <= 256)
26338 return 1;
26339+#endif
26340+
26341 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
26342 return 0;
26343 if (!page_is_ram(pagenr))
26344@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
26345
26346 void free_initmem(void)
26347 {
26348+
26349+#ifdef CONFIG_PAX_KERNEXEC
26350+#ifdef CONFIG_X86_32
26351+ /* PaX: limit KERNEL_CS to actual size */
26352+ unsigned long addr, limit;
26353+ struct desc_struct d;
26354+ int cpu;
26355+
26356+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26357+ limit = (limit - 1UL) >> PAGE_SHIFT;
26358+
26359+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26360+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26361+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26362+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26363+ }
26364+
26365+ /* PaX: make KERNEL_CS read-only */
26366+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26367+ if (!paravirt_enabled())
26368+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26369+/*
26370+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26371+ pgd = pgd_offset_k(addr);
26372+ pud = pud_offset(pgd, addr);
26373+ pmd = pmd_offset(pud, addr);
26374+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26375+ }
26376+*/
26377+#ifdef CONFIG_X86_PAE
26378+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26379+/*
26380+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26381+ pgd = pgd_offset_k(addr);
26382+ pud = pud_offset(pgd, addr);
26383+ pmd = pmd_offset(pud, addr);
26384+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26385+ }
26386+*/
26387+#endif
26388+
26389+#ifdef CONFIG_MODULES
26390+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26391+#endif
26392+
26393+#else
26394+ pgd_t *pgd;
26395+ pud_t *pud;
26396+ pmd_t *pmd;
26397+ unsigned long addr, end;
26398+
26399+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26400+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26401+ pgd = pgd_offset_k(addr);
26402+ pud = pud_offset(pgd, addr);
26403+ pmd = pmd_offset(pud, addr);
26404+ if (!pmd_present(*pmd))
26405+ continue;
26406+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26407+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26408+ else
26409+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26410+ }
26411+
26412+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26413+ end = addr + KERNEL_IMAGE_SIZE;
26414+ for (; addr < end; addr += PMD_SIZE) {
26415+ pgd = pgd_offset_k(addr);
26416+ pud = pud_offset(pgd, addr);
26417+ pmd = pmd_offset(pud, addr);
26418+ if (!pmd_present(*pmd))
26419+ continue;
26420+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26421+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26422+ }
26423+#endif
26424+
26425+ flush_tlb_all();
26426+#endif
26427+
26428 free_init_pages("unused kernel memory",
26429 (unsigned long)(&__init_begin),
26430 (unsigned long)(&__init_end));
26431diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26432index 30938c1..bda3d5d 100644
26433--- a/arch/x86/mm/init_32.c
26434+++ b/arch/x86/mm/init_32.c
26435@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
26436 }
26437
26438 /*
26439- * Creates a middle page table and puts a pointer to it in the
26440- * given global directory entry. This only returns the gd entry
26441- * in non-PAE compilation mode, since the middle layer is folded.
26442- */
26443-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26444-{
26445- pud_t *pud;
26446- pmd_t *pmd_table;
26447-
26448-#ifdef CONFIG_X86_PAE
26449- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26450- if (after_bootmem)
26451- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26452- else
26453- pmd_table = (pmd_t *)alloc_low_page();
26454- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26455- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26456- pud = pud_offset(pgd, 0);
26457- BUG_ON(pmd_table != pmd_offset(pud, 0));
26458-
26459- return pmd_table;
26460- }
26461-#endif
26462- pud = pud_offset(pgd, 0);
26463- pmd_table = pmd_offset(pud, 0);
26464-
26465- return pmd_table;
26466-}
26467-
26468-/*
26469 * Create a page table and place a pointer to it in a middle page
26470 * directory entry:
26471 */
26472@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26473 page_table = (pte_t *)alloc_low_page();
26474
26475 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26476+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26477+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26478+#else
26479 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26480+#endif
26481 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26482 }
26483
26484 return pte_offset_kernel(pmd, 0);
26485 }
26486
26487+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26488+{
26489+ pud_t *pud;
26490+ pmd_t *pmd_table;
26491+
26492+ pud = pud_offset(pgd, 0);
26493+ pmd_table = pmd_offset(pud, 0);
26494+
26495+ return pmd_table;
26496+}
26497+
26498 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26499 {
26500 int pgd_idx = pgd_index(vaddr);
26501@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26502 int pgd_idx, pmd_idx;
26503 unsigned long vaddr;
26504 pgd_t *pgd;
26505+ pud_t *pud;
26506 pmd_t *pmd;
26507 pte_t *pte = NULL;
26508
26509@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26510 pgd = pgd_base + pgd_idx;
26511
26512 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26513- pmd = one_md_table_init(pgd);
26514- pmd = pmd + pmd_index(vaddr);
26515+ pud = pud_offset(pgd, vaddr);
26516+ pmd = pmd_offset(pud, vaddr);
26517+
26518+#ifdef CONFIG_X86_PAE
26519+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26520+#endif
26521+
26522 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26523 pmd++, pmd_idx++) {
26524 pte = page_table_kmap_check(one_page_table_init(pmd),
26525@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26526 }
26527 }
26528
26529-static inline int is_kernel_text(unsigned long addr)
26530+static inline int is_kernel_text(unsigned long start, unsigned long end)
26531 {
26532- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
26533- return 1;
26534- return 0;
26535+ if ((start > ktla_ktva((unsigned long)_etext) ||
26536+ end <= ktla_ktva((unsigned long)_stext)) &&
26537+ (start > ktla_ktva((unsigned long)_einittext) ||
26538+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26539+
26540+#ifdef CONFIG_ACPI_SLEEP
26541+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26542+#endif
26543+
26544+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26545+ return 0;
26546+ return 1;
26547 }
26548
26549 /*
26550@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
26551 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
26552 unsigned long start_pfn, end_pfn;
26553 pgd_t *pgd_base = swapper_pg_dir;
26554- int pgd_idx, pmd_idx, pte_ofs;
26555+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26556 unsigned long pfn;
26557 pgd_t *pgd;
26558+ pud_t *pud;
26559 pmd_t *pmd;
26560 pte_t *pte;
26561 unsigned pages_2m, pages_4k;
26562@@ -278,8 +279,13 @@ repeat:
26563 pfn = start_pfn;
26564 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26565 pgd = pgd_base + pgd_idx;
26566- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26567- pmd = one_md_table_init(pgd);
26568+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26569+ pud = pud_offset(pgd, 0);
26570+ pmd = pmd_offset(pud, 0);
26571+
26572+#ifdef CONFIG_X86_PAE
26573+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26574+#endif
26575
26576 if (pfn >= end_pfn)
26577 continue;
26578@@ -291,14 +297,13 @@ repeat:
26579 #endif
26580 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26581 pmd++, pmd_idx++) {
26582- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26583+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26584
26585 /*
26586 * Map with big pages if possible, otherwise
26587 * create normal page tables:
26588 */
26589 if (use_pse) {
26590- unsigned int addr2;
26591 pgprot_t prot = PAGE_KERNEL_LARGE;
26592 /*
26593 * first pass will use the same initial
26594@@ -308,11 +313,7 @@ repeat:
26595 __pgprot(PTE_IDENT_ATTR |
26596 _PAGE_PSE);
26597
26598- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26599- PAGE_OFFSET + PAGE_SIZE-1;
26600-
26601- if (is_kernel_text(addr) ||
26602- is_kernel_text(addr2))
26603+ if (is_kernel_text(address, address + PMD_SIZE))
26604 prot = PAGE_KERNEL_LARGE_EXEC;
26605
26606 pages_2m++;
26607@@ -329,7 +330,7 @@ repeat:
26608 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26609 pte += pte_ofs;
26610 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26611- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26612+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26613 pgprot_t prot = PAGE_KERNEL;
26614 /*
26615 * first pass will use the same initial
26616@@ -337,7 +338,7 @@ repeat:
26617 */
26618 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26619
26620- if (is_kernel_text(addr))
26621+ if (is_kernel_text(address, address + PAGE_SIZE))
26622 prot = PAGE_KERNEL_EXEC;
26623
26624 pages_4k++;
26625@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
26626
26627 pud = pud_offset(pgd, va);
26628 pmd = pmd_offset(pud, va);
26629- if (!pmd_present(*pmd))
26630+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26631 break;
26632
26633 pte = pte_offset_kernel(pmd, va);
26634@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
26635
26636 static void __init pagetable_init(void)
26637 {
26638- pgd_t *pgd_base = swapper_pg_dir;
26639-
26640- permanent_kmaps_init(pgd_base);
26641+ permanent_kmaps_init(swapper_pg_dir);
26642 }
26643
26644 #ifdef CONFIG_ACPI_SLEEP
26645@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
26646 * ACPI suspend needs this for resume, because things like the intel-agp
26647 * driver might have split up a kernel 4MB mapping.
26648 */
26649-char swsusp_pg_dir[PAGE_SIZE]
26650+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
26651 __attribute__ ((aligned(PAGE_SIZE)));
26652
26653 static inline void save_pg_dir(void)
26654 {
26655- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
26656+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
26657 }
26658 #else /* !CONFIG_ACPI_SLEEP */
26659 static inline void save_pg_dir(void)
26660@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
26661 flush_tlb_all();
26662 }
26663
26664-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26665+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26666 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26667
26668 /* user-defined highmem size */
26669@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
26670 * Initialize the boot-time allocator (with low memory only):
26671 */
26672 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
26673- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26674+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26675 PAGE_SIZE);
26676 if (bootmap == -1L)
26677 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
26678@@ -864,6 +863,12 @@ void __init mem_init(void)
26679
26680 pci_iommu_alloc();
26681
26682+#ifdef CONFIG_PAX_PER_CPU_PGD
26683+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26684+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26685+ KERNEL_PGD_PTRS);
26686+#endif
26687+
26688 #ifdef CONFIG_FLATMEM
26689 BUG_ON(!mem_map);
26690 #endif
26691@@ -881,7 +886,7 @@ void __init mem_init(void)
26692 set_highmem_pages_init();
26693
26694 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26695- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26696+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26697 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26698
26699 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26700@@ -923,10 +928,10 @@ void __init mem_init(void)
26701 ((unsigned long)&__init_end -
26702 (unsigned long)&__init_begin) >> 10,
26703
26704- (unsigned long)&_etext, (unsigned long)&_edata,
26705- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26706+ (unsigned long)&_sdata, (unsigned long)&_edata,
26707+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26708
26709- (unsigned long)&_text, (unsigned long)&_etext,
26710+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26711 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26712
26713 /*
26714@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
26715 if (!kernel_set_to_readonly)
26716 return;
26717
26718+ start = ktla_ktva(start);
26719 pr_debug("Set kernel text: %lx - %lx for read write\n",
26720 start, start+size);
26721
26722@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
26723 if (!kernel_set_to_readonly)
26724 return;
26725
26726+ start = ktla_ktva(start);
26727 pr_debug("Set kernel text: %lx - %lx for read only\n",
26728 start, start+size);
26729
26730@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
26731 unsigned long start = PFN_ALIGN(_text);
26732 unsigned long size = PFN_ALIGN(_etext) - start;
26733
26734+ start = ktla_ktva(start);
26735 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26736 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26737 size >> 10);
26738diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26739index 7d095ad..25d2549 100644
26740--- a/arch/x86/mm/init_64.c
26741+++ b/arch/x86/mm/init_64.c
26742@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26743 pmd = fill_pmd(pud, vaddr);
26744 pte = fill_pte(pmd, vaddr);
26745
26746+ pax_open_kernel();
26747 set_pte(pte, new_pte);
26748+ pax_close_kernel();
26749
26750 /*
26751 * It's enough to flush this one mapping.
26752@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26753 pgd = pgd_offset_k((unsigned long)__va(phys));
26754 if (pgd_none(*pgd)) {
26755 pud = (pud_t *) spp_getpage();
26756- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26757- _PAGE_USER));
26758+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26759 }
26760 pud = pud_offset(pgd, (unsigned long)__va(phys));
26761 if (pud_none(*pud)) {
26762 pmd = (pmd_t *) spp_getpage();
26763- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26764- _PAGE_USER));
26765+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26766 }
26767 pmd = pmd_offset(pud, phys);
26768 BUG_ON(!pmd_none(*pmd));
26769@@ -675,6 +675,12 @@ void __init mem_init(void)
26770
26771 pci_iommu_alloc();
26772
26773+#ifdef CONFIG_PAX_PER_CPU_PGD
26774+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26775+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26776+ KERNEL_PGD_PTRS);
26777+#endif
26778+
26779 /* clear_bss() already clear the empty_zero_page */
26780
26781 reservedpages = 0;
26782@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
26783 static struct vm_area_struct gate_vma = {
26784 .vm_start = VSYSCALL_START,
26785 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26786- .vm_page_prot = PAGE_READONLY_EXEC,
26787- .vm_flags = VM_READ | VM_EXEC
26788+ .vm_page_prot = PAGE_READONLY,
26789+ .vm_flags = VM_READ
26790 };
26791
26792 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26793@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
26794
26795 const char *arch_vma_name(struct vm_area_struct *vma)
26796 {
26797- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26798+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26799 return "[vdso]";
26800 if (vma == &gate_vma)
26801 return "[vsyscall]";
26802diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26803index 84e236c..69bd3f6 100644
26804--- a/arch/x86/mm/iomap_32.c
26805+++ b/arch/x86/mm/iomap_32.c
26806@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
26807 debug_kmap_atomic(type);
26808 idx = type + KM_TYPE_NR * smp_processor_id();
26809 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26810+
26811+ pax_open_kernel();
26812 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26813+ pax_close_kernel();
26814+
26815 arch_flush_lazy_mmu_mode();
26816
26817 return (void *)vaddr;
26818diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26819index 2feb9bd..ab91e7b 100644
26820--- a/arch/x86/mm/ioremap.c
26821+++ b/arch/x86/mm/ioremap.c
26822@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
26823 * Second special case: Some BIOSen report the PC BIOS
26824 * area (640->1Mb) as ram even though it is not.
26825 */
26826- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
26827- pagenr < (BIOS_END >> PAGE_SHIFT))
26828+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
26829+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26830 return 0;
26831
26832 for (i = 0; i < e820.nr_map; i++) {
26833@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26834 /*
26835 * Don't allow anybody to remap normal RAM that we're using..
26836 */
26837- for (pfn = phys_addr >> PAGE_SHIFT;
26838- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
26839- pfn++) {
26840-
26841+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
26842 int is_ram = page_is_ram(pfn);
26843
26844- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26845+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26846 return NULL;
26847 WARN_ON_ONCE(is_ram);
26848 }
26849@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26850
26851 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26852 if (page_is_ram(start >> PAGE_SHIFT))
26853+#ifdef CONFIG_HIGHMEM
26854+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26855+#endif
26856 return __va(phys);
26857
26858 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
26859@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
26860 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26861
26862 static __initdata int after_paging_init;
26863-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26864+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26865
26866 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26867 {
26868@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
26869 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26870
26871 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26872- memset(bm_pte, 0, sizeof(bm_pte));
26873- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26874+ pmd_populate_user(&init_mm, pmd, bm_pte);
26875
26876 /*
26877 * The boot-ioremap range spans multiple pmds, for which
26878diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26879index 8cc1833..1abbc5b 100644
26880--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26881+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26882@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26883 * memory (e.g. tracked pages)? For now, we need this to avoid
26884 * invoking kmemcheck for PnP BIOS calls.
26885 */
26886- if (regs->flags & X86_VM_MASK)
26887+ if (v8086_mode(regs))
26888 return false;
26889- if (regs->cs != __KERNEL_CS)
26890+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26891 return false;
26892
26893 pte = kmemcheck_pte_lookup(address);
26894diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26895index c9e57af..07a321b 100644
26896--- a/arch/x86/mm/mmap.c
26897+++ b/arch/x86/mm/mmap.c
26898@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
26899 * Leave an at least ~128 MB hole with possible stack randomization.
26900 */
26901 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26902-#define MAX_GAP (TASK_SIZE/6*5)
26903+#define MAX_GAP (pax_task_size/6*5)
26904
26905 /*
26906 * True on X86_32 or when emulating IA32 on X86_64
26907@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
26908 return rnd << PAGE_SHIFT;
26909 }
26910
26911-static unsigned long mmap_base(void)
26912+static unsigned long mmap_base(struct mm_struct *mm)
26913 {
26914 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
26915+ unsigned long pax_task_size = TASK_SIZE;
26916+
26917+#ifdef CONFIG_PAX_SEGMEXEC
26918+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26919+ pax_task_size = SEGMEXEC_TASK_SIZE;
26920+#endif
26921
26922 if (gap < MIN_GAP)
26923 gap = MIN_GAP;
26924 else if (gap > MAX_GAP)
26925 gap = MAX_GAP;
26926
26927- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26928+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26929 }
26930
26931 /*
26932 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26933 * does, but not when emulating X86_32
26934 */
26935-static unsigned long mmap_legacy_base(void)
26936+static unsigned long mmap_legacy_base(struct mm_struct *mm)
26937 {
26938- if (mmap_is_ia32())
26939+ if (mmap_is_ia32()) {
26940+
26941+#ifdef CONFIG_PAX_SEGMEXEC
26942+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26943+ return SEGMEXEC_TASK_UNMAPPED_BASE;
26944+ else
26945+#endif
26946+
26947 return TASK_UNMAPPED_BASE;
26948- else
26949+ } else
26950 return TASK_UNMAPPED_BASE + mmap_rnd();
26951 }
26952
26953@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
26954 void arch_pick_mmap_layout(struct mm_struct *mm)
26955 {
26956 if (mmap_is_legacy()) {
26957- mm->mmap_base = mmap_legacy_base();
26958+ mm->mmap_base = mmap_legacy_base(mm);
26959+
26960+#ifdef CONFIG_PAX_RANDMMAP
26961+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26962+ mm->mmap_base += mm->delta_mmap;
26963+#endif
26964+
26965 mm->get_unmapped_area = arch_get_unmapped_area;
26966 mm->unmap_area = arch_unmap_area;
26967 } else {
26968- mm->mmap_base = mmap_base();
26969+ mm->mmap_base = mmap_base(mm);
26970+
26971+#ifdef CONFIG_PAX_RANDMMAP
26972+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26973+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26974+#endif
26975+
26976 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26977 mm->unmap_area = arch_unmap_area_topdown;
26978 }
26979diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26980index 132772a..b961f11 100644
26981--- a/arch/x86/mm/mmio-mod.c
26982+++ b/arch/x86/mm/mmio-mod.c
26983@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26984 break;
26985 default:
26986 {
26987- unsigned char *ip = (unsigned char *)instptr;
26988+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26989 my_trace->opcode = MMIO_UNKNOWN_OP;
26990 my_trace->width = 0;
26991 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26992@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26993 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26994 void __iomem *addr)
26995 {
26996- static atomic_t next_id;
26997+ static atomic_unchecked_t next_id;
26998 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26999 /* These are page-unaligned. */
27000 struct mmiotrace_map map = {
27001@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
27002 .private = trace
27003 },
27004 .phys = offset,
27005- .id = atomic_inc_return(&next_id)
27006+ .id = atomic_inc_return_unchecked(&next_id)
27007 };
27008 map.map_id = trace->id;
27009
27010diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
27011index d253006..e56dd6a 100644
27012--- a/arch/x86/mm/numa_32.c
27013+++ b/arch/x86/mm/numa_32.c
27014@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
27015 }
27016 #endif
27017
27018-extern unsigned long find_max_low_pfn(void);
27019 extern unsigned long highend_pfn, highstart_pfn;
27020
27021 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
27022diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
27023index e1d1069..2251ff3 100644
27024--- a/arch/x86/mm/pageattr-test.c
27025+++ b/arch/x86/mm/pageattr-test.c
27026@@ -36,7 +36,7 @@ enum {
27027
27028 static int pte_testbit(pte_t pte)
27029 {
27030- return pte_flags(pte) & _PAGE_UNUSED1;
27031+ return pte_flags(pte) & _PAGE_CPA_TEST;
27032 }
27033
27034 struct split_state {
27035diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
27036index dd38bfb..b72c63e 100644
27037--- a/arch/x86/mm/pageattr.c
27038+++ b/arch/x86/mm/pageattr.c
27039@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27040 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
27041 */
27042 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
27043- pgprot_val(forbidden) |= _PAGE_NX;
27044+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27045
27046 /*
27047 * The kernel text needs to be executable for obvious reasons
27048 * Does not cover __inittext since that is gone later on. On
27049 * 64bit we do not enforce !NX on the low mapping
27050 */
27051- if (within(address, (unsigned long)_text, (unsigned long)_etext))
27052- pgprot_val(forbidden) |= _PAGE_NX;
27053+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
27054+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27055
27056+#ifdef CONFIG_DEBUG_RODATA
27057 /*
27058 * The .rodata section needs to be read-only. Using the pfn
27059 * catches all aliases.
27060@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
27061 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
27062 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
27063 pgprot_val(forbidden) |= _PAGE_RW;
27064+#endif
27065+
27066+#ifdef CONFIG_PAX_KERNEXEC
27067+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
27068+ pgprot_val(forbidden) |= _PAGE_RW;
27069+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
27070+ }
27071+#endif
27072
27073 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
27074
27075@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
27076 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
27077 {
27078 /* change init_mm */
27079+ pax_open_kernel();
27080 set_pte_atomic(kpte, pte);
27081+
27082 #ifdef CONFIG_X86_32
27083 if (!SHARED_KERNEL_PMD) {
27084+
27085+#ifdef CONFIG_PAX_PER_CPU_PGD
27086+ unsigned long cpu;
27087+#else
27088 struct page *page;
27089+#endif
27090
27091+#ifdef CONFIG_PAX_PER_CPU_PGD
27092+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27093+ pgd_t *pgd = get_cpu_pgd(cpu);
27094+#else
27095 list_for_each_entry(page, &pgd_list, lru) {
27096- pgd_t *pgd;
27097+ pgd_t *pgd = (pgd_t *)page_address(page);
27098+#endif
27099+
27100 pud_t *pud;
27101 pmd_t *pmd;
27102
27103- pgd = (pgd_t *)page_address(page) + pgd_index(address);
27104+ pgd += pgd_index(address);
27105 pud = pud_offset(pgd, address);
27106 pmd = pmd_offset(pud, address);
27107 set_pte_atomic((pte_t *)pmd, pte);
27108 }
27109 }
27110 #endif
27111+ pax_close_kernel();
27112 }
27113
27114 static int
27115diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
27116index e78cd0e..de0a817 100644
27117--- a/arch/x86/mm/pat.c
27118+++ b/arch/x86/mm/pat.c
27119@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
27120
27121 conflict:
27122 printk(KERN_INFO "%s:%d conflicting memory types "
27123- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
27124+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
27125 new->end, cattr_name(new->type), cattr_name(entry->type));
27126 return -EBUSY;
27127 }
27128@@ -559,7 +559,7 @@ unlock_ret:
27129
27130 if (err) {
27131 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
27132- current->comm, current->pid, start, end);
27133+ current->comm, task_pid_nr(current), start, end);
27134 }
27135
27136 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
27137@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27138 while (cursor < to) {
27139 if (!devmem_is_allowed(pfn)) {
27140 printk(KERN_INFO
27141- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27142- current->comm, from, to);
27143+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
27144+ current->comm, from, to, cursor);
27145 return 0;
27146 }
27147 cursor += PAGE_SIZE;
27148@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
27149 printk(KERN_INFO
27150 "%s:%d ioremap_change_attr failed %s "
27151 "for %Lx-%Lx\n",
27152- current->comm, current->pid,
27153+ current->comm, task_pid_nr(current),
27154 cattr_name(flags),
27155 base, (unsigned long long)(base + size));
27156 return -EINVAL;
27157@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
27158 free_memtype(paddr, paddr + size);
27159 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
27160 " for %Lx-%Lx, got %s\n",
27161- current->comm, current->pid,
27162+ current->comm, task_pid_nr(current),
27163 cattr_name(want_flags),
27164 (unsigned long long)paddr,
27165 (unsigned long long)(paddr + size),
27166diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
27167index df3d5c8..c2223e1 100644
27168--- a/arch/x86/mm/pf_in.c
27169+++ b/arch/x86/mm/pf_in.c
27170@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
27171 int i;
27172 enum reason_type rv = OTHERS;
27173
27174- p = (unsigned char *)ins_addr;
27175+ p = (unsigned char *)ktla_ktva(ins_addr);
27176 p += skip_prefix(p, &prf);
27177 p += get_opcode(p, &opcode);
27178
27179@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
27180 struct prefix_bits prf;
27181 int i;
27182
27183- p = (unsigned char *)ins_addr;
27184+ p = (unsigned char *)ktla_ktva(ins_addr);
27185 p += skip_prefix(p, &prf);
27186 p += get_opcode(p, &opcode);
27187
27188@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
27189 struct prefix_bits prf;
27190 int i;
27191
27192- p = (unsigned char *)ins_addr;
27193+ p = (unsigned char *)ktla_ktva(ins_addr);
27194 p += skip_prefix(p, &prf);
27195 p += get_opcode(p, &opcode);
27196
27197@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
27198 int i;
27199 unsigned long rv;
27200
27201- p = (unsigned char *)ins_addr;
27202+ p = (unsigned char *)ktla_ktva(ins_addr);
27203 p += skip_prefix(p, &prf);
27204 p += get_opcode(p, &opcode);
27205 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
27206@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
27207 int i;
27208 unsigned long rv;
27209
27210- p = (unsigned char *)ins_addr;
27211+ p = (unsigned char *)ktla_ktva(ins_addr);
27212 p += skip_prefix(p, &prf);
27213 p += get_opcode(p, &opcode);
27214 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
27215diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
27216index e0e6fad..c56b495 100644
27217--- a/arch/x86/mm/pgtable.c
27218+++ b/arch/x86/mm/pgtable.c
27219@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
27220 list_del(&page->lru);
27221 }
27222
27223-#define UNSHARED_PTRS_PER_PGD \
27224- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27225+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27226+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
27227
27228+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27229+{
27230+ while (count--)
27231+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
27232+}
27233+#endif
27234+
27235+#ifdef CONFIG_PAX_PER_CPU_PGD
27236+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
27237+{
27238+ while (count--)
27239+
27240+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27241+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
27242+#else
27243+ *dst++ = *src++;
27244+#endif
27245+
27246+}
27247+#endif
27248+
27249+#ifdef CONFIG_X86_64
27250+#define pxd_t pud_t
27251+#define pyd_t pgd_t
27252+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
27253+#define pxd_free(mm, pud) pud_free((mm), (pud))
27254+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
27255+#define pyd_offset(mm, address) pgd_offset((mm), (address))
27256+#define PYD_SIZE PGDIR_SIZE
27257+#else
27258+#define pxd_t pmd_t
27259+#define pyd_t pud_t
27260+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
27261+#define pxd_free(mm, pud) pmd_free((mm), (pud))
27262+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
27263+#define pyd_offset(mm, address) pud_offset((mm), (address))
27264+#define PYD_SIZE PUD_SIZE
27265+#endif
27266+
27267+#ifdef CONFIG_PAX_PER_CPU_PGD
27268+static inline void pgd_ctor(pgd_t *pgd) {}
27269+static inline void pgd_dtor(pgd_t *pgd) {}
27270+#else
27271 static void pgd_ctor(pgd_t *pgd)
27272 {
27273 /* If the pgd points to a shared pagetable level (either the
27274@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
27275 pgd_list_del(pgd);
27276 spin_unlock_irqrestore(&pgd_lock, flags);
27277 }
27278+#endif
27279
27280 /*
27281 * List of all pgd's needed for non-PAE so it can invalidate entries
27282@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
27283 * -- wli
27284 */
27285
27286-#ifdef CONFIG_X86_PAE
27287+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27288 /*
27289 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
27290 * updating the top-level pagetable entries to guarantee the
27291@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
27292 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
27293 * and initialize the kernel pmds here.
27294 */
27295-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
27296+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
27297
27298 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27299 {
27300@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
27301 */
27302 flush_tlb_mm(mm);
27303 }
27304+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
27305+#define PREALLOCATED_PXDS USER_PGD_PTRS
27306 #else /* !CONFIG_X86_PAE */
27307
27308 /* No need to prepopulate any pagetable entries in non-PAE modes. */
27309-#define PREALLOCATED_PMDS 0
27310+#define PREALLOCATED_PXDS 0
27311
27312 #endif /* CONFIG_X86_PAE */
27313
27314-static void free_pmds(pmd_t *pmds[])
27315+static void free_pxds(pxd_t *pxds[])
27316 {
27317 int i;
27318
27319- for(i = 0; i < PREALLOCATED_PMDS; i++)
27320- if (pmds[i])
27321- free_page((unsigned long)pmds[i]);
27322+ for(i = 0; i < PREALLOCATED_PXDS; i++)
27323+ if (pxds[i])
27324+ free_page((unsigned long)pxds[i]);
27325 }
27326
27327-static int preallocate_pmds(pmd_t *pmds[])
27328+static int preallocate_pxds(pxd_t *pxds[])
27329 {
27330 int i;
27331 bool failed = false;
27332
27333- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27334- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
27335- if (pmd == NULL)
27336+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27337+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
27338+ if (pxd == NULL)
27339 failed = true;
27340- pmds[i] = pmd;
27341+ pxds[i] = pxd;
27342 }
27343
27344 if (failed) {
27345- free_pmds(pmds);
27346+ free_pxds(pxds);
27347 return -ENOMEM;
27348 }
27349
27350@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
27351 * preallocate which never got a corresponding vma will need to be
27352 * freed manually.
27353 */
27354-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27355+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27356 {
27357 int i;
27358
27359- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27360+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27361 pgd_t pgd = pgdp[i];
27362
27363 if (pgd_val(pgd) != 0) {
27364- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27365+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27366
27367- pgdp[i] = native_make_pgd(0);
27368+ set_pgd(pgdp + i, native_make_pgd(0));
27369
27370- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27371- pmd_free(mm, pmd);
27372+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27373+ pxd_free(mm, pxd);
27374 }
27375 }
27376 }
27377
27378-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27379+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27380 {
27381- pud_t *pud;
27382+ pyd_t *pyd;
27383 unsigned long addr;
27384 int i;
27385
27386- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27387+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27388 return;
27389
27390- pud = pud_offset(pgd, 0);
27391+#ifdef CONFIG_X86_64
27392+ pyd = pyd_offset(mm, 0L);
27393+#else
27394+ pyd = pyd_offset(pgd, 0L);
27395+#endif
27396
27397- for (addr = i = 0; i < PREALLOCATED_PMDS;
27398- i++, pud++, addr += PUD_SIZE) {
27399- pmd_t *pmd = pmds[i];
27400+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27401+ i++, pyd++, addr += PYD_SIZE) {
27402+ pxd_t *pxd = pxds[i];
27403
27404 if (i >= KERNEL_PGD_BOUNDARY)
27405- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27406- sizeof(pmd_t) * PTRS_PER_PMD);
27407+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27408+ sizeof(pxd_t) * PTRS_PER_PMD);
27409
27410- pud_populate(mm, pud, pmd);
27411+ pyd_populate(mm, pyd, pxd);
27412 }
27413 }
27414
27415 pgd_t *pgd_alloc(struct mm_struct *mm)
27416 {
27417 pgd_t *pgd;
27418- pmd_t *pmds[PREALLOCATED_PMDS];
27419+ pxd_t *pxds[PREALLOCATED_PXDS];
27420+
27421 unsigned long flags;
27422
27423 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27424@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27425
27426 mm->pgd = pgd;
27427
27428- if (preallocate_pmds(pmds) != 0)
27429+ if (preallocate_pxds(pxds) != 0)
27430 goto out_free_pgd;
27431
27432 if (paravirt_pgd_alloc(mm) != 0)
27433- goto out_free_pmds;
27434+ goto out_free_pxds;
27435
27436 /*
27437 * Make sure that pre-populating the pmds is atomic with
27438@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27439 spin_lock_irqsave(&pgd_lock, flags);
27440
27441 pgd_ctor(pgd);
27442- pgd_prepopulate_pmd(mm, pgd, pmds);
27443+ pgd_prepopulate_pxd(mm, pgd, pxds);
27444
27445 spin_unlock_irqrestore(&pgd_lock, flags);
27446
27447 return pgd;
27448
27449-out_free_pmds:
27450- free_pmds(pmds);
27451+out_free_pxds:
27452+ free_pxds(pxds);
27453 out_free_pgd:
27454 free_page((unsigned long)pgd);
27455 out:
27456@@ -287,7 +338,7 @@ out:
27457
27458 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27459 {
27460- pgd_mop_up_pmds(mm, pgd);
27461+ pgd_mop_up_pxds(mm, pgd);
27462 pgd_dtor(pgd);
27463 paravirt_pgd_free(mm, pgd);
27464 free_page((unsigned long)pgd);
27465diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27466index 46c8834..fcab43d 100644
27467--- a/arch/x86/mm/pgtable_32.c
27468+++ b/arch/x86/mm/pgtable_32.c
27469@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27470 return;
27471 }
27472 pte = pte_offset_kernel(pmd, vaddr);
27473+
27474+ pax_open_kernel();
27475 if (pte_val(pteval))
27476 set_pte_at(&init_mm, vaddr, pte, pteval);
27477 else
27478 pte_clear(&init_mm, vaddr, pte);
27479+ pax_close_kernel();
27480
27481 /*
27482 * It's enough to flush this one mapping.
27483diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27484index 513d8ed..978c161 100644
27485--- a/arch/x86/mm/setup_nx.c
27486+++ b/arch/x86/mm/setup_nx.c
27487@@ -4,11 +4,10 @@
27488
27489 #include <asm/pgtable.h>
27490
27491+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27492 int nx_enabled;
27493
27494-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27495-static int disable_nx __cpuinitdata;
27496-
27497+#ifndef CONFIG_PAX_PAGEEXEC
27498 /*
27499 * noexec = on|off
27500 *
27501@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
27502 if (!str)
27503 return -EINVAL;
27504 if (!strncmp(str, "on", 2)) {
27505- __supported_pte_mask |= _PAGE_NX;
27506- disable_nx = 0;
27507+ nx_enabled = 1;
27508 } else if (!strncmp(str, "off", 3)) {
27509- disable_nx = 1;
27510- __supported_pte_mask &= ~_PAGE_NX;
27511+ nx_enabled = 0;
27512 }
27513 return 0;
27514 }
27515 early_param("noexec", noexec_setup);
27516 #endif
27517+#endif
27518
27519 #ifdef CONFIG_X86_PAE
27520 void __init set_nx(void)
27521 {
27522- unsigned int v[4], l, h;
27523+ if (!nx_enabled && cpu_has_nx) {
27524+ unsigned l, h;
27525
27526- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
27527- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
27528-
27529- if ((v[3] & (1 << 20)) && !disable_nx) {
27530- rdmsr(MSR_EFER, l, h);
27531- l |= EFER_NX;
27532- wrmsr(MSR_EFER, l, h);
27533- nx_enabled = 1;
27534- __supported_pte_mask |= _PAGE_NX;
27535- }
27536+ __supported_pte_mask &= ~_PAGE_NX;
27537+ rdmsr(MSR_EFER, l, h);
27538+ l &= ~EFER_NX;
27539+ wrmsr(MSR_EFER, l, h);
27540 }
27541 }
27542 #else
27543@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
27544 unsigned long efer;
27545
27546 rdmsrl(MSR_EFER, efer);
27547- if (!(efer & EFER_NX) || disable_nx)
27548+ if (!(efer & EFER_NX) || !nx_enabled)
27549 __supported_pte_mask &= ~_PAGE_NX;
27550 }
27551 #endif
27552diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27553index 36fe08e..b123d3a 100644
27554--- a/arch/x86/mm/tlb.c
27555+++ b/arch/x86/mm/tlb.c
27556@@ -61,7 +61,11 @@ void leave_mm(int cpu)
27557 BUG();
27558 cpumask_clear_cpu(cpu,
27559 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
27560+
27561+#ifndef CONFIG_PAX_PER_CPU_PGD
27562 load_cr3(swapper_pg_dir);
27563+#endif
27564+
27565 }
27566 EXPORT_SYMBOL_GPL(leave_mm);
27567
27568diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27569index 829edf0..672adb3 100644
27570--- a/arch/x86/oprofile/backtrace.c
27571+++ b/arch/x86/oprofile/backtrace.c
27572@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27573 {
27574 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
27575
27576- if (!user_mode_vm(regs)) {
27577+ if (!user_mode(regs)) {
27578 unsigned long stack = kernel_stack_pointer(regs);
27579 if (depth)
27580 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27581diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
27582index e6a160a..36deff6 100644
27583--- a/arch/x86/oprofile/op_model_p4.c
27584+++ b/arch/x86/oprofile/op_model_p4.c
27585@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
27586 #endif
27587 }
27588
27589-static int inline addr_increment(void)
27590+static inline int addr_increment(void)
27591 {
27592 #ifdef CONFIG_SMP
27593 return smp_num_siblings == 2 ? 2 : 1;
27594diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
27595index 1331fcf..03901b2 100644
27596--- a/arch/x86/pci/common.c
27597+++ b/arch/x86/pci/common.c
27598@@ -31,8 +31,8 @@ int noioapicreroute = 1;
27599 int pcibios_last_bus = -1;
27600 unsigned long pirq_table_addr;
27601 struct pci_bus *pci_root_bus;
27602-struct pci_raw_ops *raw_pci_ops;
27603-struct pci_raw_ops *raw_pci_ext_ops;
27604+const struct pci_raw_ops *raw_pci_ops;
27605+const struct pci_raw_ops *raw_pci_ext_ops;
27606
27607 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
27608 int reg, int len, u32 *val)
27609diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
27610index 347d882..4baf6b6 100644
27611--- a/arch/x86/pci/direct.c
27612+++ b/arch/x86/pci/direct.c
27613@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
27614
27615 #undef PCI_CONF1_ADDRESS
27616
27617-struct pci_raw_ops pci_direct_conf1 = {
27618+const struct pci_raw_ops pci_direct_conf1 = {
27619 .read = pci_conf1_read,
27620 .write = pci_conf1_write,
27621 };
27622@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
27623
27624 #undef PCI_CONF2_ADDRESS
27625
27626-struct pci_raw_ops pci_direct_conf2 = {
27627+const struct pci_raw_ops pci_direct_conf2 = {
27628 .read = pci_conf2_read,
27629 .write = pci_conf2_write,
27630 };
27631@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
27632 * This should be close to trivial, but it isn't, because there are buggy
27633 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
27634 */
27635-static int __init pci_sanity_check(struct pci_raw_ops *o)
27636+static int __init pci_sanity_check(const struct pci_raw_ops *o)
27637 {
27638 u32 x = 0;
27639 int year, devfn;
27640diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
27641index f10a7e9..0425342 100644
27642--- a/arch/x86/pci/mmconfig_32.c
27643+++ b/arch/x86/pci/mmconfig_32.c
27644@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27645 return 0;
27646 }
27647
27648-static struct pci_raw_ops pci_mmcfg = {
27649+static const struct pci_raw_ops pci_mmcfg = {
27650 .read = pci_mmcfg_read,
27651 .write = pci_mmcfg_write,
27652 };
27653diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
27654index 94349f8..41600a7 100644
27655--- a/arch/x86/pci/mmconfig_64.c
27656+++ b/arch/x86/pci/mmconfig_64.c
27657@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27658 return 0;
27659 }
27660
27661-static struct pci_raw_ops pci_mmcfg = {
27662+static const struct pci_raw_ops pci_mmcfg = {
27663 .read = pci_mmcfg_read,
27664 .write = pci_mmcfg_write,
27665 };
27666diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
27667index 8eb295e..86bd657 100644
27668--- a/arch/x86/pci/numaq_32.c
27669+++ b/arch/x86/pci/numaq_32.c
27670@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
27671
27672 #undef PCI_CONF1_MQ_ADDRESS
27673
27674-static struct pci_raw_ops pci_direct_conf1_mq = {
27675+static const struct pci_raw_ops pci_direct_conf1_mq = {
27676 .read = pci_conf1_mq_read,
27677 .write = pci_conf1_mq_write
27678 };
27679diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
27680index b889d82..5a58a0a 100644
27681--- a/arch/x86/pci/olpc.c
27682+++ b/arch/x86/pci/olpc.c
27683@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
27684 return 0;
27685 }
27686
27687-static struct pci_raw_ops pci_olpc_conf = {
27688+static const struct pci_raw_ops pci_olpc_conf = {
27689 .read = pci_olpc_read,
27690 .write = pci_olpc_write,
27691 };
27692diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27693index 1c975cc..b8e16c2 100644
27694--- a/arch/x86/pci/pcbios.c
27695+++ b/arch/x86/pci/pcbios.c
27696@@ -56,50 +56,93 @@ union bios32 {
27697 static struct {
27698 unsigned long address;
27699 unsigned short segment;
27700-} bios32_indirect = { 0, __KERNEL_CS };
27701+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27702
27703 /*
27704 * Returns the entry point for the given service, NULL on error
27705 */
27706
27707-static unsigned long bios32_service(unsigned long service)
27708+static unsigned long __devinit bios32_service(unsigned long service)
27709 {
27710 unsigned char return_code; /* %al */
27711 unsigned long address; /* %ebx */
27712 unsigned long length; /* %ecx */
27713 unsigned long entry; /* %edx */
27714 unsigned long flags;
27715+ struct desc_struct d, *gdt;
27716
27717 local_irq_save(flags);
27718- __asm__("lcall *(%%edi); cld"
27719+
27720+ gdt = get_cpu_gdt_table(smp_processor_id());
27721+
27722+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27723+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27724+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27725+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27726+
27727+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27728 : "=a" (return_code),
27729 "=b" (address),
27730 "=c" (length),
27731 "=d" (entry)
27732 : "0" (service),
27733 "1" (0),
27734- "D" (&bios32_indirect));
27735+ "D" (&bios32_indirect),
27736+ "r"(__PCIBIOS_DS)
27737+ : "memory");
27738+
27739+ pax_open_kernel();
27740+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27741+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27742+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27743+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27744+ pax_close_kernel();
27745+
27746 local_irq_restore(flags);
27747
27748 switch (return_code) {
27749- case 0:
27750- return address + entry;
27751- case 0x80: /* Not present */
27752- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27753- return 0;
27754- default: /* Shouldn't happen */
27755- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27756- service, return_code);
27757+ case 0: {
27758+ int cpu;
27759+ unsigned char flags;
27760+
27761+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27762+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27763+ printk(KERN_WARNING "bios32_service: not valid\n");
27764 return 0;
27765+ }
27766+ address = address + PAGE_OFFSET;
27767+ length += 16UL; /* some BIOSs underreport this... */
27768+ flags = 4;
27769+ if (length >= 64*1024*1024) {
27770+ length >>= PAGE_SHIFT;
27771+ flags |= 8;
27772+ }
27773+
27774+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27775+ gdt = get_cpu_gdt_table(cpu);
27776+ pack_descriptor(&d, address, length, 0x9b, flags);
27777+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27778+ pack_descriptor(&d, address, length, 0x93, flags);
27779+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27780+ }
27781+ return entry;
27782+ }
27783+ case 0x80: /* Not present */
27784+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27785+ return 0;
27786+ default: /* Shouldn't happen */
27787+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27788+ service, return_code);
27789+ return 0;
27790 }
27791 }
27792
27793 static struct {
27794 unsigned long address;
27795 unsigned short segment;
27796-} pci_indirect = { 0, __KERNEL_CS };
27797+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27798
27799-static int pci_bios_present;
27800+static int pci_bios_present __read_only;
27801
27802 static int __devinit check_pcibios(void)
27803 {
27804@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
27805 unsigned long flags, pcibios_entry;
27806
27807 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27808- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27809+ pci_indirect.address = pcibios_entry;
27810
27811 local_irq_save(flags);
27812- __asm__(
27813- "lcall *(%%edi); cld\n\t"
27814+ __asm__("movw %w6, %%ds\n\t"
27815+ "lcall *%%ss:(%%edi); cld\n\t"
27816+ "push %%ss\n\t"
27817+ "pop %%ds\n\t"
27818 "jc 1f\n\t"
27819 "xor %%ah, %%ah\n"
27820 "1:"
27821@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
27822 "=b" (ebx),
27823 "=c" (ecx)
27824 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27825- "D" (&pci_indirect)
27826+ "D" (&pci_indirect),
27827+ "r" (__PCIBIOS_DS)
27828 : "memory");
27829 local_irq_restore(flags);
27830
27831@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27832
27833 switch (len) {
27834 case 1:
27835- __asm__("lcall *(%%esi); cld\n\t"
27836+ __asm__("movw %w6, %%ds\n\t"
27837+ "lcall *%%ss:(%%esi); cld\n\t"
27838+ "push %%ss\n\t"
27839+ "pop %%ds\n\t"
27840 "jc 1f\n\t"
27841 "xor %%ah, %%ah\n"
27842 "1:"
27843@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27844 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27845 "b" (bx),
27846 "D" ((long)reg),
27847- "S" (&pci_indirect));
27848+ "S" (&pci_indirect),
27849+ "r" (__PCIBIOS_DS));
27850 /*
27851 * Zero-extend the result beyond 8 bits, do not trust the
27852 * BIOS having done it:
27853@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27854 *value &= 0xff;
27855 break;
27856 case 2:
27857- __asm__("lcall *(%%esi); cld\n\t"
27858+ __asm__("movw %w6, %%ds\n\t"
27859+ "lcall *%%ss:(%%esi); cld\n\t"
27860+ "push %%ss\n\t"
27861+ "pop %%ds\n\t"
27862 "jc 1f\n\t"
27863 "xor %%ah, %%ah\n"
27864 "1:"
27865@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27866 : "1" (PCIBIOS_READ_CONFIG_WORD),
27867 "b" (bx),
27868 "D" ((long)reg),
27869- "S" (&pci_indirect));
27870+ "S" (&pci_indirect),
27871+ "r" (__PCIBIOS_DS));
27872 /*
27873 * Zero-extend the result beyond 16 bits, do not trust the
27874 * BIOS having done it:
27875@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27876 *value &= 0xffff;
27877 break;
27878 case 4:
27879- __asm__("lcall *(%%esi); cld\n\t"
27880+ __asm__("movw %w6, %%ds\n\t"
27881+ "lcall *%%ss:(%%esi); cld\n\t"
27882+ "push %%ss\n\t"
27883+ "pop %%ds\n\t"
27884 "jc 1f\n\t"
27885 "xor %%ah, %%ah\n"
27886 "1:"
27887@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27888 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27889 "b" (bx),
27890 "D" ((long)reg),
27891- "S" (&pci_indirect));
27892+ "S" (&pci_indirect),
27893+ "r" (__PCIBIOS_DS));
27894 break;
27895 }
27896
27897@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27898
27899 switch (len) {
27900 case 1:
27901- __asm__("lcall *(%%esi); cld\n\t"
27902+ __asm__("movw %w6, %%ds\n\t"
27903+ "lcall *%%ss:(%%esi); cld\n\t"
27904+ "push %%ss\n\t"
27905+ "pop %%ds\n\t"
27906 "jc 1f\n\t"
27907 "xor %%ah, %%ah\n"
27908 "1:"
27909@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27910 "c" (value),
27911 "b" (bx),
27912 "D" ((long)reg),
27913- "S" (&pci_indirect));
27914+ "S" (&pci_indirect),
27915+ "r" (__PCIBIOS_DS));
27916 break;
27917 case 2:
27918- __asm__("lcall *(%%esi); cld\n\t"
27919+ __asm__("movw %w6, %%ds\n\t"
27920+ "lcall *%%ss:(%%esi); cld\n\t"
27921+ "push %%ss\n\t"
27922+ "pop %%ds\n\t"
27923 "jc 1f\n\t"
27924 "xor %%ah, %%ah\n"
27925 "1:"
27926@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27927 "c" (value),
27928 "b" (bx),
27929 "D" ((long)reg),
27930- "S" (&pci_indirect));
27931+ "S" (&pci_indirect),
27932+ "r" (__PCIBIOS_DS));
27933 break;
27934 case 4:
27935- __asm__("lcall *(%%esi); cld\n\t"
27936+ __asm__("movw %w6, %%ds\n\t"
27937+ "lcall *%%ss:(%%esi); cld\n\t"
27938+ "push %%ss\n\t"
27939+ "pop %%ds\n\t"
27940 "jc 1f\n\t"
27941 "xor %%ah, %%ah\n"
27942 "1:"
27943@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27944 "c" (value),
27945 "b" (bx),
27946 "D" ((long)reg),
27947- "S" (&pci_indirect));
27948+ "S" (&pci_indirect),
27949+ "r" (__PCIBIOS_DS));
27950 break;
27951 }
27952
27953@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27954 * Function table for BIOS32 access
27955 */
27956
27957-static struct pci_raw_ops pci_bios_access = {
27958+static const struct pci_raw_ops pci_bios_access = {
27959 .read = pci_bios_read,
27960 .write = pci_bios_write
27961 };
27962@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
27963 * Try to find PCI BIOS.
27964 */
27965
27966-static struct pci_raw_ops * __devinit pci_find_bios(void)
27967+static const struct pci_raw_ops * __devinit pci_find_bios(void)
27968 {
27969 union bios32 *check;
27970 unsigned char sum;
27971@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27972
27973 DBG("PCI: Fetching IRQ routing table... ");
27974 __asm__("push %%es\n\t"
27975+ "movw %w8, %%ds\n\t"
27976 "push %%ds\n\t"
27977 "pop %%es\n\t"
27978- "lcall *(%%esi); cld\n\t"
27979+ "lcall *%%ss:(%%esi); cld\n\t"
27980 "pop %%es\n\t"
27981+ "push %%ss\n\t"
27982+ "pop %%ds\n"
27983 "jc 1f\n\t"
27984 "xor %%ah, %%ah\n"
27985 "1:"
27986@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27987 "1" (0),
27988 "D" ((long) &opt),
27989 "S" (&pci_indirect),
27990- "m" (opt)
27991+ "m" (opt),
27992+ "r" (__PCIBIOS_DS)
27993 : "memory");
27994 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
27995 if (ret & 0xff00)
27996@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27997 {
27998 int ret;
27999
28000- __asm__("lcall *(%%esi); cld\n\t"
28001+ __asm__("movw %w5, %%ds\n\t"
28002+ "lcall *%%ss:(%%esi); cld\n\t"
28003+ "push %%ss\n\t"
28004+ "pop %%ds\n"
28005 "jc 1f\n\t"
28006 "xor %%ah, %%ah\n"
28007 "1:"
28008@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
28009 : "0" (PCIBIOS_SET_PCI_HW_INT),
28010 "b" ((dev->bus->number << 8) | dev->devfn),
28011 "c" ((irq << 8) | (pin + 10)),
28012- "S" (&pci_indirect));
28013+ "S" (&pci_indirect),
28014+ "r" (__PCIBIOS_DS));
28015 return !(ret & 0xff00);
28016 }
28017 EXPORT_SYMBOL(pcibios_set_irq_routing);
28018diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
28019index fa0f651..9d8f3d9 100644
28020--- a/arch/x86/power/cpu.c
28021+++ b/arch/x86/power/cpu.c
28022@@ -129,7 +129,7 @@ static void do_fpu_end(void)
28023 static void fix_processor_context(void)
28024 {
28025 int cpu = smp_processor_id();
28026- struct tss_struct *t = &per_cpu(init_tss, cpu);
28027+ struct tss_struct *t = init_tss + cpu;
28028
28029 set_tss_desc(cpu, t); /*
28030 * This just modifies memory; should not be
28031@@ -139,7 +139,9 @@ static void fix_processor_context(void)
28032 */
28033
28034 #ifdef CONFIG_X86_64
28035+ pax_open_kernel();
28036 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
28037+ pax_close_kernel();
28038
28039 syscall_init(); /* This sets MSR_*STAR and related */
28040 #endif
28041diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
28042index dd78ef6..f9d928d 100644
28043--- a/arch/x86/vdso/Makefile
28044+++ b/arch/x86/vdso/Makefile
28045@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
28046 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
28047 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
28048
28049-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28050+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
28051 GCOV_PROFILE := n
28052
28053 #
28054diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
28055index ee55754..0013b2e 100644
28056--- a/arch/x86/vdso/vclock_gettime.c
28057+++ b/arch/x86/vdso/vclock_gettime.c
28058@@ -22,24 +22,48 @@
28059 #include <asm/hpet.h>
28060 #include <asm/unistd.h>
28061 #include <asm/io.h>
28062+#include <asm/fixmap.h>
28063 #include "vextern.h"
28064
28065 #define gtod vdso_vsyscall_gtod_data
28066
28067+notrace noinline long __vdso_fallback_time(long *t)
28068+{
28069+ long secs;
28070+ asm volatile("syscall"
28071+ : "=a" (secs)
28072+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
28073+ return secs;
28074+}
28075+
28076 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
28077 {
28078 long ret;
28079 asm("syscall" : "=a" (ret) :
28080- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
28081+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
28082 return ret;
28083 }
28084
28085+notrace static inline cycle_t __vdso_vread_hpet(void)
28086+{
28087+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
28088+}
28089+
28090+notrace static inline cycle_t __vdso_vread_tsc(void)
28091+{
28092+ cycle_t ret = (cycle_t)vget_cycles();
28093+
28094+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
28095+}
28096+
28097 notrace static inline long vgetns(void)
28098 {
28099 long v;
28100- cycles_t (*vread)(void);
28101- vread = gtod->clock.vread;
28102- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
28103+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
28104+ v = __vdso_vread_tsc();
28105+ else
28106+ v = __vdso_vread_hpet();
28107+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
28108 return (v * gtod->clock.mult) >> gtod->clock.shift;
28109 }
28110
28111@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
28112
28113 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28114 {
28115- if (likely(gtod->sysctl_enabled))
28116+ if (likely(gtod->sysctl_enabled &&
28117+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28118+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28119 switch (clock) {
28120 case CLOCK_REALTIME:
28121 if (likely(gtod->clock.vread))
28122@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
28123 int clock_gettime(clockid_t, struct timespec *)
28124 __attribute__((weak, alias("__vdso_clock_gettime")));
28125
28126+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
28127+{
28128+ long ret;
28129+ asm("syscall" : "=a" (ret) :
28130+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
28131+ return ret;
28132+}
28133+
28134 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28135 {
28136- long ret;
28137- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
28138+ if (likely(gtod->sysctl_enabled &&
28139+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
28140+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
28141+ {
28142 if (likely(tv != NULL)) {
28143 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
28144 offsetof(struct timespec, tv_nsec) ||
28145@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
28146 }
28147 return 0;
28148 }
28149- asm("syscall" : "=a" (ret) :
28150- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
28151- return ret;
28152+ return __vdso_fallback_gettimeofday(tv, tz);
28153 }
28154 int gettimeofday(struct timeval *, struct timezone *)
28155 __attribute__((weak, alias("__vdso_gettimeofday")));
28156diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
28157index 4e5dd3b..00ba15e 100644
28158--- a/arch/x86/vdso/vdso.lds.S
28159+++ b/arch/x86/vdso/vdso.lds.S
28160@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
28161 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
28162 #include "vextern.h"
28163 #undef VEXTERN
28164+
28165+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
28166+VEXTERN(fallback_gettimeofday)
28167+VEXTERN(fallback_time)
28168+VEXTERN(getcpu)
28169+#undef VEXTERN
28170diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
28171index 58bc00f..d53fb48 100644
28172--- a/arch/x86/vdso/vdso32-setup.c
28173+++ b/arch/x86/vdso/vdso32-setup.c
28174@@ -25,6 +25,7 @@
28175 #include <asm/tlbflush.h>
28176 #include <asm/vdso.h>
28177 #include <asm/proto.h>
28178+#include <asm/mman.h>
28179
28180 enum {
28181 VDSO_DISABLED = 0,
28182@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
28183 void enable_sep_cpu(void)
28184 {
28185 int cpu = get_cpu();
28186- struct tss_struct *tss = &per_cpu(init_tss, cpu);
28187+ struct tss_struct *tss = init_tss + cpu;
28188
28189 if (!boot_cpu_has(X86_FEATURE_SEP)) {
28190 put_cpu();
28191@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
28192 gate_vma.vm_start = FIXADDR_USER_START;
28193 gate_vma.vm_end = FIXADDR_USER_END;
28194 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
28195- gate_vma.vm_page_prot = __P101;
28196+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
28197 /*
28198 * Make sure the vDSO gets into every core dump.
28199 * Dumping its contents makes post-mortem fully interpretable later
28200@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28201 if (compat)
28202 addr = VDSO_HIGH_BASE;
28203 else {
28204- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
28205+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
28206 if (IS_ERR_VALUE(addr)) {
28207 ret = addr;
28208 goto up_fail;
28209 }
28210 }
28211
28212- current->mm->context.vdso = (void *)addr;
28213+ current->mm->context.vdso = addr;
28214
28215 if (compat_uses_vma || !compat) {
28216 /*
28217@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28218 }
28219
28220 current_thread_info()->sysenter_return =
28221- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28222+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
28223
28224 up_fail:
28225 if (ret)
28226- current->mm->context.vdso = NULL;
28227+ current->mm->context.vdso = 0;
28228
28229 up_write(&mm->mmap_sem);
28230
28231@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
28232
28233 const char *arch_vma_name(struct vm_area_struct *vma)
28234 {
28235- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28236+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28237 return "[vdso]";
28238+
28239+#ifdef CONFIG_PAX_SEGMEXEC
28240+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
28241+ return "[vdso]";
28242+#endif
28243+
28244 return NULL;
28245 }
28246
28247@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
28248 struct mm_struct *mm = tsk->mm;
28249
28250 /* Check to see if this task was created in compat vdso mode */
28251- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
28252+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
28253 return &gate_vma;
28254 return NULL;
28255 }
28256diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
28257index 1683ba2..48d07f3 100644
28258--- a/arch/x86/vdso/vextern.h
28259+++ b/arch/x86/vdso/vextern.h
28260@@ -11,6 +11,5 @@
28261 put into vextern.h and be referenced as a pointer with vdso prefix.
28262 The main kernel later fills in the values. */
28263
28264-VEXTERN(jiffies)
28265 VEXTERN(vgetcpu_mode)
28266 VEXTERN(vsyscall_gtod_data)
28267diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
28268index 21e1aeb..2c0b3c4 100644
28269--- a/arch/x86/vdso/vma.c
28270+++ b/arch/x86/vdso/vma.c
28271@@ -17,8 +17,6 @@
28272 #include "vextern.h" /* Just for VMAGIC. */
28273 #undef VEXTERN
28274
28275-unsigned int __read_mostly vdso_enabled = 1;
28276-
28277 extern char vdso_start[], vdso_end[];
28278 extern unsigned short vdso_sync_cpuid;
28279
28280@@ -27,10 +25,8 @@ static unsigned vdso_size;
28281
28282 static inline void *var_ref(void *p, char *name)
28283 {
28284- if (*(void **)p != (void *)VMAGIC) {
28285- printk("VDSO: variable %s broken\n", name);
28286- vdso_enabled = 0;
28287- }
28288+ if (*(void **)p != (void *)VMAGIC)
28289+ panic("VDSO: variable %s broken\n", name);
28290 return p;
28291 }
28292
28293@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
28294 if (!vbase)
28295 goto oom;
28296
28297- if (memcmp(vbase, "\177ELF", 4)) {
28298- printk("VDSO: I'm broken; not ELF\n");
28299- vdso_enabled = 0;
28300- }
28301+ if (memcmp(vbase, ELFMAG, SELFMAG))
28302+ panic("VDSO: I'm broken; not ELF\n");
28303
28304 #define VEXTERN(x) \
28305 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
28306 #include "vextern.h"
28307 #undef VEXTERN
28308+ vunmap(vbase);
28309 return 0;
28310
28311 oom:
28312- printk("Cannot allocate vdso\n");
28313- vdso_enabled = 0;
28314- return -ENOMEM;
28315+ panic("Cannot allocate vdso\n");
28316 }
28317 __initcall(init_vdso_vars);
28318
28319@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
28320 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28321 {
28322 struct mm_struct *mm = current->mm;
28323- unsigned long addr;
28324+ unsigned long addr = 0;
28325 int ret;
28326
28327- if (!vdso_enabled)
28328- return 0;
28329-
28330 down_write(&mm->mmap_sem);
28331+
28332+#ifdef CONFIG_PAX_RANDMMAP
28333+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28334+#endif
28335+
28336 addr = vdso_addr(mm->start_stack, vdso_size);
28337 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
28338 if (IS_ERR_VALUE(addr)) {
28339@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28340 goto up_fail;
28341 }
28342
28343- current->mm->context.vdso = (void *)addr;
28344+ current->mm->context.vdso = addr;
28345
28346 ret = install_special_mapping(mm, addr, vdso_size,
28347 VM_READ|VM_EXEC|
28348@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
28349 VM_ALWAYSDUMP,
28350 vdso_pages);
28351 if (ret) {
28352- current->mm->context.vdso = NULL;
28353+ current->mm->context.vdso = 0;
28354 goto up_fail;
28355 }
28356
28357@@ -132,10 +127,3 @@ up_fail:
28358 up_write(&mm->mmap_sem);
28359 return ret;
28360 }
28361-
28362-static __init int vdso_setup(char *s)
28363-{
28364- vdso_enabled = simple_strtoul(s, NULL, 0);
28365- return 0;
28366-}
28367-__setup("vdso=", vdso_setup);
28368diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
28369index 0087b00..eecb34f 100644
28370--- a/arch/x86/xen/enlighten.c
28371+++ b/arch/x86/xen/enlighten.c
28372@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
28373
28374 struct shared_info xen_dummy_shared_info;
28375
28376-void *xen_initial_gdt;
28377-
28378 /*
28379 * Point at some empty memory to start with. We map the real shared_info
28380 * page as soon as fixmap is up and running.
28381@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
28382
28383 preempt_disable();
28384
28385- start = __get_cpu_var(idt_desc).address;
28386+ start = (unsigned long)__get_cpu_var(idt_desc).address;
28387 end = start + __get_cpu_var(idt_desc).size + 1;
28388
28389 xen_mc_flush();
28390@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
28391 #endif
28392 };
28393
28394-static void xen_reboot(int reason)
28395+static __noreturn void xen_reboot(int reason)
28396 {
28397 struct sched_shutdown r = { .reason = reason };
28398
28399@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
28400 BUG();
28401 }
28402
28403-static void xen_restart(char *msg)
28404+static __noreturn void xen_restart(char *msg)
28405 {
28406 xen_reboot(SHUTDOWN_reboot);
28407 }
28408
28409-static void xen_emergency_restart(void)
28410+static __noreturn void xen_emergency_restart(void)
28411 {
28412 xen_reboot(SHUTDOWN_reboot);
28413 }
28414
28415-static void xen_machine_halt(void)
28416+static __noreturn void xen_machine_halt(void)
28417 {
28418 xen_reboot(SHUTDOWN_poweroff);
28419 }
28420@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
28421 */
28422 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
28423
28424-#ifdef CONFIG_X86_64
28425 /* Work out if we support NX */
28426- check_efer();
28427+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28428+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
28429+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
28430+ unsigned l, h;
28431+
28432+#ifdef CONFIG_X86_PAE
28433+ nx_enabled = 1;
28434+#endif
28435+ __supported_pte_mask |= _PAGE_NX;
28436+ rdmsr(MSR_EFER, l, h);
28437+ l |= EFER_NX;
28438+ wrmsr(MSR_EFER, l, h);
28439+ }
28440 #endif
28441
28442 xen_setup_features();
28443@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
28444
28445 machine_ops = xen_machine_ops;
28446
28447- /*
28448- * The only reliable way to retain the initial address of the
28449- * percpu gdt_page is to remember it here, so we can go and
28450- * mark it RW later, when the initial percpu area is freed.
28451- */
28452- xen_initial_gdt = &per_cpu(gdt_page, 0);
28453-
28454 xen_smp_init();
28455
28456 pgd = (pgd_t *)xen_start_info->pt_base;
28457diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
28458index 3f90a2c..2c2ad84 100644
28459--- a/arch/x86/xen/mmu.c
28460+++ b/arch/x86/xen/mmu.c
28461@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28462 convert_pfn_mfn(init_level4_pgt);
28463 convert_pfn_mfn(level3_ident_pgt);
28464 convert_pfn_mfn(level3_kernel_pgt);
28465+ convert_pfn_mfn(level3_vmalloc_start_pgt);
28466+ convert_pfn_mfn(level3_vmalloc_end_pgt);
28467+ convert_pfn_mfn(level3_vmemmap_pgt);
28468
28469 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
28470 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
28471@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28472 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
28473 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
28474 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
28475+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
28476+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
28477+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
28478 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
28479+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
28480 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
28481 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
28482
28483@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
28484 pv_mmu_ops.set_pud = xen_set_pud;
28485 #if PAGETABLE_LEVELS == 4
28486 pv_mmu_ops.set_pgd = xen_set_pgd;
28487+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
28488 #endif
28489
28490 /* This will work as long as patching hasn't happened yet
28491@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
28492 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
28493 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
28494 .set_pgd = xen_set_pgd_hyper,
28495+ .set_pgd_batched = xen_set_pgd_hyper,
28496
28497 .alloc_pud = xen_alloc_pmd_init,
28498 .release_pud = xen_release_pmd_init,
28499diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
28500index a96204a..fca9b8e 100644
28501--- a/arch/x86/xen/smp.c
28502+++ b/arch/x86/xen/smp.c
28503@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
28504 {
28505 BUG_ON(smp_processor_id() != 0);
28506 native_smp_prepare_boot_cpu();
28507-
28508- /* We've switched to the "real" per-cpu gdt, so make sure the
28509- old memory can be recycled */
28510- make_lowmem_page_readwrite(xen_initial_gdt);
28511-
28512 xen_setup_vcpu_info_placement();
28513 }
28514
28515@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
28516 gdt = get_cpu_gdt_table(cpu);
28517
28518 ctxt->flags = VGCF_IN_KERNEL;
28519- ctxt->user_regs.ds = __USER_DS;
28520- ctxt->user_regs.es = __USER_DS;
28521+ ctxt->user_regs.ds = __KERNEL_DS;
28522+ ctxt->user_regs.es = __KERNEL_DS;
28523 ctxt->user_regs.ss = __KERNEL_DS;
28524 #ifdef CONFIG_X86_32
28525 ctxt->user_regs.fs = __KERNEL_PERCPU;
28526- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
28527+ savesegment(gs, ctxt->user_regs.gs);
28528 #else
28529 ctxt->gs_base_kernel = per_cpu_offset(cpu);
28530 #endif
28531@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
28532 int rc;
28533
28534 per_cpu(current_task, cpu) = idle;
28535+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28536 #ifdef CONFIG_X86_32
28537 irq_ctx_init(cpu);
28538 #else
28539 clear_tsk_thread_flag(idle, TIF_FORK);
28540- per_cpu(kernel_stack, cpu) =
28541- (unsigned long)task_stack_page(idle) -
28542- KERNEL_STACK_OFFSET + THREAD_SIZE;
28543+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28544 #endif
28545 xen_setup_runstate_info(cpu);
28546 xen_setup_timer(cpu);
28547diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
28548index 9a95a9c..4f39e774 100644
28549--- a/arch/x86/xen/xen-asm_32.S
28550+++ b/arch/x86/xen/xen-asm_32.S
28551@@ -83,14 +83,14 @@ ENTRY(xen_iret)
28552 ESP_OFFSET=4 # bytes pushed onto stack
28553
28554 /*
28555- * Store vcpu_info pointer for easy access. Do it this way to
28556- * avoid having to reload %fs
28557+ * Store vcpu_info pointer for easy access.
28558 */
28559 #ifdef CONFIG_SMP
28560- GET_THREAD_INFO(%eax)
28561- movl TI_cpu(%eax), %eax
28562- movl __per_cpu_offset(,%eax,4), %eax
28563- mov per_cpu__xen_vcpu(%eax), %eax
28564+ push %fs
28565+ mov $(__KERNEL_PERCPU), %eax
28566+ mov %eax, %fs
28567+ mov PER_CPU_VAR(xen_vcpu), %eax
28568+ pop %fs
28569 #else
28570 movl per_cpu__xen_vcpu, %eax
28571 #endif
28572diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
28573index 1a5ff24..a187d40 100644
28574--- a/arch/x86/xen/xen-head.S
28575+++ b/arch/x86/xen/xen-head.S
28576@@ -19,6 +19,17 @@ ENTRY(startup_xen)
28577 #ifdef CONFIG_X86_32
28578 mov %esi,xen_start_info
28579 mov $init_thread_union+THREAD_SIZE,%esp
28580+#ifdef CONFIG_SMP
28581+ movl $cpu_gdt_table,%edi
28582+ movl $__per_cpu_load,%eax
28583+ movw %ax,__KERNEL_PERCPU + 2(%edi)
28584+ rorl $16,%eax
28585+ movb %al,__KERNEL_PERCPU + 4(%edi)
28586+ movb %ah,__KERNEL_PERCPU + 7(%edi)
28587+ movl $__per_cpu_end - 1,%eax
28588+ subl $__per_cpu_start,%eax
28589+ movw %ax,__KERNEL_PERCPU + 0(%edi)
28590+#endif
28591 #else
28592 mov %rsi,xen_start_info
28593 mov $init_thread_union+THREAD_SIZE,%rsp
28594diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
28595index f9153a3..51eab3d 100644
28596--- a/arch/x86/xen/xen-ops.h
28597+++ b/arch/x86/xen/xen-ops.h
28598@@ -10,8 +10,6 @@
28599 extern const char xen_hypervisor_callback[];
28600 extern const char xen_failsafe_callback[];
28601
28602-extern void *xen_initial_gdt;
28603-
28604 struct trap_info;
28605 void xen_copy_trap_info(struct trap_info *traps);
28606
28607diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
28608index 525bd3d..ef888b1 100644
28609--- a/arch/xtensa/variants/dc232b/include/variant/core.h
28610+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
28611@@ -119,9 +119,9 @@
28612 ----------------------------------------------------------------------*/
28613
28614 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
28615-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
28616 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
28617 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
28618+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28619
28620 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
28621 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
28622diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
28623index 2f33760..835e50a 100644
28624--- a/arch/xtensa/variants/fsf/include/variant/core.h
28625+++ b/arch/xtensa/variants/fsf/include/variant/core.h
28626@@ -11,6 +11,7 @@
28627 #ifndef _XTENSA_CORE_H
28628 #define _XTENSA_CORE_H
28629
28630+#include <linux/const.h>
28631
28632 /****************************************************************************
28633 Parameters Useful for Any Code, USER or PRIVILEGED
28634@@ -112,9 +113,9 @@
28635 ----------------------------------------------------------------------*/
28636
28637 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28638-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28639 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28640 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28641+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28642
28643 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
28644 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
28645diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
28646index af00795..2bb8105 100644
28647--- a/arch/xtensa/variants/s6000/include/variant/core.h
28648+++ b/arch/xtensa/variants/s6000/include/variant/core.h
28649@@ -11,6 +11,7 @@
28650 #ifndef _XTENSA_CORE_CONFIGURATION_H
28651 #define _XTENSA_CORE_CONFIGURATION_H
28652
28653+#include <linux/const.h>
28654
28655 /****************************************************************************
28656 Parameters Useful for Any Code, USER or PRIVILEGED
28657@@ -118,9 +119,9 @@
28658 ----------------------------------------------------------------------*/
28659
28660 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28661-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28662 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28663 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28664+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28665
28666 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
28667 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
28668diff --git a/block/blk-integrity.c b/block/blk-integrity.c
28669index 15c6308..96e83c2 100644
28670--- a/block/blk-integrity.c
28671+++ b/block/blk-integrity.c
28672@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
28673 NULL,
28674 };
28675
28676-static struct sysfs_ops integrity_ops = {
28677+static const struct sysfs_ops integrity_ops = {
28678 .show = &integrity_attr_show,
28679 .store = &integrity_attr_store,
28680 };
28681diff --git a/block/blk-ioc.c b/block/blk-ioc.c
28682index d4ed600..cbdabb0 100644
28683--- a/block/blk-ioc.c
28684+++ b/block/blk-ioc.c
28685@@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
28686 }
28687
28688 /* Called by the exitting task */
28689-void exit_io_context(void)
28690+void exit_io_context(struct task_struct *task)
28691 {
28692 struct io_context *ioc;
28693
28694- task_lock(current);
28695- ioc = current->io_context;
28696- current->io_context = NULL;
28697- task_unlock(current);
28698+ task_lock(task);
28699+ ioc = task->io_context;
28700+ task->io_context = NULL;
28701+ task_unlock(task);
28702
28703 if (atomic_dec_and_test(&ioc->nr_tasks)) {
28704 if (ioc->aic && ioc->aic->exit)
28705 ioc->aic->exit(ioc->aic);
28706 cfq_exit(ioc);
28707
28708- put_io_context(ioc);
28709 }
28710+ put_io_context(ioc);
28711 }
28712
28713 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
28714diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
28715index ca56420..f2fc409 100644
28716--- a/block/blk-iopoll.c
28717+++ b/block/blk-iopoll.c
28718@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
28719 }
28720 EXPORT_SYMBOL(blk_iopoll_complete);
28721
28722-static void blk_iopoll_softirq(struct softirq_action *h)
28723+static void blk_iopoll_softirq(void)
28724 {
28725 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
28726 int rearm = 0, budget = blk_iopoll_budget;
28727diff --git a/block/blk-map.c b/block/blk-map.c
28728index 30a7e51..0aeec6a 100644
28729--- a/block/blk-map.c
28730+++ b/block/blk-map.c
28731@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
28732 * direct dma. else, set up kernel bounce buffers
28733 */
28734 uaddr = (unsigned long) ubuf;
28735- if (blk_rq_aligned(q, ubuf, len) && !map_data)
28736+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
28737 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
28738 else
28739 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
28740@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
28741 for (i = 0; i < iov_count; i++) {
28742 unsigned long uaddr = (unsigned long)iov[i].iov_base;
28743
28744+ if (!iov[i].iov_len)
28745+ return -EINVAL;
28746+
28747 if (uaddr & queue_dma_alignment(q)) {
28748 unaligned = 1;
28749 break;
28750 }
28751- if (!iov[i].iov_len)
28752- return -EINVAL;
28753 }
28754
28755 if (unaligned || (q->dma_pad_mask & len) || map_data)
28756@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
28757 if (!len || !kbuf)
28758 return -EINVAL;
28759
28760- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
28761+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
28762 if (do_copy)
28763 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
28764 else
28765diff --git a/block/blk-softirq.c b/block/blk-softirq.c
28766index ee9c216..58d410a 100644
28767--- a/block/blk-softirq.c
28768+++ b/block/blk-softirq.c
28769@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
28770 * Softirq action handler - move entries to local list and loop over them
28771 * while passing them to the queue registered handler.
28772 */
28773-static void blk_done_softirq(struct softirq_action *h)
28774+static void blk_done_softirq(void)
28775 {
28776 struct list_head *cpu_list, local_list;
28777
28778diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
28779index bb9c5ea..5330d48 100644
28780--- a/block/blk-sysfs.c
28781+++ b/block/blk-sysfs.c
28782@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
28783 kmem_cache_free(blk_requestq_cachep, q);
28784 }
28785
28786-static struct sysfs_ops queue_sysfs_ops = {
28787+static const struct sysfs_ops queue_sysfs_ops = {
28788 .show = queue_attr_show,
28789 .store = queue_attr_store,
28790 };
28791diff --git a/block/bsg.c b/block/bsg.c
28792index e3e3241..759ebf7 100644
28793--- a/block/bsg.c
28794+++ b/block/bsg.c
28795@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28796 struct sg_io_v4 *hdr, struct bsg_device *bd,
28797 fmode_t has_write_perm)
28798 {
28799+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28800+ unsigned char *cmdptr;
28801+
28802 if (hdr->request_len > BLK_MAX_CDB) {
28803 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28804 if (!rq->cmd)
28805 return -ENOMEM;
28806- }
28807+ cmdptr = rq->cmd;
28808+ } else
28809+ cmdptr = tmpcmd;
28810
28811- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
28812+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28813 hdr->request_len))
28814 return -EFAULT;
28815
28816+ if (cmdptr != rq->cmd)
28817+ memcpy(rq->cmd, cmdptr, hdr->request_len);
28818+
28819 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28820 if (blk_verify_command(rq->cmd, has_write_perm))
28821 return -EPERM;
28822@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28823 rq->next_rq = next_rq;
28824 next_rq->cmd_type = rq->cmd_type;
28825
28826- dxferp = (void*)(unsigned long)hdr->din_xferp;
28827+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28828 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
28829 hdr->din_xfer_len, GFP_KERNEL);
28830 if (ret)
28831@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28832
28833 if (hdr->dout_xfer_len) {
28834 dxfer_len = hdr->dout_xfer_len;
28835- dxferp = (void*)(unsigned long)hdr->dout_xferp;
28836+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
28837 } else if (hdr->din_xfer_len) {
28838 dxfer_len = hdr->din_xfer_len;
28839- dxferp = (void*)(unsigned long)hdr->din_xferp;
28840+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28841 } else
28842 dxfer_len = 0;
28843
28844@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
28845 int len = min_t(unsigned int, hdr->max_response_len,
28846 rq->sense_len);
28847
28848- ret = copy_to_user((void*)(unsigned long)hdr->response,
28849+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
28850 rq->sense, len);
28851 if (!ret)
28852 hdr->response_len = len;
28853diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28854index 9bd086c..ca1fc22 100644
28855--- a/block/compat_ioctl.c
28856+++ b/block/compat_ioctl.c
28857@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28858 err |= __get_user(f->spec1, &uf->spec1);
28859 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28860 err |= __get_user(name, &uf->name);
28861- f->name = compat_ptr(name);
28862+ f->name = (void __force_kernel *)compat_ptr(name);
28863 if (err) {
28864 err = -EFAULT;
28865 goto out;
28866diff --git a/block/elevator.c b/block/elevator.c
28867index a847046..75a1746 100644
28868--- a/block/elevator.c
28869+++ b/block/elevator.c
28870@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
28871 return error;
28872 }
28873
28874-static struct sysfs_ops elv_sysfs_ops = {
28875+static const struct sysfs_ops elv_sysfs_ops = {
28876 .show = elv_attr_show,
28877 .store = elv_attr_store,
28878 };
28879diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
28880index 2be0a97..bded3fd 100644
28881--- a/block/scsi_ioctl.c
28882+++ b/block/scsi_ioctl.c
28883@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
28884 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
28885 struct sg_io_hdr *hdr, fmode_t mode)
28886 {
28887- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
28888+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28889+ unsigned char *cmdptr;
28890+
28891+ if (rq->cmd != rq->__cmd)
28892+ cmdptr = rq->cmd;
28893+ else
28894+ cmdptr = tmpcmd;
28895+
28896+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
28897 return -EFAULT;
28898+
28899+ if (cmdptr != rq->cmd)
28900+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
28901+
28902 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
28903 return -EPERM;
28904
28905@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28906 int err;
28907 unsigned int in_len, out_len, bytes, opcode, cmdlen;
28908 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
28909+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28910+ unsigned char *cmdptr;
28911
28912 if (!sic)
28913 return -EINVAL;
28914@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28915 */
28916 err = -EFAULT;
28917 rq->cmd_len = cmdlen;
28918- if (copy_from_user(rq->cmd, sic->data, cmdlen))
28919+
28920+ if (rq->cmd != rq->__cmd)
28921+ cmdptr = rq->cmd;
28922+ else
28923+ cmdptr = tmpcmd;
28924+
28925+ if (copy_from_user(cmdptr, sic->data, cmdlen))
28926 goto error;
28927
28928+ if (rq->cmd != cmdptr)
28929+ memcpy(rq->cmd, cmdptr, cmdlen);
28930+
28931 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
28932 goto error;
28933
28934diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
28935index f6f0833..514d986 100644
28936--- a/crypto/ablkcipher.c
28937+++ b/crypto/ablkcipher.c
28938@@ -29,6 +29,8 @@
28939 static const char *skcipher_default_geniv __read_mostly;
28940
28941 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
28942+ unsigned int keylen) __size_overflow(3);
28943+static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
28944 unsigned int keylen)
28945 {
28946 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
28947@@ -51,6 +53,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
28948 }
28949
28950 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
28951+ unsigned int keylen) __size_overflow(3);
28952+static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
28953 unsigned int keylen)
28954 {
28955 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
28956diff --git a/crypto/aead.c b/crypto/aead.c
28957index 0a55da7..9256a04 100644
28958--- a/crypto/aead.c
28959+++ b/crypto/aead.c
28960@@ -25,6 +25,8 @@
28961 #include "internal.h"
28962
28963 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
28964+ unsigned int keylen) __size_overflow(3);
28965+static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
28966 unsigned int keylen)
28967 {
28968 struct aead_alg *aead = crypto_aead_alg(tfm);
28969@@ -46,6 +48,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
28970 return ret;
28971 }
28972
28973+static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
28974 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
28975 {
28976 struct aead_alg *aead = crypto_aead_alg(tfm);
28977diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
28978index 90d26c9..3db7c03 100644
28979--- a/crypto/blkcipher.c
28980+++ b/crypto/blkcipher.c
28981@@ -357,6 +357,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
28982 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
28983
28984 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
28985+ unsigned int keylen) __size_overflow(3);
28986+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
28987 unsigned int keylen)
28988 {
28989 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
28990@@ -378,6 +380,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
28991 return ret;
28992 }
28993
28994+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
28995 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
28996 {
28997 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
28998diff --git a/crypto/cipher.c b/crypto/cipher.c
28999index 9a1a731..41454c2 100644
29000--- a/crypto/cipher.c
29001+++ b/crypto/cipher.c
29002@@ -21,6 +21,8 @@
29003 #include "internal.h"
29004
29005 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29006+ unsigned int keylen) __size_overflow(3);
29007+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29008 unsigned int keylen)
29009 {
29010 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29011@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
29012
29013 }
29014
29015+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
29016 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
29017 {
29018 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
29019diff --git a/crypto/cryptd.c b/crypto/cryptd.c
29020index 3533582..f143117 100644
29021--- a/crypto/cryptd.c
29022+++ b/crypto/cryptd.c
29023@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
29024
29025 struct cryptd_blkcipher_request_ctx {
29026 crypto_completion_t complete;
29027-};
29028+} __no_const;
29029
29030 struct cryptd_hash_ctx {
29031 struct crypto_shash *child;
29032diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
29033index a90d260..7a9765e 100644
29034--- a/crypto/gf128mul.c
29035+++ b/crypto/gf128mul.c
29036@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
29037 for (i = 0; i < 7; ++i)
29038 gf128mul_x_lle(&p[i + 1], &p[i]);
29039
29040- memset(r, 0, sizeof(r));
29041+ memset(r, 0, sizeof(*r));
29042 for (i = 0;;) {
29043 u8 ch = ((u8 *)b)[15 - i];
29044
29045@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
29046 for (i = 0; i < 7; ++i)
29047 gf128mul_x_bbe(&p[i + 1], &p[i]);
29048
29049- memset(r, 0, sizeof(r));
29050+ memset(r, 0, sizeof(*r));
29051 for (i = 0;;) {
29052 u8 ch = ((u8 *)b)[i];
29053
29054diff --git a/crypto/serpent.c b/crypto/serpent.c
29055index b651a55..023297d 100644
29056--- a/crypto/serpent.c
29057+++ b/crypto/serpent.c
29058@@ -21,6 +21,7 @@
29059 #include <asm/byteorder.h>
29060 #include <linux/crypto.h>
29061 #include <linux/types.h>
29062+#include <linux/sched.h>
29063
29064 /* Key is padded to the maximum of 256 bits before round key generation.
29065 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
29066@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
29067 u32 r0,r1,r2,r3,r4;
29068 int i;
29069
29070+ pax_track_stack();
29071+
29072 /* Copy key, add padding */
29073
29074 for (i = 0; i < keylen; ++i)
29075diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
29076index 0d2cdb8..d8de48d 100644
29077--- a/drivers/acpi/acpi_pad.c
29078+++ b/drivers/acpi/acpi_pad.c
29079@@ -30,7 +30,7 @@
29080 #include <acpi/acpi_bus.h>
29081 #include <acpi/acpi_drivers.h>
29082
29083-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
29084+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
29085 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
29086 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
29087 static DEFINE_MUTEX(isolated_cpus_lock);
29088diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
29089index 3f4602b..1978af1 100644
29090--- a/drivers/acpi/battery.c
29091+++ b/drivers/acpi/battery.c
29092@@ -678,6 +678,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
29093
29094 static ssize_t acpi_battery_write_alarm(struct file *file,
29095 const char __user * buffer,
29096+ size_t count, loff_t * ppos) __size_overflow(3);
29097+static ssize_t acpi_battery_write_alarm(struct file *file,
29098+ const char __user * buffer,
29099 size_t count, loff_t * ppos)
29100 {
29101 int result = 0;
29102@@ -763,7 +766,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
29103 }
29104
29105 static struct battery_file {
29106- struct file_operations ops;
29107+ const struct file_operations ops;
29108 mode_t mode;
29109 const char *name;
29110 } acpi_battery_file[] = {
29111diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
29112index 7338b6a..82f0257 100644
29113--- a/drivers/acpi/dock.c
29114+++ b/drivers/acpi/dock.c
29115@@ -77,7 +77,7 @@ struct dock_dependent_device {
29116 struct list_head list;
29117 struct list_head hotplug_list;
29118 acpi_handle handle;
29119- struct acpi_dock_ops *ops;
29120+ const struct acpi_dock_ops *ops;
29121 void *context;
29122 };
29123
29124@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
29125 * the dock driver after _DCK is executed.
29126 */
29127 int
29128-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
29129+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
29130 void *context)
29131 {
29132 struct dock_dependent_device *dd;
29133diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
29134index 7c1c59e..2993595 100644
29135--- a/drivers/acpi/osl.c
29136+++ b/drivers/acpi/osl.c
29137@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
29138 void __iomem *virt_addr;
29139
29140 virt_addr = ioremap(phys_addr, width);
29141+ if (!virt_addr)
29142+ return AE_NO_MEMORY;
29143 if (!value)
29144 value = &dummy;
29145
29146@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
29147 void __iomem *virt_addr;
29148
29149 virt_addr = ioremap(phys_addr, width);
29150+ if (!virt_addr)
29151+ return AE_NO_MEMORY;
29152
29153 switch (width) {
29154 case 8:
29155diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
29156index c216062..eec10d2 100644
29157--- a/drivers/acpi/power_meter.c
29158+++ b/drivers/acpi/power_meter.c
29159@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29160 return res;
29161
29162 temp /= 1000;
29163- if (temp < 0)
29164- return -EINVAL;
29165
29166 mutex_lock(&resource->lock);
29167 resource->trip[attr->index - 7] = temp;
29168diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
29169index d0d25e2..961643d 100644
29170--- a/drivers/acpi/proc.c
29171+++ b/drivers/acpi/proc.c
29172@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
29173 size_t count, loff_t * ppos)
29174 {
29175 struct list_head *node, *next;
29176- char strbuf[5];
29177- char str[5] = "";
29178- unsigned int len = count;
29179+ char strbuf[5] = {0};
29180 struct acpi_device *found_dev = NULL;
29181
29182- if (len > 4)
29183- len = 4;
29184- if (len < 0)
29185- return -EFAULT;
29186+ if (count > 4)
29187+ count = 4;
29188
29189- if (copy_from_user(strbuf, buffer, len))
29190+ if (copy_from_user(strbuf, buffer, count))
29191 return -EFAULT;
29192- strbuf[len] = '\0';
29193- sscanf(strbuf, "%s", str);
29194+ strbuf[count] = '\0';
29195
29196 mutex_lock(&acpi_device_lock);
29197 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
29198@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
29199 if (!dev->wakeup.flags.valid)
29200 continue;
29201
29202- if (!strncmp(dev->pnp.bus_id, str, 4)) {
29203+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
29204 dev->wakeup.state.enabled =
29205 dev->wakeup.state.enabled ? 0 : 1;
29206 found_dev = dev;
29207diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
29208index 7102474..de8ad22 100644
29209--- a/drivers/acpi/processor_core.c
29210+++ b/drivers/acpi/processor_core.c
29211@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
29212 return 0;
29213 }
29214
29215- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
29216+ BUG_ON(pr->id >= nr_cpu_ids);
29217
29218 /*
29219 * Buggy BIOS check
29220diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
29221index 52b9db8..a519aab 100644
29222--- a/drivers/acpi/sbs.c
29223+++ b/drivers/acpi/sbs.c
29224@@ -647,6 +647,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
29225
29226 static ssize_t
29227 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29228+ size_t count, loff_t * ppos) __size_overflow(3);
29229+static ssize_t
29230+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
29231 size_t count, loff_t * ppos)
29232 {
29233 struct seq_file *seq = file->private_data;
29234diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
29235index d933980..5761f13 100644
29236--- a/drivers/acpi/sbshc.c
29237+++ b/drivers/acpi/sbshc.c
29238@@ -17,7 +17,7 @@
29239
29240 #define PREFIX "ACPI: "
29241
29242-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
29243+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
29244 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
29245
29246 struct acpi_smb_hc {
29247diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
29248index 0458094..6978e7b 100644
29249--- a/drivers/acpi/sleep.c
29250+++ b/drivers/acpi/sleep.c
29251@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
29252 }
29253 }
29254
29255-static struct platform_suspend_ops acpi_suspend_ops = {
29256+static const struct platform_suspend_ops acpi_suspend_ops = {
29257 .valid = acpi_suspend_state_valid,
29258 .begin = acpi_suspend_begin,
29259 .prepare_late = acpi_pm_prepare,
29260@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
29261 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29262 * been requested.
29263 */
29264-static struct platform_suspend_ops acpi_suspend_ops_old = {
29265+static const struct platform_suspend_ops acpi_suspend_ops_old = {
29266 .valid = acpi_suspend_state_valid,
29267 .begin = acpi_suspend_begin_old,
29268 .prepare_late = acpi_pm_disable_gpes,
29269@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
29270 acpi_enable_all_runtime_gpes();
29271 }
29272
29273-static struct platform_hibernation_ops acpi_hibernation_ops = {
29274+static const struct platform_hibernation_ops acpi_hibernation_ops = {
29275 .begin = acpi_hibernation_begin,
29276 .end = acpi_pm_end,
29277 .pre_snapshot = acpi_hibernation_pre_snapshot,
29278@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
29279 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
29280 * been requested.
29281 */
29282-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
29283+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
29284 .begin = acpi_hibernation_begin_old,
29285 .end = acpi_pm_end,
29286 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
29287diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
29288index 05dff63..b662ab7 100644
29289--- a/drivers/acpi/video.c
29290+++ b/drivers/acpi/video.c
29291@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
29292 vd->brightness->levels[request_level]);
29293 }
29294
29295-static struct backlight_ops acpi_backlight_ops = {
29296+static const struct backlight_ops acpi_backlight_ops = {
29297 .get_brightness = acpi_video_get_brightness,
29298 .update_status = acpi_video_set_brightness,
29299 };
29300diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
29301index 6787aab..23ffb0e 100644
29302--- a/drivers/ata/ahci.c
29303+++ b/drivers/ata/ahci.c
29304@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
29305 .sdev_attrs = ahci_sdev_attrs,
29306 };
29307
29308-static struct ata_port_operations ahci_ops = {
29309+static const struct ata_port_operations ahci_ops = {
29310 .inherits = &sata_pmp_port_ops,
29311
29312 .qc_defer = sata_pmp_qc_defer_cmd_switch,
29313@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
29314 .port_stop = ahci_port_stop,
29315 };
29316
29317-static struct ata_port_operations ahci_vt8251_ops = {
29318+static const struct ata_port_operations ahci_vt8251_ops = {
29319 .inherits = &ahci_ops,
29320 .hardreset = ahci_vt8251_hardreset,
29321 };
29322
29323-static struct ata_port_operations ahci_p5wdh_ops = {
29324+static const struct ata_port_operations ahci_p5wdh_ops = {
29325 .inherits = &ahci_ops,
29326 .hardreset = ahci_p5wdh_hardreset,
29327 };
29328
29329-static struct ata_port_operations ahci_sb600_ops = {
29330+static const struct ata_port_operations ahci_sb600_ops = {
29331 .inherits = &ahci_ops,
29332 .softreset = ahci_sb600_softreset,
29333 .pmp_softreset = ahci_sb600_softreset,
29334diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
29335index 99e7196..4968c77 100644
29336--- a/drivers/ata/ata_generic.c
29337+++ b/drivers/ata/ata_generic.c
29338@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
29339 ATA_BMDMA_SHT(DRV_NAME),
29340 };
29341
29342-static struct ata_port_operations generic_port_ops = {
29343+static const struct ata_port_operations generic_port_ops = {
29344 .inherits = &ata_bmdma_port_ops,
29345 .cable_detect = ata_cable_unknown,
29346 .set_mode = generic_set_mode,
29347diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
29348index c33591d..000c121 100644
29349--- a/drivers/ata/ata_piix.c
29350+++ b/drivers/ata/ata_piix.c
29351@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
29352 ATA_BMDMA_SHT(DRV_NAME),
29353 };
29354
29355-static struct ata_port_operations piix_pata_ops = {
29356+static const struct ata_port_operations piix_pata_ops = {
29357 .inherits = &ata_bmdma32_port_ops,
29358 .cable_detect = ata_cable_40wire,
29359 .set_piomode = piix_set_piomode,
29360@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
29361 .prereset = piix_pata_prereset,
29362 };
29363
29364-static struct ata_port_operations piix_vmw_ops = {
29365+static const struct ata_port_operations piix_vmw_ops = {
29366 .inherits = &piix_pata_ops,
29367 .bmdma_status = piix_vmw_bmdma_status,
29368 };
29369
29370-static struct ata_port_operations ich_pata_ops = {
29371+static const struct ata_port_operations ich_pata_ops = {
29372 .inherits = &piix_pata_ops,
29373 .cable_detect = ich_pata_cable_detect,
29374 .set_dmamode = ich_set_dmamode,
29375 };
29376
29377-static struct ata_port_operations piix_sata_ops = {
29378+static const struct ata_port_operations piix_sata_ops = {
29379 .inherits = &ata_bmdma_port_ops,
29380 };
29381
29382-static struct ata_port_operations piix_sidpr_sata_ops = {
29383+static const struct ata_port_operations piix_sidpr_sata_ops = {
29384 .inherits = &piix_sata_ops,
29385 .hardreset = sata_std_hardreset,
29386 .scr_read = piix_sidpr_scr_read,
29387diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
29388index b0882cd..c295d65 100644
29389--- a/drivers/ata/libata-acpi.c
29390+++ b/drivers/ata/libata-acpi.c
29391@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
29392 ata_acpi_uevent(dev->link->ap, dev, event);
29393 }
29394
29395-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29396+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
29397 .handler = ata_acpi_dev_notify_dock,
29398 .uevent = ata_acpi_dev_uevent,
29399 };
29400
29401-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29402+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
29403 .handler = ata_acpi_ap_notify_dock,
29404 .uevent = ata_acpi_ap_uevent,
29405 };
29406diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
29407index d4f7f99..94f603e 100644
29408--- a/drivers/ata/libata-core.c
29409+++ b/drivers/ata/libata-core.c
29410@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
29411 struct ata_port *ap;
29412 unsigned int tag;
29413
29414- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29415+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29416 ap = qc->ap;
29417
29418 qc->flags = 0;
29419@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
29420 struct ata_port *ap;
29421 struct ata_link *link;
29422
29423- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29424+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
29425 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
29426 ap = qc->ap;
29427 link = qc->dev->link;
29428@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
29429 * LOCKING:
29430 * None.
29431 */
29432-static void ata_finalize_port_ops(struct ata_port_operations *ops)
29433+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
29434 {
29435 static DEFINE_SPINLOCK(lock);
29436 const struct ata_port_operations *cur;
29437@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29438 return;
29439
29440 spin_lock(&lock);
29441+ pax_open_kernel();
29442
29443 for (cur = ops->inherits; cur; cur = cur->inherits) {
29444 void **inherit = (void **)cur;
29445@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
29446 if (IS_ERR(*pp))
29447 *pp = NULL;
29448
29449- ops->inherits = NULL;
29450+ *(struct ata_port_operations **)&ops->inherits = NULL;
29451
29452+ pax_close_kernel();
29453 spin_unlock(&lock);
29454 }
29455
29456@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
29457 */
29458 /* KILLME - the only user left is ipr */
29459 void ata_host_init(struct ata_host *host, struct device *dev,
29460- unsigned long flags, struct ata_port_operations *ops)
29461+ unsigned long flags, const struct ata_port_operations *ops)
29462 {
29463 spin_lock_init(&host->lock);
29464 host->dev = dev;
29465@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
29466 /* truly dummy */
29467 }
29468
29469-struct ata_port_operations ata_dummy_port_ops = {
29470+const struct ata_port_operations ata_dummy_port_ops = {
29471 .qc_prep = ata_noop_qc_prep,
29472 .qc_issue = ata_dummy_qc_issue,
29473 .error_handler = ata_dummy_error_handler,
29474diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
29475index e5bdb9b..45a8e72 100644
29476--- a/drivers/ata/libata-eh.c
29477+++ b/drivers/ata/libata-eh.c
29478@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
29479 {
29480 struct ata_link *link;
29481
29482+ pax_track_stack();
29483+
29484 ata_for_each_link(link, ap, HOST_FIRST)
29485 ata_eh_link_report(link);
29486 }
29487@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
29488 */
29489 void ata_std_error_handler(struct ata_port *ap)
29490 {
29491- struct ata_port_operations *ops = ap->ops;
29492+ const struct ata_port_operations *ops = ap->ops;
29493 ata_reset_fn_t hardreset = ops->hardreset;
29494
29495 /* ignore built-in hardreset if SCR access is not available */
29496diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
29497index 51f0ffb..19ce3e3 100644
29498--- a/drivers/ata/libata-pmp.c
29499+++ b/drivers/ata/libata-pmp.c
29500@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
29501 */
29502 static int sata_pmp_eh_recover(struct ata_port *ap)
29503 {
29504- struct ata_port_operations *ops = ap->ops;
29505+ const struct ata_port_operations *ops = ap->ops;
29506 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
29507 struct ata_link *pmp_link = &ap->link;
29508 struct ata_device *pmp_dev = pmp_link->device;
29509diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
29510index d8f35fe..288180a 100644
29511--- a/drivers/ata/pata_acpi.c
29512+++ b/drivers/ata/pata_acpi.c
29513@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
29514 ATA_BMDMA_SHT(DRV_NAME),
29515 };
29516
29517-static struct ata_port_operations pacpi_ops = {
29518+static const struct ata_port_operations pacpi_ops = {
29519 .inherits = &ata_bmdma_port_ops,
29520 .qc_issue = pacpi_qc_issue,
29521 .cable_detect = pacpi_cable_detect,
29522diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
29523index 9434114..1f2f364 100644
29524--- a/drivers/ata/pata_ali.c
29525+++ b/drivers/ata/pata_ali.c
29526@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
29527 * Port operations for PIO only ALi
29528 */
29529
29530-static struct ata_port_operations ali_early_port_ops = {
29531+static const struct ata_port_operations ali_early_port_ops = {
29532 .inherits = &ata_sff_port_ops,
29533 .cable_detect = ata_cable_40wire,
29534 .set_piomode = ali_set_piomode,
29535@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
29536 * Port operations for DMA capable ALi without cable
29537 * detect
29538 */
29539-static struct ata_port_operations ali_20_port_ops = {
29540+static const struct ata_port_operations ali_20_port_ops = {
29541 .inherits = &ali_dma_base_ops,
29542 .cable_detect = ata_cable_40wire,
29543 .mode_filter = ali_20_filter,
29544@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
29545 /*
29546 * Port operations for DMA capable ALi with cable detect
29547 */
29548-static struct ata_port_operations ali_c2_port_ops = {
29549+static const struct ata_port_operations ali_c2_port_ops = {
29550 .inherits = &ali_dma_base_ops,
29551 .check_atapi_dma = ali_check_atapi_dma,
29552 .cable_detect = ali_c2_cable_detect,
29553@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
29554 /*
29555 * Port operations for DMA capable ALi with cable detect
29556 */
29557-static struct ata_port_operations ali_c4_port_ops = {
29558+static const struct ata_port_operations ali_c4_port_ops = {
29559 .inherits = &ali_dma_base_ops,
29560 .check_atapi_dma = ali_check_atapi_dma,
29561 .cable_detect = ali_c2_cable_detect,
29562@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
29563 /*
29564 * Port operations for DMA capable ALi with cable detect and LBA48
29565 */
29566-static struct ata_port_operations ali_c5_port_ops = {
29567+static const struct ata_port_operations ali_c5_port_ops = {
29568 .inherits = &ali_dma_base_ops,
29569 .check_atapi_dma = ali_check_atapi_dma,
29570 .dev_config = ali_warn_atapi_dma,
29571diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
29572index 567f3f7..c8ee0da 100644
29573--- a/drivers/ata/pata_amd.c
29574+++ b/drivers/ata/pata_amd.c
29575@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
29576 .prereset = amd_pre_reset,
29577 };
29578
29579-static struct ata_port_operations amd33_port_ops = {
29580+static const struct ata_port_operations amd33_port_ops = {
29581 .inherits = &amd_base_port_ops,
29582 .cable_detect = ata_cable_40wire,
29583 .set_piomode = amd33_set_piomode,
29584 .set_dmamode = amd33_set_dmamode,
29585 };
29586
29587-static struct ata_port_operations amd66_port_ops = {
29588+static const struct ata_port_operations amd66_port_ops = {
29589 .inherits = &amd_base_port_ops,
29590 .cable_detect = ata_cable_unknown,
29591 .set_piomode = amd66_set_piomode,
29592 .set_dmamode = amd66_set_dmamode,
29593 };
29594
29595-static struct ata_port_operations amd100_port_ops = {
29596+static const struct ata_port_operations amd100_port_ops = {
29597 .inherits = &amd_base_port_ops,
29598 .cable_detect = ata_cable_unknown,
29599 .set_piomode = amd100_set_piomode,
29600 .set_dmamode = amd100_set_dmamode,
29601 };
29602
29603-static struct ata_port_operations amd133_port_ops = {
29604+static const struct ata_port_operations amd133_port_ops = {
29605 .inherits = &amd_base_port_ops,
29606 .cable_detect = amd_cable_detect,
29607 .set_piomode = amd133_set_piomode,
29608@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
29609 .host_stop = nv_host_stop,
29610 };
29611
29612-static struct ata_port_operations nv100_port_ops = {
29613+static const struct ata_port_operations nv100_port_ops = {
29614 .inherits = &nv_base_port_ops,
29615 .set_piomode = nv100_set_piomode,
29616 .set_dmamode = nv100_set_dmamode,
29617 };
29618
29619-static struct ata_port_operations nv133_port_ops = {
29620+static const struct ata_port_operations nv133_port_ops = {
29621 .inherits = &nv_base_port_ops,
29622 .set_piomode = nv133_set_piomode,
29623 .set_dmamode = nv133_set_dmamode,
29624diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
29625index d332cfd..4b7eaae 100644
29626--- a/drivers/ata/pata_artop.c
29627+++ b/drivers/ata/pata_artop.c
29628@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
29629 ATA_BMDMA_SHT(DRV_NAME),
29630 };
29631
29632-static struct ata_port_operations artop6210_ops = {
29633+static const struct ata_port_operations artop6210_ops = {
29634 .inherits = &ata_bmdma_port_ops,
29635 .cable_detect = ata_cable_40wire,
29636 .set_piomode = artop6210_set_piomode,
29637@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
29638 .qc_defer = artop6210_qc_defer,
29639 };
29640
29641-static struct ata_port_operations artop6260_ops = {
29642+static const struct ata_port_operations artop6260_ops = {
29643 .inherits = &ata_bmdma_port_ops,
29644 .cable_detect = artop6260_cable_detect,
29645 .set_piomode = artop6260_set_piomode,
29646diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
29647index 5c129f9..7bb7ccb 100644
29648--- a/drivers/ata/pata_at32.c
29649+++ b/drivers/ata/pata_at32.c
29650@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
29651 ATA_PIO_SHT(DRV_NAME),
29652 };
29653
29654-static struct ata_port_operations at32_port_ops = {
29655+static const struct ata_port_operations at32_port_ops = {
29656 .inherits = &ata_sff_port_ops,
29657 .cable_detect = ata_cable_40wire,
29658 .set_piomode = pata_at32_set_piomode,
29659diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
29660index 41c94b1..829006d 100644
29661--- a/drivers/ata/pata_at91.c
29662+++ b/drivers/ata/pata_at91.c
29663@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
29664 ATA_PIO_SHT(DRV_NAME),
29665 };
29666
29667-static struct ata_port_operations pata_at91_port_ops = {
29668+static const struct ata_port_operations pata_at91_port_ops = {
29669 .inherits = &ata_sff_port_ops,
29670
29671 .sff_data_xfer = pata_at91_data_xfer_noirq,
29672diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
29673index ae4454d..d391eb4 100644
29674--- a/drivers/ata/pata_atiixp.c
29675+++ b/drivers/ata/pata_atiixp.c
29676@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
29677 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29678 };
29679
29680-static struct ata_port_operations atiixp_port_ops = {
29681+static const struct ata_port_operations atiixp_port_ops = {
29682 .inherits = &ata_bmdma_port_ops,
29683
29684 .qc_prep = ata_sff_dumb_qc_prep,
29685diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
29686index 6fe7ded..2a425dc 100644
29687--- a/drivers/ata/pata_atp867x.c
29688+++ b/drivers/ata/pata_atp867x.c
29689@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
29690 ATA_BMDMA_SHT(DRV_NAME),
29691 };
29692
29693-static struct ata_port_operations atp867x_ops = {
29694+static const struct ata_port_operations atp867x_ops = {
29695 .inherits = &ata_bmdma_port_ops,
29696 .cable_detect = atp867x_cable_detect,
29697 .set_piomode = atp867x_set_piomode,
29698diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
29699index c4b47a3..b27a367 100644
29700--- a/drivers/ata/pata_bf54x.c
29701+++ b/drivers/ata/pata_bf54x.c
29702@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
29703 .dma_boundary = ATA_DMA_BOUNDARY,
29704 };
29705
29706-static struct ata_port_operations bfin_pata_ops = {
29707+static const struct ata_port_operations bfin_pata_ops = {
29708 .inherits = &ata_sff_port_ops,
29709
29710 .set_piomode = bfin_set_piomode,
29711diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
29712index 5acf9fa..84248be 100644
29713--- a/drivers/ata/pata_cmd640.c
29714+++ b/drivers/ata/pata_cmd640.c
29715@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
29716 ATA_BMDMA_SHT(DRV_NAME),
29717 };
29718
29719-static struct ata_port_operations cmd640_port_ops = {
29720+static const struct ata_port_operations cmd640_port_ops = {
29721 .inherits = &ata_bmdma_port_ops,
29722 /* In theory xfer_noirq is not needed once we kill the prefetcher */
29723 .sff_data_xfer = ata_sff_data_xfer_noirq,
29724diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
29725index ccd2694..c869c3d 100644
29726--- a/drivers/ata/pata_cmd64x.c
29727+++ b/drivers/ata/pata_cmd64x.c
29728@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
29729 .set_dmamode = cmd64x_set_dmamode,
29730 };
29731
29732-static struct ata_port_operations cmd64x_port_ops = {
29733+static const struct ata_port_operations cmd64x_port_ops = {
29734 .inherits = &cmd64x_base_ops,
29735 .cable_detect = ata_cable_40wire,
29736 };
29737
29738-static struct ata_port_operations cmd646r1_port_ops = {
29739+static const struct ata_port_operations cmd646r1_port_ops = {
29740 .inherits = &cmd64x_base_ops,
29741 .bmdma_stop = cmd646r1_bmdma_stop,
29742 .cable_detect = ata_cable_40wire,
29743 };
29744
29745-static struct ata_port_operations cmd648_port_ops = {
29746+static const struct ata_port_operations cmd648_port_ops = {
29747 .inherits = &cmd64x_base_ops,
29748 .bmdma_stop = cmd648_bmdma_stop,
29749 .cable_detect = cmd648_cable_detect,
29750diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
29751index 0df83cf..d7595b0 100644
29752--- a/drivers/ata/pata_cs5520.c
29753+++ b/drivers/ata/pata_cs5520.c
29754@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
29755 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29756 };
29757
29758-static struct ata_port_operations cs5520_port_ops = {
29759+static const struct ata_port_operations cs5520_port_ops = {
29760 .inherits = &ata_bmdma_port_ops,
29761 .qc_prep = ata_sff_dumb_qc_prep,
29762 .cable_detect = ata_cable_40wire,
29763diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
29764index c974b05..6d26b11 100644
29765--- a/drivers/ata/pata_cs5530.c
29766+++ b/drivers/ata/pata_cs5530.c
29767@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
29768 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29769 };
29770
29771-static struct ata_port_operations cs5530_port_ops = {
29772+static const struct ata_port_operations cs5530_port_ops = {
29773 .inherits = &ata_bmdma_port_ops,
29774
29775 .qc_prep = ata_sff_dumb_qc_prep,
29776diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
29777index 403f561..aacd26b 100644
29778--- a/drivers/ata/pata_cs5535.c
29779+++ b/drivers/ata/pata_cs5535.c
29780@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
29781 ATA_BMDMA_SHT(DRV_NAME),
29782 };
29783
29784-static struct ata_port_operations cs5535_port_ops = {
29785+static const struct ata_port_operations cs5535_port_ops = {
29786 .inherits = &ata_bmdma_port_ops,
29787 .cable_detect = cs5535_cable_detect,
29788 .set_piomode = cs5535_set_piomode,
29789diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
29790index 6da4cb4..de24a25 100644
29791--- a/drivers/ata/pata_cs5536.c
29792+++ b/drivers/ata/pata_cs5536.c
29793@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
29794 ATA_BMDMA_SHT(DRV_NAME),
29795 };
29796
29797-static struct ata_port_operations cs5536_port_ops = {
29798+static const struct ata_port_operations cs5536_port_ops = {
29799 .inherits = &ata_bmdma_port_ops,
29800 .cable_detect = cs5536_cable_detect,
29801 .set_piomode = cs5536_set_piomode,
29802diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
29803index 8fb040b..b16a9c9 100644
29804--- a/drivers/ata/pata_cypress.c
29805+++ b/drivers/ata/pata_cypress.c
29806@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
29807 ATA_BMDMA_SHT(DRV_NAME),
29808 };
29809
29810-static struct ata_port_operations cy82c693_port_ops = {
29811+static const struct ata_port_operations cy82c693_port_ops = {
29812 .inherits = &ata_bmdma_port_ops,
29813 .cable_detect = ata_cable_40wire,
29814 .set_piomode = cy82c693_set_piomode,
29815diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
29816index 2a6412f..555ee11 100644
29817--- a/drivers/ata/pata_efar.c
29818+++ b/drivers/ata/pata_efar.c
29819@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
29820 ATA_BMDMA_SHT(DRV_NAME),
29821 };
29822
29823-static struct ata_port_operations efar_ops = {
29824+static const struct ata_port_operations efar_ops = {
29825 .inherits = &ata_bmdma_port_ops,
29826 .cable_detect = efar_cable_detect,
29827 .set_piomode = efar_set_piomode,
29828diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
29829index b9d8836..0b92030 100644
29830--- a/drivers/ata/pata_hpt366.c
29831+++ b/drivers/ata/pata_hpt366.c
29832@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
29833 * Configuration for HPT366/68
29834 */
29835
29836-static struct ata_port_operations hpt366_port_ops = {
29837+static const struct ata_port_operations hpt366_port_ops = {
29838 .inherits = &ata_bmdma_port_ops,
29839 .cable_detect = hpt36x_cable_detect,
29840 .mode_filter = hpt366_filter,
29841diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
29842index 5af7f19..00c4980 100644
29843--- a/drivers/ata/pata_hpt37x.c
29844+++ b/drivers/ata/pata_hpt37x.c
29845@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
29846 * Configuration for HPT370
29847 */
29848
29849-static struct ata_port_operations hpt370_port_ops = {
29850+static const struct ata_port_operations hpt370_port_ops = {
29851 .inherits = &ata_bmdma_port_ops,
29852
29853 .bmdma_stop = hpt370_bmdma_stop,
29854@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
29855 * Configuration for HPT370A. Close to 370 but less filters
29856 */
29857
29858-static struct ata_port_operations hpt370a_port_ops = {
29859+static const struct ata_port_operations hpt370a_port_ops = {
29860 .inherits = &hpt370_port_ops,
29861 .mode_filter = hpt370a_filter,
29862 };
29863@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
29864 * and DMA mode setting functionality.
29865 */
29866
29867-static struct ata_port_operations hpt372_port_ops = {
29868+static const struct ata_port_operations hpt372_port_ops = {
29869 .inherits = &ata_bmdma_port_ops,
29870
29871 .bmdma_stop = hpt37x_bmdma_stop,
29872@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
29873 * but we have a different cable detection procedure for function 1.
29874 */
29875
29876-static struct ata_port_operations hpt374_fn1_port_ops = {
29877+static const struct ata_port_operations hpt374_fn1_port_ops = {
29878 .inherits = &hpt372_port_ops,
29879 .prereset = hpt374_fn1_pre_reset,
29880 };
29881diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
29882index 100f227..2e39382 100644
29883--- a/drivers/ata/pata_hpt3x2n.c
29884+++ b/drivers/ata/pata_hpt3x2n.c
29885@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
29886 * Configuration for HPT3x2n.
29887 */
29888
29889-static struct ata_port_operations hpt3x2n_port_ops = {
29890+static const struct ata_port_operations hpt3x2n_port_ops = {
29891 .inherits = &ata_bmdma_port_ops,
29892
29893 .bmdma_stop = hpt3x2n_bmdma_stop,
29894diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
29895index 7e31025..6fca8f4 100644
29896--- a/drivers/ata/pata_hpt3x3.c
29897+++ b/drivers/ata/pata_hpt3x3.c
29898@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
29899 ATA_BMDMA_SHT(DRV_NAME),
29900 };
29901
29902-static struct ata_port_operations hpt3x3_port_ops = {
29903+static const struct ata_port_operations hpt3x3_port_ops = {
29904 .inherits = &ata_bmdma_port_ops,
29905 .cable_detect = ata_cable_40wire,
29906 .set_piomode = hpt3x3_set_piomode,
29907diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
29908index b663b7f..9a26c2a 100644
29909--- a/drivers/ata/pata_icside.c
29910+++ b/drivers/ata/pata_icside.c
29911@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
29912 }
29913 }
29914
29915-static struct ata_port_operations pata_icside_port_ops = {
29916+static const struct ata_port_operations pata_icside_port_ops = {
29917 .inherits = &ata_sff_port_ops,
29918 /* no need to build any PRD tables for DMA */
29919 .qc_prep = ata_noop_qc_prep,
29920diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
29921index 4bceb88..457dfb6 100644
29922--- a/drivers/ata/pata_isapnp.c
29923+++ b/drivers/ata/pata_isapnp.c
29924@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
29925 ATA_PIO_SHT(DRV_NAME),
29926 };
29927
29928-static struct ata_port_operations isapnp_port_ops = {
29929+static const struct ata_port_operations isapnp_port_ops = {
29930 .inherits = &ata_sff_port_ops,
29931 .cable_detect = ata_cable_40wire,
29932 };
29933
29934-static struct ata_port_operations isapnp_noalt_port_ops = {
29935+static const struct ata_port_operations isapnp_noalt_port_ops = {
29936 .inherits = &ata_sff_port_ops,
29937 .cable_detect = ata_cable_40wire,
29938 /* No altstatus so we don't want to use the lost interrupt poll */
29939diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
29940index f156da8..24976e2 100644
29941--- a/drivers/ata/pata_it8213.c
29942+++ b/drivers/ata/pata_it8213.c
29943@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
29944 };
29945
29946
29947-static struct ata_port_operations it8213_ops = {
29948+static const struct ata_port_operations it8213_ops = {
29949 .inherits = &ata_bmdma_port_ops,
29950 .cable_detect = it8213_cable_detect,
29951 .set_piomode = it8213_set_piomode,
29952diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
29953index 188bc2f..ca9e785 100644
29954--- a/drivers/ata/pata_it821x.c
29955+++ b/drivers/ata/pata_it821x.c
29956@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
29957 ATA_BMDMA_SHT(DRV_NAME),
29958 };
29959
29960-static struct ata_port_operations it821x_smart_port_ops = {
29961+static const struct ata_port_operations it821x_smart_port_ops = {
29962 .inherits = &ata_bmdma_port_ops,
29963
29964 .check_atapi_dma= it821x_check_atapi_dma,
29965@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
29966 .port_start = it821x_port_start,
29967 };
29968
29969-static struct ata_port_operations it821x_passthru_port_ops = {
29970+static const struct ata_port_operations it821x_passthru_port_ops = {
29971 .inherits = &ata_bmdma_port_ops,
29972
29973 .check_atapi_dma= it821x_check_atapi_dma,
29974@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
29975 .port_start = it821x_port_start,
29976 };
29977
29978-static struct ata_port_operations it821x_rdc_port_ops = {
29979+static const struct ata_port_operations it821x_rdc_port_ops = {
29980 .inherits = &ata_bmdma_port_ops,
29981
29982 .check_atapi_dma= it821x_check_atapi_dma,
29983diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
29984index ba54b08..4b952b7 100644
29985--- a/drivers/ata/pata_ixp4xx_cf.c
29986+++ b/drivers/ata/pata_ixp4xx_cf.c
29987@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
29988 ATA_PIO_SHT(DRV_NAME),
29989 };
29990
29991-static struct ata_port_operations ixp4xx_port_ops = {
29992+static const struct ata_port_operations ixp4xx_port_ops = {
29993 .inherits = &ata_sff_port_ops,
29994 .sff_data_xfer = ixp4xx_mmio_data_xfer,
29995 .cable_detect = ata_cable_40wire,
29996diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
29997index 3a1474a..434b0ff 100644
29998--- a/drivers/ata/pata_jmicron.c
29999+++ b/drivers/ata/pata_jmicron.c
30000@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
30001 ATA_BMDMA_SHT(DRV_NAME),
30002 };
30003
30004-static struct ata_port_operations jmicron_ops = {
30005+static const struct ata_port_operations jmicron_ops = {
30006 .inherits = &ata_bmdma_port_ops,
30007 .prereset = jmicron_pre_reset,
30008 };
30009diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
30010index 6932e56..220e71d 100644
30011--- a/drivers/ata/pata_legacy.c
30012+++ b/drivers/ata/pata_legacy.c
30013@@ -106,7 +106,7 @@ struct legacy_probe {
30014
30015 struct legacy_controller {
30016 const char *name;
30017- struct ata_port_operations *ops;
30018+ const struct ata_port_operations *ops;
30019 unsigned int pio_mask;
30020 unsigned int flags;
30021 unsigned int pflags;
30022@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
30023 * pio_mask as well.
30024 */
30025
30026-static struct ata_port_operations simple_port_ops = {
30027+static const struct ata_port_operations simple_port_ops = {
30028 .inherits = &legacy_base_port_ops,
30029 .sff_data_xfer = ata_sff_data_xfer_noirq,
30030 };
30031
30032-static struct ata_port_operations legacy_port_ops = {
30033+static const struct ata_port_operations legacy_port_ops = {
30034 .inherits = &legacy_base_port_ops,
30035 .sff_data_xfer = ata_sff_data_xfer_noirq,
30036 .set_mode = legacy_set_mode,
30037@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
30038 return buflen;
30039 }
30040
30041-static struct ata_port_operations pdc20230_port_ops = {
30042+static const struct ata_port_operations pdc20230_port_ops = {
30043 .inherits = &legacy_base_port_ops,
30044 .set_piomode = pdc20230_set_piomode,
30045 .sff_data_xfer = pdc_data_xfer_vlb,
30046@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
30047 ioread8(ap->ioaddr.status_addr);
30048 }
30049
30050-static struct ata_port_operations ht6560a_port_ops = {
30051+static const struct ata_port_operations ht6560a_port_ops = {
30052 .inherits = &legacy_base_port_ops,
30053 .set_piomode = ht6560a_set_piomode,
30054 };
30055@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
30056 ioread8(ap->ioaddr.status_addr);
30057 }
30058
30059-static struct ata_port_operations ht6560b_port_ops = {
30060+static const struct ata_port_operations ht6560b_port_ops = {
30061 .inherits = &legacy_base_port_ops,
30062 .set_piomode = ht6560b_set_piomode,
30063 };
30064@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
30065 }
30066
30067
30068-static struct ata_port_operations opti82c611a_port_ops = {
30069+static const struct ata_port_operations opti82c611a_port_ops = {
30070 .inherits = &legacy_base_port_ops,
30071 .set_piomode = opti82c611a_set_piomode,
30072 };
30073@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
30074 return ata_sff_qc_issue(qc);
30075 }
30076
30077-static struct ata_port_operations opti82c46x_port_ops = {
30078+static const struct ata_port_operations opti82c46x_port_ops = {
30079 .inherits = &legacy_base_port_ops,
30080 .set_piomode = opti82c46x_set_piomode,
30081 .qc_issue = opti82c46x_qc_issue,
30082@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
30083 return 0;
30084 }
30085
30086-static struct ata_port_operations qdi6500_port_ops = {
30087+static const struct ata_port_operations qdi6500_port_ops = {
30088 .inherits = &legacy_base_port_ops,
30089 .set_piomode = qdi6500_set_piomode,
30090 .qc_issue = qdi_qc_issue,
30091 .sff_data_xfer = vlb32_data_xfer,
30092 };
30093
30094-static struct ata_port_operations qdi6580_port_ops = {
30095+static const struct ata_port_operations qdi6580_port_ops = {
30096 .inherits = &legacy_base_port_ops,
30097 .set_piomode = qdi6580_set_piomode,
30098 .sff_data_xfer = vlb32_data_xfer,
30099 };
30100
30101-static struct ata_port_operations qdi6580dp_port_ops = {
30102+static const struct ata_port_operations qdi6580dp_port_ops = {
30103 .inherits = &legacy_base_port_ops,
30104 .set_piomode = qdi6580dp_set_piomode,
30105 .sff_data_xfer = vlb32_data_xfer,
30106@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
30107 return 0;
30108 }
30109
30110-static struct ata_port_operations winbond_port_ops = {
30111+static const struct ata_port_operations winbond_port_ops = {
30112 .inherits = &legacy_base_port_ops,
30113 .set_piomode = winbond_set_piomode,
30114 .sff_data_xfer = vlb32_data_xfer,
30115@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
30116 int pio_modes = controller->pio_mask;
30117 unsigned long io = probe->port;
30118 u32 mask = (1 << probe->slot);
30119- struct ata_port_operations *ops = controller->ops;
30120+ const struct ata_port_operations *ops = controller->ops;
30121 struct legacy_data *ld = &legacy_data[probe->slot];
30122 struct ata_host *host = NULL;
30123 struct ata_port *ap;
30124diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
30125index 2096fb7..4d090fc 100644
30126--- a/drivers/ata/pata_marvell.c
30127+++ b/drivers/ata/pata_marvell.c
30128@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
30129 ATA_BMDMA_SHT(DRV_NAME),
30130 };
30131
30132-static struct ata_port_operations marvell_ops = {
30133+static const struct ata_port_operations marvell_ops = {
30134 .inherits = &ata_bmdma_port_ops,
30135 .cable_detect = marvell_cable_detect,
30136 .prereset = marvell_pre_reset,
30137diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
30138index 99d41be..7d56aa8 100644
30139--- a/drivers/ata/pata_mpc52xx.c
30140+++ b/drivers/ata/pata_mpc52xx.c
30141@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
30142 ATA_PIO_SHT(DRV_NAME),
30143 };
30144
30145-static struct ata_port_operations mpc52xx_ata_port_ops = {
30146+static const struct ata_port_operations mpc52xx_ata_port_ops = {
30147 .inherits = &ata_bmdma_port_ops,
30148 .sff_dev_select = mpc52xx_ata_dev_select,
30149 .set_piomode = mpc52xx_ata_set_piomode,
30150diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
30151index b21f002..0a27e7f 100644
30152--- a/drivers/ata/pata_mpiix.c
30153+++ b/drivers/ata/pata_mpiix.c
30154@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
30155 ATA_PIO_SHT(DRV_NAME),
30156 };
30157
30158-static struct ata_port_operations mpiix_port_ops = {
30159+static const struct ata_port_operations mpiix_port_ops = {
30160 .inherits = &ata_sff_port_ops,
30161 .qc_issue = mpiix_qc_issue,
30162 .cable_detect = ata_cable_40wire,
30163diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
30164index f0d52f7..89c3be3 100644
30165--- a/drivers/ata/pata_netcell.c
30166+++ b/drivers/ata/pata_netcell.c
30167@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
30168 ATA_BMDMA_SHT(DRV_NAME),
30169 };
30170
30171-static struct ata_port_operations netcell_ops = {
30172+static const struct ata_port_operations netcell_ops = {
30173 .inherits = &ata_bmdma_port_ops,
30174 .cable_detect = ata_cable_80wire,
30175 .read_id = netcell_read_id,
30176diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
30177index dd53a66..a3f4317 100644
30178--- a/drivers/ata/pata_ninja32.c
30179+++ b/drivers/ata/pata_ninja32.c
30180@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
30181 ATA_BMDMA_SHT(DRV_NAME),
30182 };
30183
30184-static struct ata_port_operations ninja32_port_ops = {
30185+static const struct ata_port_operations ninja32_port_ops = {
30186 .inherits = &ata_bmdma_port_ops,
30187 .sff_dev_select = ninja32_dev_select,
30188 .cable_detect = ata_cable_40wire,
30189diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
30190index ca53fac..9aa93ef 100644
30191--- a/drivers/ata/pata_ns87410.c
30192+++ b/drivers/ata/pata_ns87410.c
30193@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
30194 ATA_PIO_SHT(DRV_NAME),
30195 };
30196
30197-static struct ata_port_operations ns87410_port_ops = {
30198+static const struct ata_port_operations ns87410_port_ops = {
30199 .inherits = &ata_sff_port_ops,
30200 .qc_issue = ns87410_qc_issue,
30201 .cable_detect = ata_cable_40wire,
30202diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
30203index 773b159..55f454e 100644
30204--- a/drivers/ata/pata_ns87415.c
30205+++ b/drivers/ata/pata_ns87415.c
30206@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
30207 }
30208 #endif /* 87560 SuperIO Support */
30209
30210-static struct ata_port_operations ns87415_pata_ops = {
30211+static const struct ata_port_operations ns87415_pata_ops = {
30212 .inherits = &ata_bmdma_port_ops,
30213
30214 .check_atapi_dma = ns87415_check_atapi_dma,
30215@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
30216 };
30217
30218 #if defined(CONFIG_SUPERIO)
30219-static struct ata_port_operations ns87560_pata_ops = {
30220+static const struct ata_port_operations ns87560_pata_ops = {
30221 .inherits = &ns87415_pata_ops,
30222 .sff_tf_read = ns87560_tf_read,
30223 .sff_check_status = ns87560_check_status,
30224diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
30225index d6f6956..639295b 100644
30226--- a/drivers/ata/pata_octeon_cf.c
30227+++ b/drivers/ata/pata_octeon_cf.c
30228@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
30229 return 0;
30230 }
30231
30232+/* cannot be const */
30233 static struct ata_port_operations octeon_cf_ops = {
30234 .inherits = &ata_sff_port_ops,
30235 .check_atapi_dma = octeon_cf_check_atapi_dma,
30236diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
30237index 84ac503..adee1cd 100644
30238--- a/drivers/ata/pata_oldpiix.c
30239+++ b/drivers/ata/pata_oldpiix.c
30240@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
30241 ATA_BMDMA_SHT(DRV_NAME),
30242 };
30243
30244-static struct ata_port_operations oldpiix_pata_ops = {
30245+static const struct ata_port_operations oldpiix_pata_ops = {
30246 .inherits = &ata_bmdma_port_ops,
30247 .qc_issue = oldpiix_qc_issue,
30248 .cable_detect = ata_cable_40wire,
30249diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
30250index 99eddda..3a4c0aa 100644
30251--- a/drivers/ata/pata_opti.c
30252+++ b/drivers/ata/pata_opti.c
30253@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
30254 ATA_PIO_SHT(DRV_NAME),
30255 };
30256
30257-static struct ata_port_operations opti_port_ops = {
30258+static const struct ata_port_operations opti_port_ops = {
30259 .inherits = &ata_sff_port_ops,
30260 .cable_detect = ata_cable_40wire,
30261 .set_piomode = opti_set_piomode,
30262diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
30263index 86885a4..8e9968d 100644
30264--- a/drivers/ata/pata_optidma.c
30265+++ b/drivers/ata/pata_optidma.c
30266@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
30267 ATA_BMDMA_SHT(DRV_NAME),
30268 };
30269
30270-static struct ata_port_operations optidma_port_ops = {
30271+static const struct ata_port_operations optidma_port_ops = {
30272 .inherits = &ata_bmdma_port_ops,
30273 .cable_detect = ata_cable_40wire,
30274 .set_piomode = optidma_set_pio_mode,
30275@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
30276 .prereset = optidma_pre_reset,
30277 };
30278
30279-static struct ata_port_operations optiplus_port_ops = {
30280+static const struct ata_port_operations optiplus_port_ops = {
30281 .inherits = &optidma_port_ops,
30282 .set_piomode = optiplus_set_pio_mode,
30283 .set_dmamode = optiplus_set_dma_mode,
30284diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
30285index 11fb4cc..1a14022 100644
30286--- a/drivers/ata/pata_palmld.c
30287+++ b/drivers/ata/pata_palmld.c
30288@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
30289 ATA_PIO_SHT(DRV_NAME),
30290 };
30291
30292-static struct ata_port_operations palmld_port_ops = {
30293+static const struct ata_port_operations palmld_port_ops = {
30294 .inherits = &ata_sff_port_ops,
30295 .sff_data_xfer = ata_sff_data_xfer_noirq,
30296 .cable_detect = ata_cable_40wire,
30297diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
30298index dc99e26..7f4b1e4 100644
30299--- a/drivers/ata/pata_pcmcia.c
30300+++ b/drivers/ata/pata_pcmcia.c
30301@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
30302 ATA_PIO_SHT(DRV_NAME),
30303 };
30304
30305-static struct ata_port_operations pcmcia_port_ops = {
30306+static const struct ata_port_operations pcmcia_port_ops = {
30307 .inherits = &ata_sff_port_ops,
30308 .sff_data_xfer = ata_sff_data_xfer_noirq,
30309 .cable_detect = ata_cable_40wire,
30310 .set_mode = pcmcia_set_mode,
30311 };
30312
30313-static struct ata_port_operations pcmcia_8bit_port_ops = {
30314+static const struct ata_port_operations pcmcia_8bit_port_ops = {
30315 .inherits = &ata_sff_port_ops,
30316 .sff_data_xfer = ata_data_xfer_8bit,
30317 .cable_detect = ata_cable_40wire,
30318@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
30319 unsigned long io_base, ctl_base;
30320 void __iomem *io_addr, *ctl_addr;
30321 int n_ports = 1;
30322- struct ata_port_operations *ops = &pcmcia_port_ops;
30323+ const struct ata_port_operations *ops = &pcmcia_port_ops;
30324
30325 info = kzalloc(sizeof(*info), GFP_KERNEL);
30326 if (info == NULL)
30327diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
30328index ca5cad0..3a1f125 100644
30329--- a/drivers/ata/pata_pdc2027x.c
30330+++ b/drivers/ata/pata_pdc2027x.c
30331@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
30332 ATA_BMDMA_SHT(DRV_NAME),
30333 };
30334
30335-static struct ata_port_operations pdc2027x_pata100_ops = {
30336+static const struct ata_port_operations pdc2027x_pata100_ops = {
30337 .inherits = &ata_bmdma_port_ops,
30338 .check_atapi_dma = pdc2027x_check_atapi_dma,
30339 .cable_detect = pdc2027x_cable_detect,
30340 .prereset = pdc2027x_prereset,
30341 };
30342
30343-static struct ata_port_operations pdc2027x_pata133_ops = {
30344+static const struct ata_port_operations pdc2027x_pata133_ops = {
30345 .inherits = &pdc2027x_pata100_ops,
30346 .mode_filter = pdc2027x_mode_filter,
30347 .set_piomode = pdc2027x_set_piomode,
30348diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
30349index 2911120..4bf62aa 100644
30350--- a/drivers/ata/pata_pdc202xx_old.c
30351+++ b/drivers/ata/pata_pdc202xx_old.c
30352@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
30353 ATA_BMDMA_SHT(DRV_NAME),
30354 };
30355
30356-static struct ata_port_operations pdc2024x_port_ops = {
30357+static const struct ata_port_operations pdc2024x_port_ops = {
30358 .inherits = &ata_bmdma_port_ops,
30359
30360 .cable_detect = ata_cable_40wire,
30361@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
30362 .sff_exec_command = pdc202xx_exec_command,
30363 };
30364
30365-static struct ata_port_operations pdc2026x_port_ops = {
30366+static const struct ata_port_operations pdc2026x_port_ops = {
30367 .inherits = &pdc2024x_port_ops,
30368
30369 .check_atapi_dma = pdc2026x_check_atapi_dma,
30370diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
30371index 3f6ebc6..a18c358 100644
30372--- a/drivers/ata/pata_platform.c
30373+++ b/drivers/ata/pata_platform.c
30374@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
30375 ATA_PIO_SHT(DRV_NAME),
30376 };
30377
30378-static struct ata_port_operations pata_platform_port_ops = {
30379+static const struct ata_port_operations pata_platform_port_ops = {
30380 .inherits = &ata_sff_port_ops,
30381 .sff_data_xfer = ata_sff_data_xfer_noirq,
30382 .cable_detect = ata_cable_unknown,
30383diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
30384index 45879dc..165a9f9 100644
30385--- a/drivers/ata/pata_qdi.c
30386+++ b/drivers/ata/pata_qdi.c
30387@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
30388 ATA_PIO_SHT(DRV_NAME),
30389 };
30390
30391-static struct ata_port_operations qdi6500_port_ops = {
30392+static const struct ata_port_operations qdi6500_port_ops = {
30393 .inherits = &ata_sff_port_ops,
30394 .qc_issue = qdi_qc_issue,
30395 .sff_data_xfer = qdi_data_xfer,
30396@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
30397 .set_piomode = qdi6500_set_piomode,
30398 };
30399
30400-static struct ata_port_operations qdi6580_port_ops = {
30401+static const struct ata_port_operations qdi6580_port_ops = {
30402 .inherits = &qdi6500_port_ops,
30403 .set_piomode = qdi6580_set_piomode,
30404 };
30405diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
30406index 4401b33..716c5cc 100644
30407--- a/drivers/ata/pata_radisys.c
30408+++ b/drivers/ata/pata_radisys.c
30409@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
30410 ATA_BMDMA_SHT(DRV_NAME),
30411 };
30412
30413-static struct ata_port_operations radisys_pata_ops = {
30414+static const struct ata_port_operations radisys_pata_ops = {
30415 .inherits = &ata_bmdma_port_ops,
30416 .qc_issue = radisys_qc_issue,
30417 .cable_detect = ata_cable_unknown,
30418diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
30419index 45f1e10..fab6bca 100644
30420--- a/drivers/ata/pata_rb532_cf.c
30421+++ b/drivers/ata/pata_rb532_cf.c
30422@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
30423 return IRQ_HANDLED;
30424 }
30425
30426-static struct ata_port_operations rb532_pata_port_ops = {
30427+static const struct ata_port_operations rb532_pata_port_ops = {
30428 .inherits = &ata_sff_port_ops,
30429 .sff_data_xfer = ata_sff_data_xfer32,
30430 };
30431diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
30432index c843a1e..b5853c3 100644
30433--- a/drivers/ata/pata_rdc.c
30434+++ b/drivers/ata/pata_rdc.c
30435@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
30436 pci_write_config_byte(dev, 0x48, udma_enable);
30437 }
30438
30439-static struct ata_port_operations rdc_pata_ops = {
30440+static const struct ata_port_operations rdc_pata_ops = {
30441 .inherits = &ata_bmdma32_port_ops,
30442 .cable_detect = rdc_pata_cable_detect,
30443 .set_piomode = rdc_set_piomode,
30444diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
30445index a5e4dfe..080c8c9 100644
30446--- a/drivers/ata/pata_rz1000.c
30447+++ b/drivers/ata/pata_rz1000.c
30448@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
30449 ATA_PIO_SHT(DRV_NAME),
30450 };
30451
30452-static struct ata_port_operations rz1000_port_ops = {
30453+static const struct ata_port_operations rz1000_port_ops = {
30454 .inherits = &ata_sff_port_ops,
30455 .cable_detect = ata_cable_40wire,
30456 .set_mode = rz1000_set_mode,
30457diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
30458index 3bbed83..e309daf 100644
30459--- a/drivers/ata/pata_sc1200.c
30460+++ b/drivers/ata/pata_sc1200.c
30461@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
30462 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
30463 };
30464
30465-static struct ata_port_operations sc1200_port_ops = {
30466+static const struct ata_port_operations sc1200_port_ops = {
30467 .inherits = &ata_bmdma_port_ops,
30468 .qc_prep = ata_sff_dumb_qc_prep,
30469 .qc_issue = sc1200_qc_issue,
30470diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
30471index 4257d6b..4c1d9d5 100644
30472--- a/drivers/ata/pata_scc.c
30473+++ b/drivers/ata/pata_scc.c
30474@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
30475 ATA_BMDMA_SHT(DRV_NAME),
30476 };
30477
30478-static struct ata_port_operations scc_pata_ops = {
30479+static const struct ata_port_operations scc_pata_ops = {
30480 .inherits = &ata_bmdma_port_ops,
30481
30482 .set_piomode = scc_set_piomode,
30483diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
30484index 99cceb45..e2e0a87 100644
30485--- a/drivers/ata/pata_sch.c
30486+++ b/drivers/ata/pata_sch.c
30487@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
30488 ATA_BMDMA_SHT(DRV_NAME),
30489 };
30490
30491-static struct ata_port_operations sch_pata_ops = {
30492+static const struct ata_port_operations sch_pata_ops = {
30493 .inherits = &ata_bmdma_port_ops,
30494 .cable_detect = ata_cable_unknown,
30495 .set_piomode = sch_set_piomode,
30496diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
30497index beaed12..39969f1 100644
30498--- a/drivers/ata/pata_serverworks.c
30499+++ b/drivers/ata/pata_serverworks.c
30500@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
30501 ATA_BMDMA_SHT(DRV_NAME),
30502 };
30503
30504-static struct ata_port_operations serverworks_osb4_port_ops = {
30505+static const struct ata_port_operations serverworks_osb4_port_ops = {
30506 .inherits = &ata_bmdma_port_ops,
30507 .cable_detect = serverworks_cable_detect,
30508 .mode_filter = serverworks_osb4_filter,
30509@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
30510 .set_dmamode = serverworks_set_dmamode,
30511 };
30512
30513-static struct ata_port_operations serverworks_csb_port_ops = {
30514+static const struct ata_port_operations serverworks_csb_port_ops = {
30515 .inherits = &serverworks_osb4_port_ops,
30516 .mode_filter = serverworks_csb_filter,
30517 };
30518diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
30519index a2ace48..0463b44 100644
30520--- a/drivers/ata/pata_sil680.c
30521+++ b/drivers/ata/pata_sil680.c
30522@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
30523 ATA_BMDMA_SHT(DRV_NAME),
30524 };
30525
30526-static struct ata_port_operations sil680_port_ops = {
30527+static const struct ata_port_operations sil680_port_ops = {
30528 .inherits = &ata_bmdma32_port_ops,
30529 .cable_detect = sil680_cable_detect,
30530 .set_piomode = sil680_set_piomode,
30531diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
30532index 488e77b..b3724d5 100644
30533--- a/drivers/ata/pata_sis.c
30534+++ b/drivers/ata/pata_sis.c
30535@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
30536 ATA_BMDMA_SHT(DRV_NAME),
30537 };
30538
30539-static struct ata_port_operations sis_133_for_sata_ops = {
30540+static const struct ata_port_operations sis_133_for_sata_ops = {
30541 .inherits = &ata_bmdma_port_ops,
30542 .set_piomode = sis_133_set_piomode,
30543 .set_dmamode = sis_133_set_dmamode,
30544 .cable_detect = sis_133_cable_detect,
30545 };
30546
30547-static struct ata_port_operations sis_base_ops = {
30548+static const struct ata_port_operations sis_base_ops = {
30549 .inherits = &ata_bmdma_port_ops,
30550 .prereset = sis_pre_reset,
30551 };
30552
30553-static struct ata_port_operations sis_133_ops = {
30554+static const struct ata_port_operations sis_133_ops = {
30555 .inherits = &sis_base_ops,
30556 .set_piomode = sis_133_set_piomode,
30557 .set_dmamode = sis_133_set_dmamode,
30558 .cable_detect = sis_133_cable_detect,
30559 };
30560
30561-static struct ata_port_operations sis_133_early_ops = {
30562+static const struct ata_port_operations sis_133_early_ops = {
30563 .inherits = &sis_base_ops,
30564 .set_piomode = sis_100_set_piomode,
30565 .set_dmamode = sis_133_early_set_dmamode,
30566 .cable_detect = sis_66_cable_detect,
30567 };
30568
30569-static struct ata_port_operations sis_100_ops = {
30570+static const struct ata_port_operations sis_100_ops = {
30571 .inherits = &sis_base_ops,
30572 .set_piomode = sis_100_set_piomode,
30573 .set_dmamode = sis_100_set_dmamode,
30574 .cable_detect = sis_66_cable_detect,
30575 };
30576
30577-static struct ata_port_operations sis_66_ops = {
30578+static const struct ata_port_operations sis_66_ops = {
30579 .inherits = &sis_base_ops,
30580 .set_piomode = sis_old_set_piomode,
30581 .set_dmamode = sis_66_set_dmamode,
30582 .cable_detect = sis_66_cable_detect,
30583 };
30584
30585-static struct ata_port_operations sis_old_ops = {
30586+static const struct ata_port_operations sis_old_ops = {
30587 .inherits = &sis_base_ops,
30588 .set_piomode = sis_old_set_piomode,
30589 .set_dmamode = sis_old_set_dmamode,
30590diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
30591index 29f733c..43e9ca0 100644
30592--- a/drivers/ata/pata_sl82c105.c
30593+++ b/drivers/ata/pata_sl82c105.c
30594@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
30595 ATA_BMDMA_SHT(DRV_NAME),
30596 };
30597
30598-static struct ata_port_operations sl82c105_port_ops = {
30599+static const struct ata_port_operations sl82c105_port_ops = {
30600 .inherits = &ata_bmdma_port_ops,
30601 .qc_defer = sl82c105_qc_defer,
30602 .bmdma_start = sl82c105_bmdma_start,
30603diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
30604index f1f13ff..df39e99 100644
30605--- a/drivers/ata/pata_triflex.c
30606+++ b/drivers/ata/pata_triflex.c
30607@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
30608 ATA_BMDMA_SHT(DRV_NAME),
30609 };
30610
30611-static struct ata_port_operations triflex_port_ops = {
30612+static const struct ata_port_operations triflex_port_ops = {
30613 .inherits = &ata_bmdma_port_ops,
30614 .bmdma_start = triflex_bmdma_start,
30615 .bmdma_stop = triflex_bmdma_stop,
30616diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
30617index 1d73b8d..98a4b29 100644
30618--- a/drivers/ata/pata_via.c
30619+++ b/drivers/ata/pata_via.c
30620@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
30621 ATA_BMDMA_SHT(DRV_NAME),
30622 };
30623
30624-static struct ata_port_operations via_port_ops = {
30625+static const struct ata_port_operations via_port_ops = {
30626 .inherits = &ata_bmdma_port_ops,
30627 .cable_detect = via_cable_detect,
30628 .set_piomode = via_set_piomode,
30629@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
30630 .port_start = via_port_start,
30631 };
30632
30633-static struct ata_port_operations via_port_ops_noirq = {
30634+static const struct ata_port_operations via_port_ops_noirq = {
30635 .inherits = &via_port_ops,
30636 .sff_data_xfer = ata_sff_data_xfer_noirq,
30637 };
30638diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
30639index 6d8619b..ad511c4 100644
30640--- a/drivers/ata/pata_winbond.c
30641+++ b/drivers/ata/pata_winbond.c
30642@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
30643 ATA_PIO_SHT(DRV_NAME),
30644 };
30645
30646-static struct ata_port_operations winbond_port_ops = {
30647+static const struct ata_port_operations winbond_port_ops = {
30648 .inherits = &ata_sff_port_ops,
30649 .sff_data_xfer = winbond_data_xfer,
30650 .cable_detect = ata_cable_40wire,
30651diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
30652index 6c65b07..f996ec7 100644
30653--- a/drivers/ata/pdc_adma.c
30654+++ b/drivers/ata/pdc_adma.c
30655@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
30656 .dma_boundary = ADMA_DMA_BOUNDARY,
30657 };
30658
30659-static struct ata_port_operations adma_ata_ops = {
30660+static const struct ata_port_operations adma_ata_ops = {
30661 .inherits = &ata_sff_port_ops,
30662
30663 .lost_interrupt = ATA_OP_NULL,
30664diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
30665index 172b57e..c49bc1e 100644
30666--- a/drivers/ata/sata_fsl.c
30667+++ b/drivers/ata/sata_fsl.c
30668@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
30669 .dma_boundary = ATA_DMA_BOUNDARY,
30670 };
30671
30672-static struct ata_port_operations sata_fsl_ops = {
30673+static const struct ata_port_operations sata_fsl_ops = {
30674 .inherits = &sata_pmp_port_ops,
30675
30676 .qc_defer = ata_std_qc_defer,
30677diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
30678index 4406902..60603ef 100644
30679--- a/drivers/ata/sata_inic162x.c
30680+++ b/drivers/ata/sata_inic162x.c
30681@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
30682 return 0;
30683 }
30684
30685-static struct ata_port_operations inic_port_ops = {
30686+static const struct ata_port_operations inic_port_ops = {
30687 .inherits = &sata_port_ops,
30688
30689 .check_atapi_dma = inic_check_atapi_dma,
30690diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
30691index cf41126..8107be6 100644
30692--- a/drivers/ata/sata_mv.c
30693+++ b/drivers/ata/sata_mv.c
30694@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
30695 .dma_boundary = MV_DMA_BOUNDARY,
30696 };
30697
30698-static struct ata_port_operations mv5_ops = {
30699+static const struct ata_port_operations mv5_ops = {
30700 .inherits = &ata_sff_port_ops,
30701
30702 .lost_interrupt = ATA_OP_NULL,
30703@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
30704 .port_stop = mv_port_stop,
30705 };
30706
30707-static struct ata_port_operations mv6_ops = {
30708+static const struct ata_port_operations mv6_ops = {
30709 .inherits = &mv5_ops,
30710 .dev_config = mv6_dev_config,
30711 .scr_read = mv_scr_read,
30712@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
30713 .bmdma_status = mv_bmdma_status,
30714 };
30715
30716-static struct ata_port_operations mv_iie_ops = {
30717+static const struct ata_port_operations mv_iie_ops = {
30718 .inherits = &mv6_ops,
30719 .dev_config = ATA_OP_NULL,
30720 .qc_prep = mv_qc_prep_iie,
30721diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
30722index ae2297c..d5c9c33 100644
30723--- a/drivers/ata/sata_nv.c
30724+++ b/drivers/ata/sata_nv.c
30725@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
30726 * cases. Define nv_hardreset() which only kicks in for post-boot
30727 * probing and use it for all variants.
30728 */
30729-static struct ata_port_operations nv_generic_ops = {
30730+static const struct ata_port_operations nv_generic_ops = {
30731 .inherits = &ata_bmdma_port_ops,
30732 .lost_interrupt = ATA_OP_NULL,
30733 .scr_read = nv_scr_read,
30734@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
30735 .hardreset = nv_hardreset,
30736 };
30737
30738-static struct ata_port_operations nv_nf2_ops = {
30739+static const struct ata_port_operations nv_nf2_ops = {
30740 .inherits = &nv_generic_ops,
30741 .freeze = nv_nf2_freeze,
30742 .thaw = nv_nf2_thaw,
30743 };
30744
30745-static struct ata_port_operations nv_ck804_ops = {
30746+static const struct ata_port_operations nv_ck804_ops = {
30747 .inherits = &nv_generic_ops,
30748 .freeze = nv_ck804_freeze,
30749 .thaw = nv_ck804_thaw,
30750 .host_stop = nv_ck804_host_stop,
30751 };
30752
30753-static struct ata_port_operations nv_adma_ops = {
30754+static const struct ata_port_operations nv_adma_ops = {
30755 .inherits = &nv_ck804_ops,
30756
30757 .check_atapi_dma = nv_adma_check_atapi_dma,
30758@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
30759 .host_stop = nv_adma_host_stop,
30760 };
30761
30762-static struct ata_port_operations nv_swncq_ops = {
30763+static const struct ata_port_operations nv_swncq_ops = {
30764 .inherits = &nv_generic_ops,
30765
30766 .qc_defer = ata_std_qc_defer,
30767diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
30768index 07d8d00..6cc70bb 100644
30769--- a/drivers/ata/sata_promise.c
30770+++ b/drivers/ata/sata_promise.c
30771@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
30772 .error_handler = pdc_error_handler,
30773 };
30774
30775-static struct ata_port_operations pdc_sata_ops = {
30776+static const struct ata_port_operations pdc_sata_ops = {
30777 .inherits = &pdc_common_ops,
30778 .cable_detect = pdc_sata_cable_detect,
30779 .freeze = pdc_sata_freeze,
30780@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
30781
30782 /* First-generation chips need a more restrictive ->check_atapi_dma op,
30783 and ->freeze/thaw that ignore the hotplug controls. */
30784-static struct ata_port_operations pdc_old_sata_ops = {
30785+static const struct ata_port_operations pdc_old_sata_ops = {
30786 .inherits = &pdc_sata_ops,
30787 .freeze = pdc_freeze,
30788 .thaw = pdc_thaw,
30789 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
30790 };
30791
30792-static struct ata_port_operations pdc_pata_ops = {
30793+static const struct ata_port_operations pdc_pata_ops = {
30794 .inherits = &pdc_common_ops,
30795 .cable_detect = pdc_pata_cable_detect,
30796 .freeze = pdc_freeze,
30797diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
30798index 326c0cf..36ecebe 100644
30799--- a/drivers/ata/sata_qstor.c
30800+++ b/drivers/ata/sata_qstor.c
30801@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
30802 .dma_boundary = QS_DMA_BOUNDARY,
30803 };
30804
30805-static struct ata_port_operations qs_ata_ops = {
30806+static const struct ata_port_operations qs_ata_ops = {
30807 .inherits = &ata_sff_port_ops,
30808
30809 .check_atapi_dma = qs_check_atapi_dma,
30810diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
30811index 3cb69d5..0871d3c 100644
30812--- a/drivers/ata/sata_sil.c
30813+++ b/drivers/ata/sata_sil.c
30814@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
30815 .sg_tablesize = ATA_MAX_PRD
30816 };
30817
30818-static struct ata_port_operations sil_ops = {
30819+static const struct ata_port_operations sil_ops = {
30820 .inherits = &ata_bmdma32_port_ops,
30821 .dev_config = sil_dev_config,
30822 .set_mode = sil_set_mode,
30823diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
30824index e6946fc..eddb794 100644
30825--- a/drivers/ata/sata_sil24.c
30826+++ b/drivers/ata/sata_sil24.c
30827@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
30828 .dma_boundary = ATA_DMA_BOUNDARY,
30829 };
30830
30831-static struct ata_port_operations sil24_ops = {
30832+static const struct ata_port_operations sil24_ops = {
30833 .inherits = &sata_pmp_port_ops,
30834
30835 .qc_defer = sil24_qc_defer,
30836diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
30837index f8a91bf..9cb06b6 100644
30838--- a/drivers/ata/sata_sis.c
30839+++ b/drivers/ata/sata_sis.c
30840@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
30841 ATA_BMDMA_SHT(DRV_NAME),
30842 };
30843
30844-static struct ata_port_operations sis_ops = {
30845+static const struct ata_port_operations sis_ops = {
30846 .inherits = &ata_bmdma_port_ops,
30847 .scr_read = sis_scr_read,
30848 .scr_write = sis_scr_write,
30849diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
30850index 7257f2d..d04c6f5 100644
30851--- a/drivers/ata/sata_svw.c
30852+++ b/drivers/ata/sata_svw.c
30853@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
30854 };
30855
30856
30857-static struct ata_port_operations k2_sata_ops = {
30858+static const struct ata_port_operations k2_sata_ops = {
30859 .inherits = &ata_bmdma_port_ops,
30860 .sff_tf_load = k2_sata_tf_load,
30861 .sff_tf_read = k2_sata_tf_read,
30862diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
30863index bbcf970..cd0df0d 100644
30864--- a/drivers/ata/sata_sx4.c
30865+++ b/drivers/ata/sata_sx4.c
30866@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
30867 };
30868
30869 /* TODO: inherit from base port_ops after converting to new EH */
30870-static struct ata_port_operations pdc_20621_ops = {
30871+static const struct ata_port_operations pdc_20621_ops = {
30872 .inherits = &ata_sff_port_ops,
30873
30874 .check_atapi_dma = pdc_check_atapi_dma,
30875diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
30876index e5bff47..089d859 100644
30877--- a/drivers/ata/sata_uli.c
30878+++ b/drivers/ata/sata_uli.c
30879@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
30880 ATA_BMDMA_SHT(DRV_NAME),
30881 };
30882
30883-static struct ata_port_operations uli_ops = {
30884+static const struct ata_port_operations uli_ops = {
30885 .inherits = &ata_bmdma_port_ops,
30886 .scr_read = uli_scr_read,
30887 .scr_write = uli_scr_write,
30888diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
30889index f5dcca7..77b94eb 100644
30890--- a/drivers/ata/sata_via.c
30891+++ b/drivers/ata/sata_via.c
30892@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
30893 ATA_BMDMA_SHT(DRV_NAME),
30894 };
30895
30896-static struct ata_port_operations svia_base_ops = {
30897+static const struct ata_port_operations svia_base_ops = {
30898 .inherits = &ata_bmdma_port_ops,
30899 .sff_tf_load = svia_tf_load,
30900 };
30901
30902-static struct ata_port_operations vt6420_sata_ops = {
30903+static const struct ata_port_operations vt6420_sata_ops = {
30904 .inherits = &svia_base_ops,
30905 .freeze = svia_noop_freeze,
30906 .prereset = vt6420_prereset,
30907 .bmdma_start = vt6420_bmdma_start,
30908 };
30909
30910-static struct ata_port_operations vt6421_pata_ops = {
30911+static const struct ata_port_operations vt6421_pata_ops = {
30912 .inherits = &svia_base_ops,
30913 .cable_detect = vt6421_pata_cable_detect,
30914 .set_piomode = vt6421_set_pio_mode,
30915 .set_dmamode = vt6421_set_dma_mode,
30916 };
30917
30918-static struct ata_port_operations vt6421_sata_ops = {
30919+static const struct ata_port_operations vt6421_sata_ops = {
30920 .inherits = &svia_base_ops,
30921 .scr_read = svia_scr_read,
30922 .scr_write = svia_scr_write,
30923 };
30924
30925-static struct ata_port_operations vt8251_ops = {
30926+static const struct ata_port_operations vt8251_ops = {
30927 .inherits = &svia_base_ops,
30928 .hardreset = sata_std_hardreset,
30929 .scr_read = vt8251_scr_read,
30930diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
30931index 8b2a278..51e65d3 100644
30932--- a/drivers/ata/sata_vsc.c
30933+++ b/drivers/ata/sata_vsc.c
30934@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
30935 };
30936
30937
30938-static struct ata_port_operations vsc_sata_ops = {
30939+static const struct ata_port_operations vsc_sata_ops = {
30940 .inherits = &ata_bmdma_port_ops,
30941 /* The IRQ handling is not quite standard SFF behaviour so we
30942 cannot use the default lost interrupt handler */
30943diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
30944index 5effec6..7e4019a 100644
30945--- a/drivers/atm/adummy.c
30946+++ b/drivers/atm/adummy.c
30947@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
30948 vcc->pop(vcc, skb);
30949 else
30950 dev_kfree_skb_any(skb);
30951- atomic_inc(&vcc->stats->tx);
30952+ atomic_inc_unchecked(&vcc->stats->tx);
30953
30954 return 0;
30955 }
30956diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
30957index 66e1813..26a27c6 100644
30958--- a/drivers/atm/ambassador.c
30959+++ b/drivers/atm/ambassador.c
30960@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
30961 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
30962
30963 // VC layer stats
30964- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30965+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30966
30967 // free the descriptor
30968 kfree (tx_descr);
30969@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30970 dump_skb ("<<<", vc, skb);
30971
30972 // VC layer stats
30973- atomic_inc(&atm_vcc->stats->rx);
30974+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30975 __net_timestamp(skb);
30976 // end of our responsability
30977 atm_vcc->push (atm_vcc, skb);
30978@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30979 } else {
30980 PRINTK (KERN_INFO, "dropped over-size frame");
30981 // should we count this?
30982- atomic_inc(&atm_vcc->stats->rx_drop);
30983+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30984 }
30985
30986 } else {
30987@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
30988 }
30989
30990 if (check_area (skb->data, skb->len)) {
30991- atomic_inc(&atm_vcc->stats->tx_err);
30992+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
30993 return -ENOMEM; // ?
30994 }
30995
30996diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
30997index 02ad83d..6daffeb 100644
30998--- a/drivers/atm/atmtcp.c
30999+++ b/drivers/atm/atmtcp.c
31000@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31001 if (vcc->pop) vcc->pop(vcc,skb);
31002 else dev_kfree_skb(skb);
31003 if (dev_data) return 0;
31004- atomic_inc(&vcc->stats->tx_err);
31005+ atomic_inc_unchecked(&vcc->stats->tx_err);
31006 return -ENOLINK;
31007 }
31008 size = skb->len+sizeof(struct atmtcp_hdr);
31009@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31010 if (!new_skb) {
31011 if (vcc->pop) vcc->pop(vcc,skb);
31012 else dev_kfree_skb(skb);
31013- atomic_inc(&vcc->stats->tx_err);
31014+ atomic_inc_unchecked(&vcc->stats->tx_err);
31015 return -ENOBUFS;
31016 }
31017 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31018@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31019 if (vcc->pop) vcc->pop(vcc,skb);
31020 else dev_kfree_skb(skb);
31021 out_vcc->push(out_vcc,new_skb);
31022- atomic_inc(&vcc->stats->tx);
31023- atomic_inc(&out_vcc->stats->rx);
31024+ atomic_inc_unchecked(&vcc->stats->tx);
31025+ atomic_inc_unchecked(&out_vcc->stats->rx);
31026 return 0;
31027 }
31028
31029@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31030 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31031 read_unlock(&vcc_sklist_lock);
31032 if (!out_vcc) {
31033- atomic_inc(&vcc->stats->tx_err);
31034+ atomic_inc_unchecked(&vcc->stats->tx_err);
31035 goto done;
31036 }
31037 skb_pull(skb,sizeof(struct atmtcp_hdr));
31038@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31039 __net_timestamp(new_skb);
31040 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31041 out_vcc->push(out_vcc,new_skb);
31042- atomic_inc(&vcc->stats->tx);
31043- atomic_inc(&out_vcc->stats->rx);
31044+ atomic_inc_unchecked(&vcc->stats->tx);
31045+ atomic_inc_unchecked(&out_vcc->stats->rx);
31046 done:
31047 if (vcc->pop) vcc->pop(vcc,skb);
31048 else dev_kfree_skb(skb);
31049diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31050index 0c30261..3da356e 100644
31051--- a/drivers/atm/eni.c
31052+++ b/drivers/atm/eni.c
31053@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31054 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31055 vcc->dev->number);
31056 length = 0;
31057- atomic_inc(&vcc->stats->rx_err);
31058+ atomic_inc_unchecked(&vcc->stats->rx_err);
31059 }
31060 else {
31061 length = ATM_CELL_SIZE-1; /* no HEC */
31062@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31063 size);
31064 }
31065 eff = length = 0;
31066- atomic_inc(&vcc->stats->rx_err);
31067+ atomic_inc_unchecked(&vcc->stats->rx_err);
31068 }
31069 else {
31070 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31071@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31072 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31073 vcc->dev->number,vcc->vci,length,size << 2,descr);
31074 length = eff = 0;
31075- atomic_inc(&vcc->stats->rx_err);
31076+ atomic_inc_unchecked(&vcc->stats->rx_err);
31077 }
31078 }
31079 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31080@@ -770,7 +770,7 @@ rx_dequeued++;
31081 vcc->push(vcc,skb);
31082 pushed++;
31083 }
31084- atomic_inc(&vcc->stats->rx);
31085+ atomic_inc_unchecked(&vcc->stats->rx);
31086 }
31087 wake_up(&eni_dev->rx_wait);
31088 }
31089@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31090 PCI_DMA_TODEVICE);
31091 if (vcc->pop) vcc->pop(vcc,skb);
31092 else dev_kfree_skb_irq(skb);
31093- atomic_inc(&vcc->stats->tx);
31094+ atomic_inc_unchecked(&vcc->stats->tx);
31095 wake_up(&eni_dev->tx_wait);
31096 dma_complete++;
31097 }
31098@@ -1570,7 +1570,7 @@ tx_complete++;
31099 /*--------------------------------- entries ---------------------------------*/
31100
31101
31102-static const char *media_name[] __devinitdata = {
31103+static const char *media_name[] __devinitconst = {
31104 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
31105 "UTP", "05?", "06?", "07?", /* 4- 7 */
31106 "TAXI","09?", "10?", "11?", /* 8-11 */
31107diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31108index cd5049a..a51209f 100644
31109--- a/drivers/atm/firestream.c
31110+++ b/drivers/atm/firestream.c
31111@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31112 }
31113 }
31114
31115- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31116+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31117
31118 fs_dprintk (FS_DEBUG_TXMEM, "i");
31119 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31120@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31121 #endif
31122 skb_put (skb, qe->p1 & 0xffff);
31123 ATM_SKB(skb)->vcc = atm_vcc;
31124- atomic_inc(&atm_vcc->stats->rx);
31125+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31126 __net_timestamp(skb);
31127 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31128 atm_vcc->push (atm_vcc, skb);
31129@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31130 kfree (pe);
31131 }
31132 if (atm_vcc)
31133- atomic_inc(&atm_vcc->stats->rx_drop);
31134+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31135 break;
31136 case 0x1f: /* Reassembly abort: no buffers. */
31137 /* Silently increment error counter. */
31138 if (atm_vcc)
31139- atomic_inc(&atm_vcc->stats->rx_drop);
31140+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31141 break;
31142 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31143 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31144diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31145index f766cc4..a34002e 100644
31146--- a/drivers/atm/fore200e.c
31147+++ b/drivers/atm/fore200e.c
31148@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31149 #endif
31150 /* check error condition */
31151 if (*entry->status & STATUS_ERROR)
31152- atomic_inc(&vcc->stats->tx_err);
31153+ atomic_inc_unchecked(&vcc->stats->tx_err);
31154 else
31155- atomic_inc(&vcc->stats->tx);
31156+ atomic_inc_unchecked(&vcc->stats->tx);
31157 }
31158 }
31159
31160@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31161 if (skb == NULL) {
31162 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31163
31164- atomic_inc(&vcc->stats->rx_drop);
31165+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31166 return -ENOMEM;
31167 }
31168
31169@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31170
31171 dev_kfree_skb_any(skb);
31172
31173- atomic_inc(&vcc->stats->rx_drop);
31174+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31175 return -ENOMEM;
31176 }
31177
31178 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31179
31180 vcc->push(vcc, skb);
31181- atomic_inc(&vcc->stats->rx);
31182+ atomic_inc_unchecked(&vcc->stats->rx);
31183
31184 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31185
31186@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31187 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31188 fore200e->atm_dev->number,
31189 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31190- atomic_inc(&vcc->stats->rx_err);
31191+ atomic_inc_unchecked(&vcc->stats->rx_err);
31192 }
31193 }
31194
31195@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31196 goto retry_here;
31197 }
31198
31199- atomic_inc(&vcc->stats->tx_err);
31200+ atomic_inc_unchecked(&vcc->stats->tx_err);
31201
31202 fore200e->tx_sat++;
31203 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31204diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31205index 7066703..2b130de 100644
31206--- a/drivers/atm/he.c
31207+++ b/drivers/atm/he.c
31208@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31209
31210 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31211 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31212- atomic_inc(&vcc->stats->rx_drop);
31213+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31214 goto return_host_buffers;
31215 }
31216
31217@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31218 RBRQ_LEN_ERR(he_dev->rbrq_head)
31219 ? "LEN_ERR" : "",
31220 vcc->vpi, vcc->vci);
31221- atomic_inc(&vcc->stats->rx_err);
31222+ atomic_inc_unchecked(&vcc->stats->rx_err);
31223 goto return_host_buffers;
31224 }
31225
31226@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31227 vcc->push(vcc, skb);
31228 spin_lock(&he_dev->global_lock);
31229
31230- atomic_inc(&vcc->stats->rx);
31231+ atomic_inc_unchecked(&vcc->stats->rx);
31232
31233 return_host_buffers:
31234 ++pdus_assembled;
31235@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31236 tpd->vcc->pop(tpd->vcc, tpd->skb);
31237 else
31238 dev_kfree_skb_any(tpd->skb);
31239- atomic_inc(&tpd->vcc->stats->tx_err);
31240+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31241 }
31242 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31243 return;
31244@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31245 vcc->pop(vcc, skb);
31246 else
31247 dev_kfree_skb_any(skb);
31248- atomic_inc(&vcc->stats->tx_err);
31249+ atomic_inc_unchecked(&vcc->stats->tx_err);
31250 return -EINVAL;
31251 }
31252
31253@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31254 vcc->pop(vcc, skb);
31255 else
31256 dev_kfree_skb_any(skb);
31257- atomic_inc(&vcc->stats->tx_err);
31258+ atomic_inc_unchecked(&vcc->stats->tx_err);
31259 return -EINVAL;
31260 }
31261 #endif
31262@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31263 vcc->pop(vcc, skb);
31264 else
31265 dev_kfree_skb_any(skb);
31266- atomic_inc(&vcc->stats->tx_err);
31267+ atomic_inc_unchecked(&vcc->stats->tx_err);
31268 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31269 return -ENOMEM;
31270 }
31271@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31272 vcc->pop(vcc, skb);
31273 else
31274 dev_kfree_skb_any(skb);
31275- atomic_inc(&vcc->stats->tx_err);
31276+ atomic_inc_unchecked(&vcc->stats->tx_err);
31277 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31278 return -ENOMEM;
31279 }
31280@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31281 __enqueue_tpd(he_dev, tpd, cid);
31282 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31283
31284- atomic_inc(&vcc->stats->tx);
31285+ atomic_inc_unchecked(&vcc->stats->tx);
31286
31287 return 0;
31288 }
31289diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31290index 4e49021..01b1512 100644
31291--- a/drivers/atm/horizon.c
31292+++ b/drivers/atm/horizon.c
31293@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31294 {
31295 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31296 // VC layer stats
31297- atomic_inc(&vcc->stats->rx);
31298+ atomic_inc_unchecked(&vcc->stats->rx);
31299 __net_timestamp(skb);
31300 // end of our responsability
31301 vcc->push (vcc, skb);
31302@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31303 dev->tx_iovec = NULL;
31304
31305 // VC layer stats
31306- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31307+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31308
31309 // free the skb
31310 hrz_kfree_skb (skb);
31311diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31312index e33ae00..9deb4ab 100644
31313--- a/drivers/atm/idt77252.c
31314+++ b/drivers/atm/idt77252.c
31315@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31316 else
31317 dev_kfree_skb(skb);
31318
31319- atomic_inc(&vcc->stats->tx);
31320+ atomic_inc_unchecked(&vcc->stats->tx);
31321 }
31322
31323 atomic_dec(&scq->used);
31324@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31325 if ((sb = dev_alloc_skb(64)) == NULL) {
31326 printk("%s: Can't allocate buffers for aal0.\n",
31327 card->name);
31328- atomic_add(i, &vcc->stats->rx_drop);
31329+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
31330 break;
31331 }
31332 if (!atm_charge(vcc, sb->truesize)) {
31333 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31334 card->name);
31335- atomic_add(i - 1, &vcc->stats->rx_drop);
31336+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31337 dev_kfree_skb(sb);
31338 break;
31339 }
31340@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31341 ATM_SKB(sb)->vcc = vcc;
31342 __net_timestamp(sb);
31343 vcc->push(vcc, sb);
31344- atomic_inc(&vcc->stats->rx);
31345+ atomic_inc_unchecked(&vcc->stats->rx);
31346
31347 cell += ATM_CELL_PAYLOAD;
31348 }
31349@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31350 "(CDC: %08x)\n",
31351 card->name, len, rpp->len, readl(SAR_REG_CDC));
31352 recycle_rx_pool_skb(card, rpp);
31353- atomic_inc(&vcc->stats->rx_err);
31354+ atomic_inc_unchecked(&vcc->stats->rx_err);
31355 return;
31356 }
31357 if (stat & SAR_RSQE_CRC) {
31358 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31359 recycle_rx_pool_skb(card, rpp);
31360- atomic_inc(&vcc->stats->rx_err);
31361+ atomic_inc_unchecked(&vcc->stats->rx_err);
31362 return;
31363 }
31364 if (skb_queue_len(&rpp->queue) > 1) {
31365@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31366 RXPRINTK("%s: Can't alloc RX skb.\n",
31367 card->name);
31368 recycle_rx_pool_skb(card, rpp);
31369- atomic_inc(&vcc->stats->rx_err);
31370+ atomic_inc_unchecked(&vcc->stats->rx_err);
31371 return;
31372 }
31373 if (!atm_charge(vcc, skb->truesize)) {
31374@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31375 __net_timestamp(skb);
31376
31377 vcc->push(vcc, skb);
31378- atomic_inc(&vcc->stats->rx);
31379+ atomic_inc_unchecked(&vcc->stats->rx);
31380
31381 return;
31382 }
31383@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31384 __net_timestamp(skb);
31385
31386 vcc->push(vcc, skb);
31387- atomic_inc(&vcc->stats->rx);
31388+ atomic_inc_unchecked(&vcc->stats->rx);
31389
31390 if (skb->truesize > SAR_FB_SIZE_3)
31391 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31392@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31393 if (vcc->qos.aal != ATM_AAL0) {
31394 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
31395 card->name, vpi, vci);
31396- atomic_inc(&vcc->stats->rx_drop);
31397+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31398 goto drop;
31399 }
31400
31401 if ((sb = dev_alloc_skb(64)) == NULL) {
31402 printk("%s: Can't allocate buffers for AAL0.\n",
31403 card->name);
31404- atomic_inc(&vcc->stats->rx_err);
31405+ atomic_inc_unchecked(&vcc->stats->rx_err);
31406 goto drop;
31407 }
31408
31409@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
31410 ATM_SKB(sb)->vcc = vcc;
31411 __net_timestamp(sb);
31412 vcc->push(vcc, sb);
31413- atomic_inc(&vcc->stats->rx);
31414+ atomic_inc_unchecked(&vcc->stats->rx);
31415
31416 drop:
31417 skb_pull(queue, 64);
31418@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31419
31420 if (vc == NULL) {
31421 printk("%s: NULL connection in send().\n", card->name);
31422- atomic_inc(&vcc->stats->tx_err);
31423+ atomic_inc_unchecked(&vcc->stats->tx_err);
31424 dev_kfree_skb(skb);
31425 return -EINVAL;
31426 }
31427 if (!test_bit(VCF_TX, &vc->flags)) {
31428 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
31429- atomic_inc(&vcc->stats->tx_err);
31430+ atomic_inc_unchecked(&vcc->stats->tx_err);
31431 dev_kfree_skb(skb);
31432 return -EINVAL;
31433 }
31434@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31435 break;
31436 default:
31437 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
31438- atomic_inc(&vcc->stats->tx_err);
31439+ atomic_inc_unchecked(&vcc->stats->tx_err);
31440 dev_kfree_skb(skb);
31441 return -EINVAL;
31442 }
31443
31444 if (skb_shinfo(skb)->nr_frags != 0) {
31445 printk("%s: No scatter-gather yet.\n", card->name);
31446- atomic_inc(&vcc->stats->tx_err);
31447+ atomic_inc_unchecked(&vcc->stats->tx_err);
31448 dev_kfree_skb(skb);
31449 return -EINVAL;
31450 }
31451@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
31452
31453 err = queue_skb(card, vc, skb, oam);
31454 if (err) {
31455- atomic_inc(&vcc->stats->tx_err);
31456+ atomic_inc_unchecked(&vcc->stats->tx_err);
31457 dev_kfree_skb(skb);
31458 return err;
31459 }
31460@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
31461 skb = dev_alloc_skb(64);
31462 if (!skb) {
31463 printk("%s: Out of memory in send_oam().\n", card->name);
31464- atomic_inc(&vcc->stats->tx_err);
31465+ atomic_inc_unchecked(&vcc->stats->tx_err);
31466 return -ENOMEM;
31467 }
31468 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
31469diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
31470index b2c1b37..faa672b 100644
31471--- a/drivers/atm/iphase.c
31472+++ b/drivers/atm/iphase.c
31473@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
31474 status = (u_short) (buf_desc_ptr->desc_mode);
31475 if (status & (RX_CER | RX_PTE | RX_OFL))
31476 {
31477- atomic_inc(&vcc->stats->rx_err);
31478+ atomic_inc_unchecked(&vcc->stats->rx_err);
31479 IF_ERR(printk("IA: bad packet, dropping it");)
31480 if (status & RX_CER) {
31481 IF_ERR(printk(" cause: packet CRC error\n");)
31482@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
31483 len = dma_addr - buf_addr;
31484 if (len > iadev->rx_buf_sz) {
31485 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
31486- atomic_inc(&vcc->stats->rx_err);
31487+ atomic_inc_unchecked(&vcc->stats->rx_err);
31488 goto out_free_desc;
31489 }
31490
31491@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31492 ia_vcc = INPH_IA_VCC(vcc);
31493 if (ia_vcc == NULL)
31494 {
31495- atomic_inc(&vcc->stats->rx_err);
31496+ atomic_inc_unchecked(&vcc->stats->rx_err);
31497 dev_kfree_skb_any(skb);
31498 atm_return(vcc, atm_guess_pdu2truesize(len));
31499 goto INCR_DLE;
31500@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31501 if ((length > iadev->rx_buf_sz) || (length >
31502 (skb->len - sizeof(struct cpcs_trailer))))
31503 {
31504- atomic_inc(&vcc->stats->rx_err);
31505+ atomic_inc_unchecked(&vcc->stats->rx_err);
31506 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
31507 length, skb->len);)
31508 dev_kfree_skb_any(skb);
31509@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31510
31511 IF_RX(printk("rx_dle_intr: skb push");)
31512 vcc->push(vcc,skb);
31513- atomic_inc(&vcc->stats->rx);
31514+ atomic_inc_unchecked(&vcc->stats->rx);
31515 iadev->rx_pkt_cnt++;
31516 }
31517 INCR_DLE:
31518@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
31519 {
31520 struct k_sonet_stats *stats;
31521 stats = &PRIV(_ia_dev[board])->sonet_stats;
31522- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
31523- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
31524- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
31525- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
31526- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
31527- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
31528- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
31529- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
31530- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
31531+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
31532+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
31533+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
31534+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
31535+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
31536+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
31537+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
31538+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
31539+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
31540 }
31541 ia_cmds.status = 0;
31542 break;
31543@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31544 if ((desc == 0) || (desc > iadev->num_tx_desc))
31545 {
31546 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
31547- atomic_inc(&vcc->stats->tx);
31548+ atomic_inc_unchecked(&vcc->stats->tx);
31549 if (vcc->pop)
31550 vcc->pop(vcc, skb);
31551 else
31552@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31553 ATM_DESC(skb) = vcc->vci;
31554 skb_queue_tail(&iadev->tx_dma_q, skb);
31555
31556- atomic_inc(&vcc->stats->tx);
31557+ atomic_inc_unchecked(&vcc->stats->tx);
31558 iadev->tx_pkt_cnt++;
31559 /* Increment transaction counter */
31560 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
31561
31562 #if 0
31563 /* add flow control logic */
31564- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
31565+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
31566 if (iavcc->vc_desc_cnt > 10) {
31567 vcc->tx_quota = vcc->tx_quota * 3 / 4;
31568 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
31569diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
31570index cf97c34..8d30655 100644
31571--- a/drivers/atm/lanai.c
31572+++ b/drivers/atm/lanai.c
31573@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
31574 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
31575 lanai_endtx(lanai, lvcc);
31576 lanai_free_skb(lvcc->tx.atmvcc, skb);
31577- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
31578+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
31579 }
31580
31581 /* Try to fill the buffer - don't call unless there is backlog */
31582@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
31583 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
31584 __net_timestamp(skb);
31585 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
31586- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
31587+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
31588 out:
31589 lvcc->rx.buf.ptr = end;
31590 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
31591@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31592 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
31593 "vcc %d\n", lanai->number, (unsigned int) s, vci);
31594 lanai->stats.service_rxnotaal5++;
31595- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31596+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31597 return 0;
31598 }
31599 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
31600@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31601 int bytes;
31602 read_unlock(&vcc_sklist_lock);
31603 DPRINTK("got trashed rx pdu on vci %d\n", vci);
31604- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31605+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31606 lvcc->stats.x.aal5.service_trash++;
31607 bytes = (SERVICE_GET_END(s) * 16) -
31608 (((unsigned long) lvcc->rx.buf.ptr) -
31609@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31610 }
31611 if (s & SERVICE_STREAM) {
31612 read_unlock(&vcc_sklist_lock);
31613- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31614+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31615 lvcc->stats.x.aal5.service_stream++;
31616 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
31617 "PDU on VCI %d!\n", lanai->number, vci);
31618@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31619 return 0;
31620 }
31621 DPRINTK("got rx crc error on vci %d\n", vci);
31622- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31623+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31624 lvcc->stats.x.aal5.service_rxcrc++;
31625 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
31626 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
31627diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
31628index 3da804b..d3b0eed 100644
31629--- a/drivers/atm/nicstar.c
31630+++ b/drivers/atm/nicstar.c
31631@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31632 if ((vc = (vc_map *) vcc->dev_data) == NULL)
31633 {
31634 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
31635- atomic_inc(&vcc->stats->tx_err);
31636+ atomic_inc_unchecked(&vcc->stats->tx_err);
31637 dev_kfree_skb_any(skb);
31638 return -EINVAL;
31639 }
31640@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31641 if (!vc->tx)
31642 {
31643 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
31644- atomic_inc(&vcc->stats->tx_err);
31645+ atomic_inc_unchecked(&vcc->stats->tx_err);
31646 dev_kfree_skb_any(skb);
31647 return -EINVAL;
31648 }
31649@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31650 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
31651 {
31652 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
31653- atomic_inc(&vcc->stats->tx_err);
31654+ atomic_inc_unchecked(&vcc->stats->tx_err);
31655 dev_kfree_skb_any(skb);
31656 return -EINVAL;
31657 }
31658@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31659 if (skb_shinfo(skb)->nr_frags != 0)
31660 {
31661 printk("nicstar%d: No scatter-gather yet.\n", card->index);
31662- atomic_inc(&vcc->stats->tx_err);
31663+ atomic_inc_unchecked(&vcc->stats->tx_err);
31664 dev_kfree_skb_any(skb);
31665 return -EINVAL;
31666 }
31667@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31668
31669 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
31670 {
31671- atomic_inc(&vcc->stats->tx_err);
31672+ atomic_inc_unchecked(&vcc->stats->tx_err);
31673 dev_kfree_skb_any(skb);
31674 return -EIO;
31675 }
31676- atomic_inc(&vcc->stats->tx);
31677+ atomic_inc_unchecked(&vcc->stats->tx);
31678
31679 return 0;
31680 }
31681@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31682 {
31683 printk("nicstar%d: Can't allocate buffers for aal0.\n",
31684 card->index);
31685- atomic_add(i,&vcc->stats->rx_drop);
31686+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
31687 break;
31688 }
31689 if (!atm_charge(vcc, sb->truesize))
31690 {
31691 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
31692 card->index);
31693- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31694+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31695 dev_kfree_skb_any(sb);
31696 break;
31697 }
31698@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31699 ATM_SKB(sb)->vcc = vcc;
31700 __net_timestamp(sb);
31701 vcc->push(vcc, sb);
31702- atomic_inc(&vcc->stats->rx);
31703+ atomic_inc_unchecked(&vcc->stats->rx);
31704 cell += ATM_CELL_PAYLOAD;
31705 }
31706
31707@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31708 if (iovb == NULL)
31709 {
31710 printk("nicstar%d: Out of iovec buffers.\n", card->index);
31711- atomic_inc(&vcc->stats->rx_drop);
31712+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31713 recycle_rx_buf(card, skb);
31714 return;
31715 }
31716@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31717 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
31718 {
31719 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
31720- atomic_inc(&vcc->stats->rx_err);
31721+ atomic_inc_unchecked(&vcc->stats->rx_err);
31722 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
31723 NS_SKB(iovb)->iovcnt = 0;
31724 iovb->len = 0;
31725@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31726 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
31727 card->index);
31728 which_list(card, skb);
31729- atomic_inc(&vcc->stats->rx_err);
31730+ atomic_inc_unchecked(&vcc->stats->rx_err);
31731 recycle_rx_buf(card, skb);
31732 vc->rx_iov = NULL;
31733 recycle_iov_buf(card, iovb);
31734@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31735 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
31736 card->index);
31737 which_list(card, skb);
31738- atomic_inc(&vcc->stats->rx_err);
31739+ atomic_inc_unchecked(&vcc->stats->rx_err);
31740 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31741 NS_SKB(iovb)->iovcnt);
31742 vc->rx_iov = NULL;
31743@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31744 printk(" - PDU size mismatch.\n");
31745 else
31746 printk(".\n");
31747- atomic_inc(&vcc->stats->rx_err);
31748+ atomic_inc_unchecked(&vcc->stats->rx_err);
31749 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31750 NS_SKB(iovb)->iovcnt);
31751 vc->rx_iov = NULL;
31752@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31753 if (!atm_charge(vcc, skb->truesize))
31754 {
31755 push_rxbufs(card, skb);
31756- atomic_inc(&vcc->stats->rx_drop);
31757+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31758 }
31759 else
31760 {
31761@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31762 ATM_SKB(skb)->vcc = vcc;
31763 __net_timestamp(skb);
31764 vcc->push(vcc, skb);
31765- atomic_inc(&vcc->stats->rx);
31766+ atomic_inc_unchecked(&vcc->stats->rx);
31767 }
31768 }
31769 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
31770@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31771 if (!atm_charge(vcc, sb->truesize))
31772 {
31773 push_rxbufs(card, sb);
31774- atomic_inc(&vcc->stats->rx_drop);
31775+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31776 }
31777 else
31778 {
31779@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31780 ATM_SKB(sb)->vcc = vcc;
31781 __net_timestamp(sb);
31782 vcc->push(vcc, sb);
31783- atomic_inc(&vcc->stats->rx);
31784+ atomic_inc_unchecked(&vcc->stats->rx);
31785 }
31786
31787 push_rxbufs(card, skb);
31788@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31789 if (!atm_charge(vcc, skb->truesize))
31790 {
31791 push_rxbufs(card, skb);
31792- atomic_inc(&vcc->stats->rx_drop);
31793+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31794 }
31795 else
31796 {
31797@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31798 ATM_SKB(skb)->vcc = vcc;
31799 __net_timestamp(skb);
31800 vcc->push(vcc, skb);
31801- atomic_inc(&vcc->stats->rx);
31802+ atomic_inc_unchecked(&vcc->stats->rx);
31803 }
31804
31805 push_rxbufs(card, sb);
31806@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31807 if (hb == NULL)
31808 {
31809 printk("nicstar%d: Out of huge buffers.\n", card->index);
31810- atomic_inc(&vcc->stats->rx_drop);
31811+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31812 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31813 NS_SKB(iovb)->iovcnt);
31814 vc->rx_iov = NULL;
31815@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31816 }
31817 else
31818 dev_kfree_skb_any(hb);
31819- atomic_inc(&vcc->stats->rx_drop);
31820+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31821 }
31822 else
31823 {
31824@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31825 #endif /* NS_USE_DESTRUCTORS */
31826 __net_timestamp(hb);
31827 vcc->push(vcc, hb);
31828- atomic_inc(&vcc->stats->rx);
31829+ atomic_inc_unchecked(&vcc->stats->rx);
31830 }
31831 }
31832
31833diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
31834index 84c93ff..e6ed269 100644
31835--- a/drivers/atm/solos-pci.c
31836+++ b/drivers/atm/solos-pci.c
31837@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
31838 }
31839 atm_charge(vcc, skb->truesize);
31840 vcc->push(vcc, skb);
31841- atomic_inc(&vcc->stats->rx);
31842+ atomic_inc_unchecked(&vcc->stats->rx);
31843 break;
31844
31845 case PKT_STATUS:
31846@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
31847 char msg[500];
31848 char item[10];
31849
31850+ pax_track_stack();
31851+
31852 len = buf->len;
31853 for (i = 0; i < len; i++){
31854 if(i % 8 == 0)
31855@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
31856 vcc = SKB_CB(oldskb)->vcc;
31857
31858 if (vcc) {
31859- atomic_inc(&vcc->stats->tx);
31860+ atomic_inc_unchecked(&vcc->stats->tx);
31861 solos_pop(vcc, oldskb);
31862 } else
31863 dev_kfree_skb_irq(oldskb);
31864diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
31865index 6dd3f59..ee377f3 100644
31866--- a/drivers/atm/suni.c
31867+++ b/drivers/atm/suni.c
31868@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
31869
31870
31871 #define ADD_LIMITED(s,v) \
31872- atomic_add((v),&stats->s); \
31873- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
31874+ atomic_add_unchecked((v),&stats->s); \
31875+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
31876
31877
31878 static void suni_hz(unsigned long from_timer)
31879diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
31880index fc8cb07..4a80e53 100644
31881--- a/drivers/atm/uPD98402.c
31882+++ b/drivers/atm/uPD98402.c
31883@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31884 struct sonet_stats tmp;
31885 int error = 0;
31886
31887- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31888+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31889 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31890 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31891 if (zero && !error) {
31892@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31893
31894
31895 #define ADD_LIMITED(s,v) \
31896- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31897- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31898- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31899+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31900+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31901+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31902
31903
31904 static void stat_event(struct atm_dev *dev)
31905@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
31906 if (reason & uPD98402_INT_PFM) stat_event(dev);
31907 if (reason & uPD98402_INT_PCO) {
31908 (void) GET(PCOCR); /* clear interrupt cause */
31909- atomic_add(GET(HECCT),
31910+ atomic_add_unchecked(GET(HECCT),
31911 &PRIV(dev)->sonet_stats.uncorr_hcs);
31912 }
31913 if ((reason & uPD98402_INT_RFO) &&
31914@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
31915 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
31916 uPD98402_INT_LOS),PIMR); /* enable them */
31917 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
31918- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31919- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
31920- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
31921+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31922+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
31923+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
31924 return 0;
31925 }
31926
31927diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
31928index 2e9635b..32927b4 100644
31929--- a/drivers/atm/zatm.c
31930+++ b/drivers/atm/zatm.c
31931@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31932 }
31933 if (!size) {
31934 dev_kfree_skb_irq(skb);
31935- if (vcc) atomic_inc(&vcc->stats->rx_err);
31936+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
31937 continue;
31938 }
31939 if (!atm_charge(vcc,skb->truesize)) {
31940@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31941 skb->len = size;
31942 ATM_SKB(skb)->vcc = vcc;
31943 vcc->push(vcc,skb);
31944- atomic_inc(&vcc->stats->rx);
31945+ atomic_inc_unchecked(&vcc->stats->rx);
31946 }
31947 zout(pos & 0xffff,MTA(mbx));
31948 #if 0 /* probably a stupid idea */
31949@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
31950 skb_queue_head(&zatm_vcc->backlog,skb);
31951 break;
31952 }
31953- atomic_inc(&vcc->stats->tx);
31954+ atomic_inc_unchecked(&vcc->stats->tx);
31955 wake_up(&zatm_vcc->tx_wait);
31956 }
31957
31958diff --git a/drivers/base/bus.c b/drivers/base/bus.c
31959index 63c143e..fece183 100644
31960--- a/drivers/base/bus.c
31961+++ b/drivers/base/bus.c
31962@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
31963 return ret;
31964 }
31965
31966-static struct sysfs_ops driver_sysfs_ops = {
31967+static const struct sysfs_ops driver_sysfs_ops = {
31968 .show = drv_attr_show,
31969 .store = drv_attr_store,
31970 };
31971@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
31972 return ret;
31973 }
31974
31975-static struct sysfs_ops bus_sysfs_ops = {
31976+static const struct sysfs_ops bus_sysfs_ops = {
31977 .show = bus_attr_show,
31978 .store = bus_attr_store,
31979 };
31980@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
31981 return 0;
31982 }
31983
31984-static struct kset_uevent_ops bus_uevent_ops = {
31985+static const struct kset_uevent_ops bus_uevent_ops = {
31986 .filter = bus_uevent_filter,
31987 };
31988
31989diff --git a/drivers/base/class.c b/drivers/base/class.c
31990index 6e2c3b0..cb61871 100644
31991--- a/drivers/base/class.c
31992+++ b/drivers/base/class.c
31993@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
31994 kfree(cp);
31995 }
31996
31997-static struct sysfs_ops class_sysfs_ops = {
31998+static const struct sysfs_ops class_sysfs_ops = {
31999 .show = class_attr_show,
32000 .store = class_attr_store,
32001 };
32002diff --git a/drivers/base/core.c b/drivers/base/core.c
32003index f33d768..a9358d0 100644
32004--- a/drivers/base/core.c
32005+++ b/drivers/base/core.c
32006@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
32007 return ret;
32008 }
32009
32010-static struct sysfs_ops dev_sysfs_ops = {
32011+static const struct sysfs_ops dev_sysfs_ops = {
32012 .show = dev_attr_show,
32013 .store = dev_attr_store,
32014 };
32015@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
32016 return retval;
32017 }
32018
32019-static struct kset_uevent_ops device_uevent_ops = {
32020+static const struct kset_uevent_ops device_uevent_ops = {
32021 .filter = dev_uevent_filter,
32022 .name = dev_uevent_name,
32023 .uevent = dev_uevent,
32024diff --git a/drivers/base/memory.c b/drivers/base/memory.c
32025index 989429c..2272b00 100644
32026--- a/drivers/base/memory.c
32027+++ b/drivers/base/memory.c
32028@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
32029 return retval;
32030 }
32031
32032-static struct kset_uevent_ops memory_uevent_ops = {
32033+static const struct kset_uevent_ops memory_uevent_ops = {
32034 .name = memory_uevent_name,
32035 .uevent = memory_uevent,
32036 };
32037diff --git a/drivers/base/sys.c b/drivers/base/sys.c
32038index 3f202f7..61c4a6f 100644
32039--- a/drivers/base/sys.c
32040+++ b/drivers/base/sys.c
32041@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
32042 return -EIO;
32043 }
32044
32045-static struct sysfs_ops sysfs_ops = {
32046+static const struct sysfs_ops sysfs_ops = {
32047 .show = sysdev_show,
32048 .store = sysdev_store,
32049 };
32050@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
32051 return -EIO;
32052 }
32053
32054-static struct sysfs_ops sysfs_class_ops = {
32055+static const struct sysfs_ops sysfs_class_ops = {
32056 .show = sysdev_class_show,
32057 .store = sysdev_class_store,
32058 };
32059diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
32060index eb4fa19..1954777 100644
32061--- a/drivers/block/DAC960.c
32062+++ b/drivers/block/DAC960.c
32063@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
32064 unsigned long flags;
32065 int Channel, TargetID;
32066
32067+ pax_track_stack();
32068+
32069 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
32070 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
32071 sizeof(DAC960_SCSI_Inquiry_T) +
32072diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32073index 68b90d9..7e2e3f3 100644
32074--- a/drivers/block/cciss.c
32075+++ b/drivers/block/cciss.c
32076@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32077 int err;
32078 u32 cp;
32079
32080+ memset(&arg64, 0, sizeof(arg64));
32081+
32082 err = 0;
32083 err |=
32084 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32085@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
32086 /* Wait (up to 20 seconds) for a command to complete */
32087
32088 for (i = 20 * HZ; i > 0; i--) {
32089- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32090+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32091 if (done == FIFO_EMPTY)
32092 schedule_timeout_uninterruptible(1);
32093 else
32094@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
32095 resend_cmd1:
32096
32097 /* Disable interrupt on the board. */
32098- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32099+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32100
32101 /* Make sure there is room in the command FIFO */
32102 /* Actually it should be completely empty at this time */
32103@@ -2884,13 +2886,13 @@ resend_cmd1:
32104 /* tape side of the driver. */
32105 for (i = 200000; i > 0; i--) {
32106 /* if fifo isn't full go */
32107- if (!(h->access.fifo_full(h)))
32108+ if (!(h->access->fifo_full(h)))
32109 break;
32110 udelay(10);
32111 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
32112 " waiting!\n", h->ctlr);
32113 }
32114- h->access.submit_command(h, c); /* Send the cmd */
32115+ h->access->submit_command(h, c); /* Send the cmd */
32116 do {
32117 complete = pollcomplete(h->ctlr);
32118
32119@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
32120 while (!hlist_empty(&h->reqQ)) {
32121 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
32122 /* can't do anything if fifo is full */
32123- if ((h->access.fifo_full(h))) {
32124+ if ((h->access->fifo_full(h))) {
32125 printk(KERN_WARNING "cciss: fifo full\n");
32126 break;
32127 }
32128@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
32129 h->Qdepth--;
32130
32131 /* Tell the controller execute command */
32132- h->access.submit_command(h, c);
32133+ h->access->submit_command(h, c);
32134
32135 /* Put job onto the completed Q */
32136 addQ(&h->cmpQ, c);
32137@@ -3393,17 +3395,17 @@ startio:
32138
32139 static inline unsigned long get_next_completion(ctlr_info_t *h)
32140 {
32141- return h->access.command_completed(h);
32142+ return h->access->command_completed(h);
32143 }
32144
32145 static inline int interrupt_pending(ctlr_info_t *h)
32146 {
32147- return h->access.intr_pending(h);
32148+ return h->access->intr_pending(h);
32149 }
32150
32151 static inline long interrupt_not_for_us(ctlr_info_t *h)
32152 {
32153- return (((h->access.intr_pending(h) == 0) ||
32154+ return (((h->access->intr_pending(h) == 0) ||
32155 (h->interrupts_enabled == 0)));
32156 }
32157
32158@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
32159 */
32160 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
32161 c->product_name = products[prod_index].product_name;
32162- c->access = *(products[prod_index].access);
32163+ c->access = products[prod_index].access;
32164 c->nr_cmds = c->max_commands - 4;
32165 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
32166 (readb(&c->cfgtable->Signature[1]) != 'I') ||
32167@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32168 }
32169
32170 /* make sure the board interrupts are off */
32171- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
32172+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
32173 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
32174 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
32175 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
32176@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
32177 cciss_scsi_setup(i);
32178
32179 /* Turn the interrupts on so we can service requests */
32180- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
32181+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
32182
32183 /* Get the firmware version */
32184 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32185diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32186index 04d6bf8..36e712d 100644
32187--- a/drivers/block/cciss.h
32188+++ b/drivers/block/cciss.h
32189@@ -90,7 +90,7 @@ struct ctlr_info
32190 // information about each logical volume
32191 drive_info_struct *drv[CISS_MAX_LUN];
32192
32193- struct access_method access;
32194+ struct access_method *access;
32195
32196 /* queue and queue Info */
32197 struct hlist_head reqQ;
32198diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32199index 6422651..bb1bdef 100644
32200--- a/drivers/block/cpqarray.c
32201+++ b/drivers/block/cpqarray.c
32202@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32203 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32204 goto Enomem4;
32205 }
32206- hba[i]->access.set_intr_mask(hba[i], 0);
32207+ hba[i]->access->set_intr_mask(hba[i], 0);
32208 if (request_irq(hba[i]->intr, do_ida_intr,
32209 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32210 {
32211@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
32212 add_timer(&hba[i]->timer);
32213
32214 /* Enable IRQ now that spinlock and rate limit timer are set up */
32215- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32216+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32217
32218 for(j=0; j<NWD; j++) {
32219 struct gendisk *disk = ida_gendisk[i][j];
32220@@ -695,7 +695,7 @@ DBGINFO(
32221 for(i=0; i<NR_PRODUCTS; i++) {
32222 if (board_id == products[i].board_id) {
32223 c->product_name = products[i].product_name;
32224- c->access = *(products[i].access);
32225+ c->access = products[i].access;
32226 break;
32227 }
32228 }
32229@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
32230 hba[ctlr]->intr = intr;
32231 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32232 hba[ctlr]->product_name = products[j].product_name;
32233- hba[ctlr]->access = *(products[j].access);
32234+ hba[ctlr]->access = products[j].access;
32235 hba[ctlr]->ctlr = ctlr;
32236 hba[ctlr]->board_id = board_id;
32237 hba[ctlr]->pci_dev = NULL; /* not PCI */
32238@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
32239 struct scatterlist tmp_sg[SG_MAX];
32240 int i, dir, seg;
32241
32242+ pax_track_stack();
32243+
32244 if (blk_queue_plugged(q))
32245 goto startio;
32246
32247@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
32248
32249 while((c = h->reqQ) != NULL) {
32250 /* Can't do anything if we're busy */
32251- if (h->access.fifo_full(h) == 0)
32252+ if (h->access->fifo_full(h) == 0)
32253 return;
32254
32255 /* Get the first entry from the request Q */
32256@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
32257 h->Qdepth--;
32258
32259 /* Tell the controller to do our bidding */
32260- h->access.submit_command(h, c);
32261+ h->access->submit_command(h, c);
32262
32263 /* Get onto the completion Q */
32264 addQ(&h->cmpQ, c);
32265@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32266 unsigned long flags;
32267 __u32 a,a1;
32268
32269- istat = h->access.intr_pending(h);
32270+ istat = h->access->intr_pending(h);
32271 /* Is this interrupt for us? */
32272 if (istat == 0)
32273 return IRQ_NONE;
32274@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32275 */
32276 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32277 if (istat & FIFO_NOT_EMPTY) {
32278- while((a = h->access.command_completed(h))) {
32279+ while((a = h->access->command_completed(h))) {
32280 a1 = a; a &= ~3;
32281 if ((c = h->cmpQ) == NULL)
32282 {
32283@@ -1434,11 +1436,11 @@ static int sendcmd(
32284 /*
32285 * Disable interrupt
32286 */
32287- info_p->access.set_intr_mask(info_p, 0);
32288+ info_p->access->set_intr_mask(info_p, 0);
32289 /* Make sure there is room in the command FIFO */
32290 /* Actually it should be completely empty at this time. */
32291 for (i = 200000; i > 0; i--) {
32292- temp = info_p->access.fifo_full(info_p);
32293+ temp = info_p->access->fifo_full(info_p);
32294 if (temp != 0) {
32295 break;
32296 }
32297@@ -1451,7 +1453,7 @@ DBG(
32298 /*
32299 * Send the cmd
32300 */
32301- info_p->access.submit_command(info_p, c);
32302+ info_p->access->submit_command(info_p, c);
32303 complete = pollcomplete(ctlr);
32304
32305 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32306@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32307 * we check the new geometry. Then turn interrupts back on when
32308 * we're done.
32309 */
32310- host->access.set_intr_mask(host, 0);
32311+ host->access->set_intr_mask(host, 0);
32312 getgeometry(ctlr);
32313- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32314+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32315
32316 for(i=0; i<NWD; i++) {
32317 struct gendisk *disk = ida_gendisk[ctlr][i];
32318@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
32319 /* Wait (up to 2 seconds) for a command to complete */
32320
32321 for (i = 200000; i > 0; i--) {
32322- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32323+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32324 if (done == 0) {
32325 udelay(10); /* a short fixed delay */
32326 } else
32327diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32328index be73e9d..7fbf140 100644
32329--- a/drivers/block/cpqarray.h
32330+++ b/drivers/block/cpqarray.h
32331@@ -99,7 +99,7 @@ struct ctlr_info {
32332 drv_info_t drv[NWD];
32333 struct proc_dir_entry *proc;
32334
32335- struct access_method access;
32336+ struct access_method *access;
32337
32338 cmdlist_t *reqQ;
32339 cmdlist_t *cmpQ;
32340diff --git a/drivers/block/loop.c b/drivers/block/loop.c
32341index 8ec2d70..2804b30 100644
32342--- a/drivers/block/loop.c
32343+++ b/drivers/block/loop.c
32344@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
32345 mm_segment_t old_fs = get_fs();
32346
32347 set_fs(get_ds());
32348- bw = file->f_op->write(file, buf, len, &pos);
32349+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
32350 set_fs(old_fs);
32351 if (likely(bw == len))
32352 return 0;
32353diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
32354index 26ada47..083c480 100644
32355--- a/drivers/block/nbd.c
32356+++ b/drivers/block/nbd.c
32357@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
32358 struct kvec iov;
32359 sigset_t blocked, oldset;
32360
32361+ pax_track_stack();
32362+
32363 if (unlikely(!sock)) {
32364 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
32365 lo->disk->disk_name, (send ? "send" : "recv"));
32366@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
32367 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
32368 unsigned int cmd, unsigned long arg)
32369 {
32370+ pax_track_stack();
32371+
32372 switch (cmd) {
32373 case NBD_DISCONNECT: {
32374 struct request sreq;
32375diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
32376index a5d585d..d087be3 100644
32377--- a/drivers/block/pktcdvd.c
32378+++ b/drivers/block/pktcdvd.c
32379@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
32380 return len;
32381 }
32382
32383-static struct sysfs_ops kobj_pkt_ops = {
32384+static const struct sysfs_ops kobj_pkt_ops = {
32385 .show = kobj_pkt_show,
32386 .store = kobj_pkt_store
32387 };
32388diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
32389index 6aad99e..89cd142 100644
32390--- a/drivers/char/Kconfig
32391+++ b/drivers/char/Kconfig
32392@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
32393
32394 config DEVKMEM
32395 bool "/dev/kmem virtual device support"
32396- default y
32397+ default n
32398+ depends on !GRKERNSEC_KMEM
32399 help
32400 Say Y here if you want to support the /dev/kmem device. The
32401 /dev/kmem device is rarely used, but can be used for certain
32402@@ -1114,6 +1115,7 @@ config DEVPORT
32403 bool
32404 depends on !M68K
32405 depends on ISA || PCI
32406+ depends on !GRKERNSEC_KMEM
32407 default y
32408
32409 source "drivers/s390/char/Kconfig"
32410diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
32411index a96f319..a778a5b 100644
32412--- a/drivers/char/agp/frontend.c
32413+++ b/drivers/char/agp/frontend.c
32414@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
32415 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
32416 return -EFAULT;
32417
32418- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
32419+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
32420 return -EFAULT;
32421
32422 client = agp_find_client_by_pid(reserve.pid);
32423diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
32424index d8cff90..9628e70 100644
32425--- a/drivers/char/briq_panel.c
32426+++ b/drivers/char/briq_panel.c
32427@@ -10,6 +10,7 @@
32428 #include <linux/types.h>
32429 #include <linux/errno.h>
32430 #include <linux/tty.h>
32431+#include <linux/mutex.h>
32432 #include <linux/timer.h>
32433 #include <linux/kernel.h>
32434 #include <linux/wait.h>
32435@@ -36,6 +37,7 @@ static int vfd_is_open;
32436 static unsigned char vfd[40];
32437 static int vfd_cursor;
32438 static unsigned char ledpb, led;
32439+static DEFINE_MUTEX(vfd_mutex);
32440
32441 static void update_vfd(void)
32442 {
32443@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32444 if (!vfd_is_open)
32445 return -EBUSY;
32446
32447+ mutex_lock(&vfd_mutex);
32448 for (;;) {
32449 char c;
32450 if (!indx)
32451 break;
32452- if (get_user(c, buf))
32453+ if (get_user(c, buf)) {
32454+ mutex_unlock(&vfd_mutex);
32455 return -EFAULT;
32456+ }
32457 if (esc) {
32458 set_led(c);
32459 esc = 0;
32460@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32461 buf++;
32462 }
32463 update_vfd();
32464+ mutex_unlock(&vfd_mutex);
32465
32466 return len;
32467 }
32468diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
32469index 31e7c91..161afc0 100644
32470--- a/drivers/char/genrtc.c
32471+++ b/drivers/char/genrtc.c
32472@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
32473 switch (cmd) {
32474
32475 case RTC_PLL_GET:
32476+ memset(&pll, 0, sizeof(pll));
32477 if (get_rtc_pll(&pll))
32478 return -EINVAL;
32479 else
32480diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
32481index 006466d..a2bb21c 100644
32482--- a/drivers/char/hpet.c
32483+++ b/drivers/char/hpet.c
32484@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
32485 return 0;
32486 }
32487
32488-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
32489+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
32490
32491 static int
32492 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
32493@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
32494 }
32495
32496 static int
32497-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32498+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
32499 {
32500 struct hpet_timer __iomem *timer;
32501 struct hpet __iomem *hpet;
32502@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32503 {
32504 struct hpet_info info;
32505
32506+ memset(&info, 0, sizeof(info));
32507+
32508 if (devp->hd_ireqfreq)
32509 info.hi_ireqfreq =
32510 hpet_time_div(hpetp, devp->hd_ireqfreq);
32511- else
32512- info.hi_ireqfreq = 0;
32513 info.hi_flags =
32514 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
32515 info.hi_hpet = hpetp->hp_which;
32516diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
32517index 0afc8b8..6913fc3 100644
32518--- a/drivers/char/hvc_beat.c
32519+++ b/drivers/char/hvc_beat.c
32520@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
32521 return cnt;
32522 }
32523
32524-static struct hv_ops hvc_beat_get_put_ops = {
32525+static const struct hv_ops hvc_beat_get_put_ops = {
32526 .get_chars = hvc_beat_get_chars,
32527 .put_chars = hvc_beat_put_chars,
32528 };
32529diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
32530index 98097f2..407dddc 100644
32531--- a/drivers/char/hvc_console.c
32532+++ b/drivers/char/hvc_console.c
32533@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
32534 * console interfaces but can still be used as a tty device. This has to be
32535 * static because kmalloc will not work during early console init.
32536 */
32537-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32538+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32539 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
32540 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
32541
32542@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
32543 * vty adapters do NOT get an hvc_instantiate() callback since they
32544 * appear after early console init.
32545 */
32546-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
32547+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
32548 {
32549 struct hvc_struct *hp;
32550
32551@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
32552 };
32553
32554 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
32555- struct hv_ops *ops, int outbuf_size)
32556+ const struct hv_ops *ops, int outbuf_size)
32557 {
32558 struct hvc_struct *hp;
32559 int i;
32560diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
32561index 10950ca..ed176c3 100644
32562--- a/drivers/char/hvc_console.h
32563+++ b/drivers/char/hvc_console.h
32564@@ -55,7 +55,7 @@ struct hvc_struct {
32565 int outbuf_size;
32566 int n_outbuf;
32567 uint32_t vtermno;
32568- struct hv_ops *ops;
32569+ const struct hv_ops *ops;
32570 int irq_requested;
32571 int data;
32572 struct winsize ws;
32573@@ -76,11 +76,11 @@ struct hv_ops {
32574 };
32575
32576 /* Register a vterm and a slot index for use as a console (console_init) */
32577-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
32578+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
32579
32580 /* register a vterm for hvc tty operation (module_init or hotplug add) */
32581 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
32582- struct hv_ops *ops, int outbuf_size);
32583+ const struct hv_ops *ops, int outbuf_size);
32584 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
32585 extern int hvc_remove(struct hvc_struct *hp);
32586
32587diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
32588index 936d05b..fd02426 100644
32589--- a/drivers/char/hvc_iseries.c
32590+++ b/drivers/char/hvc_iseries.c
32591@@ -197,7 +197,7 @@ done:
32592 return sent;
32593 }
32594
32595-static struct hv_ops hvc_get_put_ops = {
32596+static const struct hv_ops hvc_get_put_ops = {
32597 .get_chars = get_chars,
32598 .put_chars = put_chars,
32599 .notifier_add = notifier_add_irq,
32600diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
32601index b0e168f..69cda2a 100644
32602--- a/drivers/char/hvc_iucv.c
32603+++ b/drivers/char/hvc_iucv.c
32604@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
32605
32606
32607 /* HVC operations */
32608-static struct hv_ops hvc_iucv_ops = {
32609+static const struct hv_ops hvc_iucv_ops = {
32610 .get_chars = hvc_iucv_get_chars,
32611 .put_chars = hvc_iucv_put_chars,
32612 .notifier_add = hvc_iucv_notifier_add,
32613diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
32614index 88590d0..61c4a61 100644
32615--- a/drivers/char/hvc_rtas.c
32616+++ b/drivers/char/hvc_rtas.c
32617@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
32618 return i;
32619 }
32620
32621-static struct hv_ops hvc_rtas_get_put_ops = {
32622+static const struct hv_ops hvc_rtas_get_put_ops = {
32623 .get_chars = hvc_rtas_read_console,
32624 .put_chars = hvc_rtas_write_console,
32625 };
32626diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
32627index bd63ba8..b0957e6 100644
32628--- a/drivers/char/hvc_udbg.c
32629+++ b/drivers/char/hvc_udbg.c
32630@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
32631 return i;
32632 }
32633
32634-static struct hv_ops hvc_udbg_ops = {
32635+static const struct hv_ops hvc_udbg_ops = {
32636 .get_chars = hvc_udbg_get,
32637 .put_chars = hvc_udbg_put,
32638 };
32639diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
32640index 10be343..27370e9 100644
32641--- a/drivers/char/hvc_vio.c
32642+++ b/drivers/char/hvc_vio.c
32643@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
32644 return got;
32645 }
32646
32647-static struct hv_ops hvc_get_put_ops = {
32648+static const struct hv_ops hvc_get_put_ops = {
32649 .get_chars = filtered_get_chars,
32650 .put_chars = hvc_put_chars,
32651 .notifier_add = notifier_add_irq,
32652diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
32653index a6ee32b..94f8c26 100644
32654--- a/drivers/char/hvc_xen.c
32655+++ b/drivers/char/hvc_xen.c
32656@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
32657 return recv;
32658 }
32659
32660-static struct hv_ops hvc_ops = {
32661+static const struct hv_ops hvc_ops = {
32662 .get_chars = read_console,
32663 .put_chars = write_console,
32664 .notifier_add = notifier_add_irq,
32665diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
32666index 266b858..f3ee0bb 100644
32667--- a/drivers/char/hvcs.c
32668+++ b/drivers/char/hvcs.c
32669@@ -82,6 +82,7 @@
32670 #include <asm/hvcserver.h>
32671 #include <asm/uaccess.h>
32672 #include <asm/vio.h>
32673+#include <asm/local.h>
32674
32675 /*
32676 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32677@@ -269,7 +270,7 @@ struct hvcs_struct {
32678 unsigned int index;
32679
32680 struct tty_struct *tty;
32681- int open_count;
32682+ local_t open_count;
32683
32684 /*
32685 * Used to tell the driver kernel_thread what operations need to take
32686@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
32687
32688 spin_lock_irqsave(&hvcsd->lock, flags);
32689
32690- if (hvcsd->open_count > 0) {
32691+ if (local_read(&hvcsd->open_count) > 0) {
32692 spin_unlock_irqrestore(&hvcsd->lock, flags);
32693 printk(KERN_INFO "HVCS: vterm state unchanged. "
32694 "The hvcs device node is still in use.\n");
32695@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
32696 if ((retval = hvcs_partner_connect(hvcsd)))
32697 goto error_release;
32698
32699- hvcsd->open_count = 1;
32700+ local_set(&hvcsd->open_count, 1);
32701 hvcsd->tty = tty;
32702 tty->driver_data = hvcsd;
32703
32704@@ -1169,7 +1170,7 @@ fast_open:
32705
32706 spin_lock_irqsave(&hvcsd->lock, flags);
32707 kref_get(&hvcsd->kref);
32708- hvcsd->open_count++;
32709+ local_inc(&hvcsd->open_count);
32710 hvcsd->todo_mask |= HVCS_SCHED_READ;
32711 spin_unlock_irqrestore(&hvcsd->lock, flags);
32712
32713@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32714 hvcsd = tty->driver_data;
32715
32716 spin_lock_irqsave(&hvcsd->lock, flags);
32717- if (--hvcsd->open_count == 0) {
32718+ if (local_dec_and_test(&hvcsd->open_count)) {
32719
32720 vio_disable_interrupts(hvcsd->vdev);
32721
32722@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32723 free_irq(irq, hvcsd);
32724 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32725 return;
32726- } else if (hvcsd->open_count < 0) {
32727+ } else if (local_read(&hvcsd->open_count) < 0) {
32728 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32729 " is missmanaged.\n",
32730- hvcsd->vdev->unit_address, hvcsd->open_count);
32731+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32732 }
32733
32734 spin_unlock_irqrestore(&hvcsd->lock, flags);
32735@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32736
32737 spin_lock_irqsave(&hvcsd->lock, flags);
32738 /* Preserve this so that we know how many kref refs to put */
32739- temp_open_count = hvcsd->open_count;
32740+ temp_open_count = local_read(&hvcsd->open_count);
32741
32742 /*
32743 * Don't kref put inside the spinlock because the destruction
32744@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32745 hvcsd->tty->driver_data = NULL;
32746 hvcsd->tty = NULL;
32747
32748- hvcsd->open_count = 0;
32749+ local_set(&hvcsd->open_count, 0);
32750
32751 /* This will drop any buffered data on the floor which is OK in a hangup
32752 * scenario. */
32753@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
32754 * the middle of a write operation? This is a crummy place to do this
32755 * but we want to keep it all in the spinlock.
32756 */
32757- if (hvcsd->open_count <= 0) {
32758+ if (local_read(&hvcsd->open_count) <= 0) {
32759 spin_unlock_irqrestore(&hvcsd->lock, flags);
32760 return -ENODEV;
32761 }
32762@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
32763 {
32764 struct hvcs_struct *hvcsd = tty->driver_data;
32765
32766- if (!hvcsd || hvcsd->open_count <= 0)
32767+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32768 return 0;
32769
32770 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32771diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
32772index ec5e3f8..02455ba 100644
32773--- a/drivers/char/ipmi/ipmi_msghandler.c
32774+++ b/drivers/char/ipmi/ipmi_msghandler.c
32775@@ -414,7 +414,7 @@ struct ipmi_smi {
32776 struct proc_dir_entry *proc_dir;
32777 char proc_dir_name[10];
32778
32779- atomic_t stats[IPMI_NUM_STATS];
32780+ atomic_unchecked_t stats[IPMI_NUM_STATS];
32781
32782 /*
32783 * run_to_completion duplicate of smb_info, smi_info
32784@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
32785
32786
32787 #define ipmi_inc_stat(intf, stat) \
32788- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
32789+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
32790 #define ipmi_get_stat(intf, stat) \
32791- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
32792+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
32793
32794 static int is_lan_addr(struct ipmi_addr *addr)
32795 {
32796@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
32797 INIT_LIST_HEAD(&intf->cmd_rcvrs);
32798 init_waitqueue_head(&intf->waitq);
32799 for (i = 0; i < IPMI_NUM_STATS; i++)
32800- atomic_set(&intf->stats[i], 0);
32801+ atomic_set_unchecked(&intf->stats[i], 0);
32802
32803 intf->proc_dir = NULL;
32804
32805@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
32806 struct ipmi_smi_msg smi_msg;
32807 struct ipmi_recv_msg recv_msg;
32808
32809+ pax_track_stack();
32810+
32811 si = (struct ipmi_system_interface_addr *) &addr;
32812 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
32813 si->channel = IPMI_BMC_CHANNEL;
32814diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
32815index abae8c9..8021979 100644
32816--- a/drivers/char/ipmi/ipmi_si_intf.c
32817+++ b/drivers/char/ipmi/ipmi_si_intf.c
32818@@ -277,7 +277,7 @@ struct smi_info {
32819 unsigned char slave_addr;
32820
32821 /* Counters and things for the proc filesystem. */
32822- atomic_t stats[SI_NUM_STATS];
32823+ atomic_unchecked_t stats[SI_NUM_STATS];
32824
32825 struct task_struct *thread;
32826
32827@@ -285,9 +285,9 @@ struct smi_info {
32828 };
32829
32830 #define smi_inc_stat(smi, stat) \
32831- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
32832+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
32833 #define smi_get_stat(smi, stat) \
32834- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
32835+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
32836
32837 #define SI_MAX_PARMS 4
32838
32839@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
32840 atomic_set(&new_smi->req_events, 0);
32841 new_smi->run_to_completion = 0;
32842 for (i = 0; i < SI_NUM_STATS; i++)
32843- atomic_set(&new_smi->stats[i], 0);
32844+ atomic_set_unchecked(&new_smi->stats[i], 0);
32845
32846 new_smi->interrupt_disabled = 0;
32847 atomic_set(&new_smi->stop_operation, 0);
32848diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
32849index 402838f..55e2200 100644
32850--- a/drivers/char/istallion.c
32851+++ b/drivers/char/istallion.c
32852@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
32853 * re-used for each stats call.
32854 */
32855 static comstats_t stli_comstats;
32856-static combrd_t stli_brdstats;
32857 static struct asystats stli_cdkstats;
32858
32859 /*****************************************************************************/
32860@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
32861 {
32862 struct stlibrd *brdp;
32863 unsigned int i;
32864+ combrd_t stli_brdstats;
32865
32866 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
32867 return -EFAULT;
32868@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
32869 struct stliport stli_dummyport;
32870 struct stliport *portp;
32871
32872+ pax_track_stack();
32873+
32874 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32875 return -EFAULT;
32876 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32877@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
32878 struct stlibrd stli_dummybrd;
32879 struct stlibrd *brdp;
32880
32881+ pax_track_stack();
32882+
32883 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32884 return -EFAULT;
32885 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32886diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
32887index 950837c..e55a288 100644
32888--- a/drivers/char/keyboard.c
32889+++ b/drivers/char/keyboard.c
32890@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
32891 kbd->kbdmode == VC_MEDIUMRAW) &&
32892 value != KVAL(K_SAK))
32893 return; /* SAK is allowed even in raw mode */
32894+
32895+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32896+ {
32897+ void *func = fn_handler[value];
32898+ if (func == fn_show_state || func == fn_show_ptregs ||
32899+ func == fn_show_mem)
32900+ return;
32901+ }
32902+#endif
32903+
32904 fn_handler[value](vc);
32905 }
32906
32907@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
32908 .evbit = { BIT_MASK(EV_SND) },
32909 },
32910
32911- { }, /* Terminating entry */
32912+ { 0 }, /* Terminating entry */
32913 };
32914
32915 MODULE_DEVICE_TABLE(input, kbd_ids);
32916diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
32917index 87c67b4..230527a 100644
32918--- a/drivers/char/mbcs.c
32919+++ b/drivers/char/mbcs.c
32920@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
32921 return 0;
32922 }
32923
32924-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
32925+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
32926 {
32927 .part_num = MBCS_PART_NUM,
32928 .mfg_num = MBCS_MFG_NUM,
32929diff --git a/drivers/char/mem.c b/drivers/char/mem.c
32930index 1270f64..8495f49 100644
32931--- a/drivers/char/mem.c
32932+++ b/drivers/char/mem.c
32933@@ -18,6 +18,7 @@
32934 #include <linux/raw.h>
32935 #include <linux/tty.h>
32936 #include <linux/capability.h>
32937+#include <linux/security.h>
32938 #include <linux/ptrace.h>
32939 #include <linux/device.h>
32940 #include <linux/highmem.h>
32941@@ -35,6 +36,10 @@
32942 # include <linux/efi.h>
32943 #endif
32944
32945+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32946+extern struct file_operations grsec_fops;
32947+#endif
32948+
32949 static inline unsigned long size_inside_page(unsigned long start,
32950 unsigned long size)
32951 {
32952@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32953
32954 while (cursor < to) {
32955 if (!devmem_is_allowed(pfn)) {
32956+#ifdef CONFIG_GRKERNSEC_KMEM
32957+ gr_handle_mem_readwrite(from, to);
32958+#else
32959 printk(KERN_INFO
32960 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
32961 current->comm, from, to);
32962+#endif
32963 return 0;
32964 }
32965 cursor += PAGE_SIZE;
32966@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32967 }
32968 return 1;
32969 }
32970+#elif defined(CONFIG_GRKERNSEC_KMEM)
32971+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32972+{
32973+ return 0;
32974+}
32975 #else
32976 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32977 {
32978@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
32979 #endif
32980
32981 while (count > 0) {
32982+ char *temp;
32983+
32984 /*
32985 * Handle first page in case it's not aligned
32986 */
32987@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
32988 if (!ptr)
32989 return -EFAULT;
32990
32991- if (copy_to_user(buf, ptr, sz)) {
32992+#ifdef CONFIG_PAX_USERCOPY
32993+ temp = kmalloc(sz, GFP_KERNEL);
32994+ if (!temp) {
32995+ unxlate_dev_mem_ptr(p, ptr);
32996+ return -ENOMEM;
32997+ }
32998+ memcpy(temp, ptr, sz);
32999+#else
33000+ temp = ptr;
33001+#endif
33002+
33003+ if (copy_to_user(buf, temp, sz)) {
33004+
33005+#ifdef CONFIG_PAX_USERCOPY
33006+ kfree(temp);
33007+#endif
33008+
33009 unxlate_dev_mem_ptr(p, ptr);
33010 return -EFAULT;
33011 }
33012
33013+#ifdef CONFIG_PAX_USERCOPY
33014+ kfree(temp);
33015+#endif
33016+
33017 unxlate_dev_mem_ptr(p, ptr);
33018
33019 buf += sz;
33020@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33021 size_t count, loff_t *ppos)
33022 {
33023 unsigned long p = *ppos;
33024- ssize_t low_count, read, sz;
33025+ ssize_t low_count, read, sz, err = 0;
33026 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33027- int err = 0;
33028
33029 read = 0;
33030 if (p < (unsigned long) high_memory) {
33031@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33032 }
33033 #endif
33034 while (low_count > 0) {
33035+ char *temp;
33036+
33037 sz = size_inside_page(p, low_count);
33038
33039 /*
33040@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33041 */
33042 kbuf = xlate_dev_kmem_ptr((char *)p);
33043
33044- if (copy_to_user(buf, kbuf, sz))
33045+#ifdef CONFIG_PAX_USERCOPY
33046+ temp = kmalloc(sz, GFP_KERNEL);
33047+ if (!temp)
33048+ return -ENOMEM;
33049+ memcpy(temp, kbuf, sz);
33050+#else
33051+ temp = kbuf;
33052+#endif
33053+
33054+ err = copy_to_user(buf, temp, sz);
33055+
33056+#ifdef CONFIG_PAX_USERCOPY
33057+ kfree(temp);
33058+#endif
33059+
33060+ if (err)
33061 return -EFAULT;
33062 buf += sz;
33063 p += sz;
33064@@ -889,6 +941,9 @@ static const struct memdev {
33065 #ifdef CONFIG_CRASH_DUMP
33066 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33067 #endif
33068+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33069+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33070+#endif
33071 };
33072
33073 static int memory_open(struct inode *inode, struct file *filp)
33074diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
33075index 918711a..4ffaf5e 100644
33076--- a/drivers/char/mmtimer.c
33077+++ b/drivers/char/mmtimer.c
33078@@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
33079 return err;
33080 }
33081
33082-static struct k_clock sgi_clock = {
33083+static k_clock_no_const sgi_clock = {
33084 .res = 0,
33085 .clock_set = sgi_clock_set,
33086 .clock_get = sgi_clock_get,
33087diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
33088index 674b3ab..a8d1970 100644
33089--- a/drivers/char/pcmcia/ipwireless/tty.c
33090+++ b/drivers/char/pcmcia/ipwireless/tty.c
33091@@ -29,6 +29,7 @@
33092 #include <linux/tty_driver.h>
33093 #include <linux/tty_flip.h>
33094 #include <linux/uaccess.h>
33095+#include <asm/local.h>
33096
33097 #include "tty.h"
33098 #include "network.h"
33099@@ -51,7 +52,7 @@ struct ipw_tty {
33100 int tty_type;
33101 struct ipw_network *network;
33102 struct tty_struct *linux_tty;
33103- int open_count;
33104+ local_t open_count;
33105 unsigned int control_lines;
33106 struct mutex ipw_tty_mutex;
33107 int tx_bytes_queued;
33108@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33109 mutex_unlock(&tty->ipw_tty_mutex);
33110 return -ENODEV;
33111 }
33112- if (tty->open_count == 0)
33113+ if (local_read(&tty->open_count) == 0)
33114 tty->tx_bytes_queued = 0;
33115
33116- tty->open_count++;
33117+ local_inc(&tty->open_count);
33118
33119 tty->linux_tty = linux_tty;
33120 linux_tty->driver_data = tty;
33121@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
33122
33123 static void do_ipw_close(struct ipw_tty *tty)
33124 {
33125- tty->open_count--;
33126-
33127- if (tty->open_count == 0) {
33128+ if (local_dec_return(&tty->open_count) == 0) {
33129 struct tty_struct *linux_tty = tty->linux_tty;
33130
33131 if (linux_tty != NULL) {
33132@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
33133 return;
33134
33135 mutex_lock(&tty->ipw_tty_mutex);
33136- if (tty->open_count == 0) {
33137+ if (local_read(&tty->open_count) == 0) {
33138 mutex_unlock(&tty->ipw_tty_mutex);
33139 return;
33140 }
33141@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
33142 return;
33143 }
33144
33145- if (!tty->open_count) {
33146+ if (!local_read(&tty->open_count)) {
33147 mutex_unlock(&tty->ipw_tty_mutex);
33148 return;
33149 }
33150@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
33151 return -ENODEV;
33152
33153 mutex_lock(&tty->ipw_tty_mutex);
33154- if (!tty->open_count) {
33155+ if (!local_read(&tty->open_count)) {
33156 mutex_unlock(&tty->ipw_tty_mutex);
33157 return -EINVAL;
33158 }
33159@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
33160 if (!tty)
33161 return -ENODEV;
33162
33163- if (!tty->open_count)
33164+ if (!local_read(&tty->open_count))
33165 return -EINVAL;
33166
33167 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
33168@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
33169 if (!tty)
33170 return 0;
33171
33172- if (!tty->open_count)
33173+ if (!local_read(&tty->open_count))
33174 return 0;
33175
33176 return tty->tx_bytes_queued;
33177@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
33178 if (!tty)
33179 return -ENODEV;
33180
33181- if (!tty->open_count)
33182+ if (!local_read(&tty->open_count))
33183 return -EINVAL;
33184
33185 return get_control_lines(tty);
33186@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
33187 if (!tty)
33188 return -ENODEV;
33189
33190- if (!tty->open_count)
33191+ if (!local_read(&tty->open_count))
33192 return -EINVAL;
33193
33194 return set_control_lines(tty, set, clear);
33195@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
33196 if (!tty)
33197 return -ENODEV;
33198
33199- if (!tty->open_count)
33200+ if (!local_read(&tty->open_count))
33201 return -EINVAL;
33202
33203 /* FIXME: Exactly how is the tty object locked here .. */
33204@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
33205 against a parallel ioctl etc */
33206 mutex_lock(&ttyj->ipw_tty_mutex);
33207 }
33208- while (ttyj->open_count)
33209+ while (local_read(&ttyj->open_count))
33210 do_ipw_close(ttyj);
33211 ipwireless_disassociate_network_ttys(network,
33212 ttyj->channel_idx);
33213diff --git a/drivers/char/pty.c b/drivers/char/pty.c
33214index 62f282e..e45c45c 100644
33215--- a/drivers/char/pty.c
33216+++ b/drivers/char/pty.c
33217@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
33218 register_sysctl_table(pty_root_table);
33219
33220 /* Now create the /dev/ptmx special device */
33221+ pax_open_kernel();
33222 tty_default_fops(&ptmx_fops);
33223- ptmx_fops.open = ptmx_open;
33224+ *(void **)&ptmx_fops.open = ptmx_open;
33225+ pax_close_kernel();
33226
33227 cdev_init(&ptmx_cdev, &ptmx_fops);
33228 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33229diff --git a/drivers/char/random.c b/drivers/char/random.c
33230index 3a19e2d..6ed09d3 100644
33231--- a/drivers/char/random.c
33232+++ b/drivers/char/random.c
33233@@ -254,8 +254,13 @@
33234 /*
33235 * Configuration information
33236 */
33237+#ifdef CONFIG_GRKERNSEC_RANDNET
33238+#define INPUT_POOL_WORDS 512
33239+#define OUTPUT_POOL_WORDS 128
33240+#else
33241 #define INPUT_POOL_WORDS 128
33242 #define OUTPUT_POOL_WORDS 32
33243+#endif
33244 #define SEC_XFER_SIZE 512
33245
33246 /*
33247@@ -292,10 +297,17 @@ static struct poolinfo {
33248 int poolwords;
33249 int tap1, tap2, tap3, tap4, tap5;
33250 } poolinfo_table[] = {
33251+#ifdef CONFIG_GRKERNSEC_RANDNET
33252+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33253+ { 512, 411, 308, 208, 104, 1 },
33254+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33255+ { 128, 103, 76, 51, 25, 1 },
33256+#else
33257 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33258 { 128, 103, 76, 51, 25, 1 },
33259 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33260 { 32, 26, 20, 14, 7, 1 },
33261+#endif
33262 #if 0
33263 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33264 { 2048, 1638, 1231, 819, 411, 1 },
33265@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33266 #include <linux/sysctl.h>
33267
33268 static int min_read_thresh = 8, min_write_thresh;
33269-static int max_read_thresh = INPUT_POOL_WORDS * 32;
33270+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33271 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33272 static char sysctl_bootid[16];
33273
33274diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
33275index 0e29a23..0efc2c2 100644
33276--- a/drivers/char/rocket.c
33277+++ b/drivers/char/rocket.c
33278@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
33279 struct rocket_ports tmp;
33280 int board;
33281
33282+ pax_track_stack();
33283+
33284 if (!retports)
33285 return -EFAULT;
33286 memset(&tmp, 0, sizeof (tmp));
33287diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33288index 8c262aa..4d3b058 100644
33289--- a/drivers/char/sonypi.c
33290+++ b/drivers/char/sonypi.c
33291@@ -55,6 +55,7 @@
33292 #include <asm/uaccess.h>
33293 #include <asm/io.h>
33294 #include <asm/system.h>
33295+#include <asm/local.h>
33296
33297 #include <linux/sonypi.h>
33298
33299@@ -491,7 +492,7 @@ static struct sonypi_device {
33300 spinlock_t fifo_lock;
33301 wait_queue_head_t fifo_proc_list;
33302 struct fasync_struct *fifo_async;
33303- int open_count;
33304+ local_t open_count;
33305 int model;
33306 struct input_dev *input_jog_dev;
33307 struct input_dev *input_key_dev;
33308@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33309 static int sonypi_misc_release(struct inode *inode, struct file *file)
33310 {
33311 mutex_lock(&sonypi_device.lock);
33312- sonypi_device.open_count--;
33313+ local_dec(&sonypi_device.open_count);
33314 mutex_unlock(&sonypi_device.lock);
33315 return 0;
33316 }
33317@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33318 lock_kernel();
33319 mutex_lock(&sonypi_device.lock);
33320 /* Flush input queue on first open */
33321- if (!sonypi_device.open_count)
33322+ if (!local_read(&sonypi_device.open_count))
33323 kfifo_reset(sonypi_device.fifo);
33324- sonypi_device.open_count++;
33325+ local_inc(&sonypi_device.open_count);
33326 mutex_unlock(&sonypi_device.lock);
33327 unlock_kernel();
33328 return 0;
33329diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
33330index db6dcfa..13834cb 100644
33331--- a/drivers/char/stallion.c
33332+++ b/drivers/char/stallion.c
33333@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
33334 struct stlport stl_dummyport;
33335 struct stlport *portp;
33336
33337+ pax_track_stack();
33338+
33339 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
33340 return -EFAULT;
33341 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
33342diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33343index a0789f6..cea3902 100644
33344--- a/drivers/char/tpm/tpm.c
33345+++ b/drivers/char/tpm/tpm.c
33346@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33347 chip->vendor.req_complete_val)
33348 goto out_recv;
33349
33350- if ((status == chip->vendor.req_canceled)) {
33351+ if (status == chip->vendor.req_canceled) {
33352 dev_err(chip->dev, "Operation Canceled\n");
33353 rc = -ECANCELED;
33354 goto out;
33355@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
33356
33357 struct tpm_chip *chip = dev_get_drvdata(dev);
33358
33359+ pax_track_stack();
33360+
33361 tpm_cmd.header.in = tpm_readpubek_header;
33362 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
33363 "attempting to read the PUBEK");
33364diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
33365index bf2170f..ce8cab9 100644
33366--- a/drivers/char/tpm/tpm_bios.c
33367+++ b/drivers/char/tpm/tpm_bios.c
33368@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33369 event = addr;
33370
33371 if ((event->event_type == 0 && event->event_size == 0) ||
33372- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33373+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33374 return NULL;
33375
33376 return addr;
33377@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33378 return NULL;
33379
33380 if ((event->event_type == 0 && event->event_size == 0) ||
33381- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33382+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33383 return NULL;
33384
33385 (*pos)++;
33386@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33387 int i;
33388
33389 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33390- seq_putc(m, data[i]);
33391+ if (!seq_putc(m, data[i]))
33392+ return -EFAULT;
33393
33394 return 0;
33395 }
33396@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
33397 log->bios_event_log_end = log->bios_event_log + len;
33398
33399 virt = acpi_os_map_memory(start, len);
33400+ if (!virt) {
33401+ kfree(log->bios_event_log);
33402+ log->bios_event_log = NULL;
33403+ return -EFAULT;
33404+ }
33405
33406- memcpy(log->bios_event_log, virt, len);
33407+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
33408
33409 acpi_os_unmap_memory(virt, len);
33410 return 0;
33411diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
33412index 123cedf..6664cb4 100644
33413--- a/drivers/char/tty_io.c
33414+++ b/drivers/char/tty_io.c
33415@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
33416 static int tty_release(struct inode *, struct file *);
33417 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
33418 #ifdef CONFIG_COMPAT
33419-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33420+long tty_compat_ioctl(struct file *file, unsigned int cmd,
33421 unsigned long arg);
33422 #else
33423 #define tty_compat_ioctl NULL
33424@@ -1774,6 +1774,7 @@ got_driver:
33425
33426 if (IS_ERR(tty)) {
33427 mutex_unlock(&tty_mutex);
33428+ tty_driver_kref_put(driver);
33429 return PTR_ERR(tty);
33430 }
33431 }
33432@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33433 return retval;
33434 }
33435
33436+EXPORT_SYMBOL(tty_ioctl);
33437+
33438 #ifdef CONFIG_COMPAT
33439-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33440+long tty_compat_ioctl(struct file *file, unsigned int cmd,
33441 unsigned long arg)
33442 {
33443 struct inode *inode = file->f_dentry->d_inode;
33444@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
33445
33446 return retval;
33447 }
33448+
33449+EXPORT_SYMBOL(tty_compat_ioctl);
33450 #endif
33451
33452 /*
33453@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33454
33455 void tty_default_fops(struct file_operations *fops)
33456 {
33457- *fops = tty_fops;
33458+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33459 }
33460
33461 /*
33462diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
33463index d814a3d..b55b9c9 100644
33464--- a/drivers/char/tty_ldisc.c
33465+++ b/drivers/char/tty_ldisc.c
33466@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
33467 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33468 struct tty_ldisc_ops *ldo = ld->ops;
33469
33470- ldo->refcount--;
33471+ atomic_dec(&ldo->refcount);
33472 module_put(ldo->owner);
33473 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33474
33475@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
33476 spin_lock_irqsave(&tty_ldisc_lock, flags);
33477 tty_ldiscs[disc] = new_ldisc;
33478 new_ldisc->num = disc;
33479- new_ldisc->refcount = 0;
33480+ atomic_set(&new_ldisc->refcount, 0);
33481 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33482
33483 return ret;
33484@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33485 return -EINVAL;
33486
33487 spin_lock_irqsave(&tty_ldisc_lock, flags);
33488- if (tty_ldiscs[disc]->refcount)
33489+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33490 ret = -EBUSY;
33491 else
33492 tty_ldiscs[disc] = NULL;
33493@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
33494 if (ldops) {
33495 ret = ERR_PTR(-EAGAIN);
33496 if (try_module_get(ldops->owner)) {
33497- ldops->refcount++;
33498+ atomic_inc(&ldops->refcount);
33499 ret = ldops;
33500 }
33501 }
33502@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
33503 unsigned long flags;
33504
33505 spin_lock_irqsave(&tty_ldisc_lock, flags);
33506- ldops->refcount--;
33507+ atomic_dec(&ldops->refcount);
33508 module_put(ldops->owner);
33509 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33510 }
33511diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33512index a035ae3..c27fe2c 100644
33513--- a/drivers/char/virtio_console.c
33514+++ b/drivers/char/virtio_console.c
33515@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
33516 * virtqueue, so we let the drivers do some boutique early-output thing. */
33517 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
33518 {
33519- virtio_cons.put_chars = put_chars;
33520+ pax_open_kernel();
33521+ *(void **)&virtio_cons.put_chars = put_chars;
33522+ pax_close_kernel();
33523 return hvc_instantiate(0, 0, &virtio_cons);
33524 }
33525
33526@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
33527 out_vq = vqs[1];
33528
33529 /* Start using the new console output. */
33530- virtio_cons.get_chars = get_chars;
33531- virtio_cons.put_chars = put_chars;
33532- virtio_cons.notifier_add = notifier_add_vio;
33533- virtio_cons.notifier_del = notifier_del_vio;
33534- virtio_cons.notifier_hangup = notifier_del_vio;
33535+ pax_open_kernel();
33536+ *(void **)&virtio_cons.get_chars = get_chars;
33537+ *(void **)&virtio_cons.put_chars = put_chars;
33538+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
33539+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
33540+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
33541+ pax_close_kernel();
33542
33543 /* The first argument of hvc_alloc() is the virtual console number, so
33544 * we use zero. The second argument is the parameter for the
33545diff --git a/drivers/char/vt.c b/drivers/char/vt.c
33546index 0c80c68..53d59c1 100644
33547--- a/drivers/char/vt.c
33548+++ b/drivers/char/vt.c
33549@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
33550
33551 static void notify_write(struct vc_data *vc, unsigned int unicode)
33552 {
33553- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33554+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33555 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33556 }
33557
33558diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
33559index 6351a26..999af95 100644
33560--- a/drivers/char/vt_ioctl.c
33561+++ b/drivers/char/vt_ioctl.c
33562@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33563 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33564 return -EFAULT;
33565
33566- if (!capable(CAP_SYS_TTY_CONFIG))
33567- perm = 0;
33568-
33569 switch (cmd) {
33570 case KDGKBENT:
33571 key_map = key_maps[s];
33572@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33573 val = (i ? K_HOLE : K_NOSUCHMAP);
33574 return put_user(val, &user_kbe->kb_value);
33575 case KDSKBENT:
33576+ if (!capable(CAP_SYS_TTY_CONFIG))
33577+ perm = 0;
33578+
33579 if (!perm)
33580 return -EPERM;
33581+
33582 if (!i && v == K_NOSUCHMAP) {
33583 /* deallocate map */
33584 key_map = key_maps[s];
33585@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33586 int i, j, k;
33587 int ret;
33588
33589- if (!capable(CAP_SYS_TTY_CONFIG))
33590- perm = 0;
33591-
33592 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33593 if (!kbs) {
33594 ret = -ENOMEM;
33595@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33596 kfree(kbs);
33597 return ((p && *p) ? -EOVERFLOW : 0);
33598 case KDSKBSENT:
33599+ if (!capable(CAP_SYS_TTY_CONFIG))
33600+ perm = 0;
33601+
33602 if (!perm) {
33603 ret = -EPERM;
33604 goto reterr;
33605diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33606index c7ae026..1769c1d 100644
33607--- a/drivers/cpufreq/cpufreq.c
33608+++ b/drivers/cpufreq/cpufreq.c
33609@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
33610 complete(&policy->kobj_unregister);
33611 }
33612
33613-static struct sysfs_ops sysfs_ops = {
33614+static const struct sysfs_ops sysfs_ops = {
33615 .show = show,
33616 .store = store,
33617 };
33618diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
33619index 97b0038..2056670 100644
33620--- a/drivers/cpuidle/sysfs.c
33621+++ b/drivers/cpuidle/sysfs.c
33622@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
33623 return ret;
33624 }
33625
33626-static struct sysfs_ops cpuidle_sysfs_ops = {
33627+static const struct sysfs_ops cpuidle_sysfs_ops = {
33628 .show = cpuidle_show,
33629 .store = cpuidle_store,
33630 };
33631@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
33632 return ret;
33633 }
33634
33635-static struct sysfs_ops cpuidle_state_sysfs_ops = {
33636+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
33637 .show = cpuidle_state_show,
33638 };
33639
33640@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
33641 .release = cpuidle_state_sysfs_release,
33642 };
33643
33644-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33645+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33646 {
33647 kobject_put(&device->kobjs[i]->kobj);
33648 wait_for_completion(&device->kobjs[i]->kobj_unregister);
33649diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
33650index 5f753fc..0377ae9 100644
33651--- a/drivers/crypto/hifn_795x.c
33652+++ b/drivers/crypto/hifn_795x.c
33653@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
33654 0xCA, 0x34, 0x2B, 0x2E};
33655 struct scatterlist sg;
33656
33657+ pax_track_stack();
33658+
33659 memset(src, 0, sizeof(src));
33660 memset(ctx.key, 0, sizeof(ctx.key));
33661
33662diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
33663index 71e6482..de8d96c 100644
33664--- a/drivers/crypto/padlock-aes.c
33665+++ b/drivers/crypto/padlock-aes.c
33666@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
33667 struct crypto_aes_ctx gen_aes;
33668 int cpu;
33669
33670+ pax_track_stack();
33671+
33672 if (key_len % 8) {
33673 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
33674 return -EINVAL;
33675diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
33676index dcc4ab7..cc834bb 100644
33677--- a/drivers/dma/ioat/dma.c
33678+++ b/drivers/dma/ioat/dma.c
33679@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
33680 return entry->show(&chan->common, page);
33681 }
33682
33683-struct sysfs_ops ioat_sysfs_ops = {
33684+const struct sysfs_ops ioat_sysfs_ops = {
33685 .show = ioat_attr_show,
33686 };
33687
33688diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
33689index bbc3e78..f2db62c 100644
33690--- a/drivers/dma/ioat/dma.h
33691+++ b/drivers/dma/ioat/dma.h
33692@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
33693 unsigned long *phys_complete);
33694 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
33695 void ioat_kobject_del(struct ioatdma_device *device);
33696-extern struct sysfs_ops ioat_sysfs_ops;
33697+extern const struct sysfs_ops ioat_sysfs_ops;
33698 extern struct ioat_sysfs_entry ioat_version_attr;
33699 extern struct ioat_sysfs_entry ioat_cap_attr;
33700 #endif /* IOATDMA_H */
33701diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
33702index 9908c9e..3ceb0e5 100644
33703--- a/drivers/dma/ioat/dma_v3.c
33704+++ b/drivers/dma/ioat/dma_v3.c
33705@@ -71,10 +71,10 @@
33706 /* provide a lookup table for setting the source address in the base or
33707 * extended descriptor of an xor or pq descriptor
33708 */
33709-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
33710-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
33711-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
33712-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
33713+static const u8 xor_idx_to_desc = 0xd0;
33714+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
33715+static const u8 pq_idx_to_desc = 0xf8;
33716+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
33717
33718 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
33719 {
33720diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
33721index 85c464a..afd1e73 100644
33722--- a/drivers/edac/amd64_edac.c
33723+++ b/drivers/edac/amd64_edac.c
33724@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
33725 * PCI core identifies what devices are on a system during boot, and then
33726 * inquiry this table to see if this driver is for a given device found.
33727 */
33728-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
33729+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
33730 {
33731 .vendor = PCI_VENDOR_ID_AMD,
33732 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
33733diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
33734index 2b95f1a..4f52793 100644
33735--- a/drivers/edac/amd76x_edac.c
33736+++ b/drivers/edac/amd76x_edac.c
33737@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
33738 edac_mc_free(mci);
33739 }
33740
33741-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
33742+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
33743 {
33744 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33745 AMD762},
33746diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
33747index d205d49..74c9672 100644
33748--- a/drivers/edac/e752x_edac.c
33749+++ b/drivers/edac/e752x_edac.c
33750@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
33751 edac_mc_free(mci);
33752 }
33753
33754-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
33755+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
33756 {
33757 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33758 E7520},
33759diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
33760index c7d11cc..c59c1ca 100644
33761--- a/drivers/edac/e7xxx_edac.c
33762+++ b/drivers/edac/e7xxx_edac.c
33763@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
33764 edac_mc_free(mci);
33765 }
33766
33767-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
33768+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
33769 {
33770 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33771 E7205},
33772diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
33773index 5376457..5fdedbc 100644
33774--- a/drivers/edac/edac_device_sysfs.c
33775+++ b/drivers/edac/edac_device_sysfs.c
33776@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
33777 }
33778
33779 /* edac_dev file operations for an 'ctl_info' */
33780-static struct sysfs_ops device_ctl_info_ops = {
33781+static const struct sysfs_ops device_ctl_info_ops = {
33782 .show = edac_dev_ctl_info_show,
33783 .store = edac_dev_ctl_info_store
33784 };
33785@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
33786 }
33787
33788 /* edac_dev file operations for an 'instance' */
33789-static struct sysfs_ops device_instance_ops = {
33790+static const struct sysfs_ops device_instance_ops = {
33791 .show = edac_dev_instance_show,
33792 .store = edac_dev_instance_store
33793 };
33794@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
33795 }
33796
33797 /* edac_dev file operations for a 'block' */
33798-static struct sysfs_ops device_block_ops = {
33799+static const struct sysfs_ops device_block_ops = {
33800 .show = edac_dev_block_show,
33801 .store = edac_dev_block_store
33802 };
33803diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
33804index e1d4ce0..88840e9 100644
33805--- a/drivers/edac/edac_mc_sysfs.c
33806+++ b/drivers/edac/edac_mc_sysfs.c
33807@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
33808 return -EIO;
33809 }
33810
33811-static struct sysfs_ops csrowfs_ops = {
33812+static const struct sysfs_ops csrowfs_ops = {
33813 .show = csrowdev_show,
33814 .store = csrowdev_store
33815 };
33816@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
33817 }
33818
33819 /* Intermediate show/store table */
33820-static struct sysfs_ops mci_ops = {
33821+static const struct sysfs_ops mci_ops = {
33822 .show = mcidev_show,
33823 .store = mcidev_store
33824 };
33825diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
33826index 422728c..d8d9c88 100644
33827--- a/drivers/edac/edac_pci_sysfs.c
33828+++ b/drivers/edac/edac_pci_sysfs.c
33829@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
33830 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
33831 static int edac_pci_poll_msec = 1000; /* one second workq period */
33832
33833-static atomic_t pci_parity_count = ATOMIC_INIT(0);
33834-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
33835+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
33836+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
33837
33838 static struct kobject *edac_pci_top_main_kobj;
33839 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33840@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
33841 }
33842
33843 /* fs_ops table */
33844-static struct sysfs_ops pci_instance_ops = {
33845+static const struct sysfs_ops pci_instance_ops = {
33846 .show = edac_pci_instance_show,
33847 .store = edac_pci_instance_store
33848 };
33849@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
33850 return -EIO;
33851 }
33852
33853-static struct sysfs_ops edac_pci_sysfs_ops = {
33854+static const struct sysfs_ops edac_pci_sysfs_ops = {
33855 .show = edac_pci_dev_show,
33856 .store = edac_pci_dev_store
33857 };
33858@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33859 edac_printk(KERN_CRIT, EDAC_PCI,
33860 "Signaled System Error on %s\n",
33861 pci_name(dev));
33862- atomic_inc(&pci_nonparity_count);
33863+ atomic_inc_unchecked(&pci_nonparity_count);
33864 }
33865
33866 if (status & (PCI_STATUS_PARITY)) {
33867@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33868 "Master Data Parity Error on %s\n",
33869 pci_name(dev));
33870
33871- atomic_inc(&pci_parity_count);
33872+ atomic_inc_unchecked(&pci_parity_count);
33873 }
33874
33875 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33876@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33877 "Detected Parity Error on %s\n",
33878 pci_name(dev));
33879
33880- atomic_inc(&pci_parity_count);
33881+ atomic_inc_unchecked(&pci_parity_count);
33882 }
33883 }
33884
33885@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33886 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33887 "Signaled System Error on %s\n",
33888 pci_name(dev));
33889- atomic_inc(&pci_nonparity_count);
33890+ atomic_inc_unchecked(&pci_nonparity_count);
33891 }
33892
33893 if (status & (PCI_STATUS_PARITY)) {
33894@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33895 "Master Data Parity Error on "
33896 "%s\n", pci_name(dev));
33897
33898- atomic_inc(&pci_parity_count);
33899+ atomic_inc_unchecked(&pci_parity_count);
33900 }
33901
33902 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33903@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33904 "Detected Parity Error on %s\n",
33905 pci_name(dev));
33906
33907- atomic_inc(&pci_parity_count);
33908+ atomic_inc_unchecked(&pci_parity_count);
33909 }
33910 }
33911 }
33912@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
33913 if (!check_pci_errors)
33914 return;
33915
33916- before_count = atomic_read(&pci_parity_count);
33917+ before_count = atomic_read_unchecked(&pci_parity_count);
33918
33919 /* scan all PCI devices looking for a Parity Error on devices and
33920 * bridges.
33921@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
33922 /* Only if operator has selected panic on PCI Error */
33923 if (edac_pci_get_panic_on_pe()) {
33924 /* If the count is different 'after' from 'before' */
33925- if (before_count != atomic_read(&pci_parity_count))
33926+ if (before_count != atomic_read_unchecked(&pci_parity_count))
33927 panic("EDAC: PCI Parity Error");
33928 }
33929 }
33930diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
33931index 6c9a0f2..9c1cf7e 100644
33932--- a/drivers/edac/i3000_edac.c
33933+++ b/drivers/edac/i3000_edac.c
33934@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
33935 edac_mc_free(mci);
33936 }
33937
33938-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
33939+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
33940 {
33941 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33942 I3000},
33943diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
33944index fde4db9..fe108f9 100644
33945--- a/drivers/edac/i3200_edac.c
33946+++ b/drivers/edac/i3200_edac.c
33947@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
33948 edac_mc_free(mci);
33949 }
33950
33951-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
33952+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
33953 {
33954 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33955 I3200},
33956diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
33957index adc10a2..57d4ccf 100644
33958--- a/drivers/edac/i5000_edac.c
33959+++ b/drivers/edac/i5000_edac.c
33960@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
33961 *
33962 * The "E500P" device is the first device supported.
33963 */
33964-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
33965+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
33966 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
33967 .driver_data = I5000P},
33968
33969diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
33970index 22db05a..b2b5503 100644
33971--- a/drivers/edac/i5100_edac.c
33972+++ b/drivers/edac/i5100_edac.c
33973@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
33974 edac_mc_free(mci);
33975 }
33976
33977-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
33978+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
33979 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
33980 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
33981 { 0, }
33982diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
33983index f99d106..f050710 100644
33984--- a/drivers/edac/i5400_edac.c
33985+++ b/drivers/edac/i5400_edac.c
33986@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
33987 *
33988 * The "E500P" device is the first device supported.
33989 */
33990-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
33991+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
33992 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
33993 {0,} /* 0 terminated list. */
33994 };
33995diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
33996index 577760a..9ce16ce 100644
33997--- a/drivers/edac/i82443bxgx_edac.c
33998+++ b/drivers/edac/i82443bxgx_edac.c
33999@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
34000
34001 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
34002
34003-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
34004+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
34005 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
34006 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
34007 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
34008diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
34009index c0088ba..64a7b98 100644
34010--- a/drivers/edac/i82860_edac.c
34011+++ b/drivers/edac/i82860_edac.c
34012@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
34013 edac_mc_free(mci);
34014 }
34015
34016-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
34017+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
34018 {
34019 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34020 I82860},
34021diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
34022index b2d83b9..a34357b 100644
34023--- a/drivers/edac/i82875p_edac.c
34024+++ b/drivers/edac/i82875p_edac.c
34025@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
34026 edac_mc_free(mci);
34027 }
34028
34029-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
34030+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
34031 {
34032 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34033 I82875P},
34034diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
34035index 2eed3ea..87bbbd1 100644
34036--- a/drivers/edac/i82975x_edac.c
34037+++ b/drivers/edac/i82975x_edac.c
34038@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
34039 edac_mc_free(mci);
34040 }
34041
34042-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
34043+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
34044 {
34045 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34046 I82975X
34047diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
34048index 9900675..78ac2b6 100644
34049--- a/drivers/edac/r82600_edac.c
34050+++ b/drivers/edac/r82600_edac.c
34051@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
34052 edac_mc_free(mci);
34053 }
34054
34055-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
34056+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
34057 {
34058 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
34059 },
34060diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
34061index d4ec605..4cfec4e 100644
34062--- a/drivers/edac/x38_edac.c
34063+++ b/drivers/edac/x38_edac.c
34064@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
34065 edac_mc_free(mci);
34066 }
34067
34068-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
34069+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
34070 {
34071 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
34072 X38},
34073diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34074index 3fc2ceb..daf098f 100644
34075--- a/drivers/firewire/core-card.c
34076+++ b/drivers/firewire/core-card.c
34077@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
34078
34079 void fw_core_remove_card(struct fw_card *card)
34080 {
34081- struct fw_card_driver dummy_driver = dummy_driver_template;
34082+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34083
34084 card->driver->update_phy_reg(card, 4,
34085 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34086diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34087index 4560d8f..36db24a 100644
34088--- a/drivers/firewire/core-cdev.c
34089+++ b/drivers/firewire/core-cdev.c
34090@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
34091 int ret;
34092
34093 if ((request->channels == 0 && request->bandwidth == 0) ||
34094- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34095- request->bandwidth < 0)
34096+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34097 return -EINVAL;
34098
34099 r = kmalloc(sizeof(*r), GFP_KERNEL);
34100diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34101index da628c7..cf54a2c 100644
34102--- a/drivers/firewire/core-transaction.c
34103+++ b/drivers/firewire/core-transaction.c
34104@@ -36,6 +36,7 @@
34105 #include <linux/string.h>
34106 #include <linux/timer.h>
34107 #include <linux/types.h>
34108+#include <linux/sched.h>
34109
34110 #include <asm/byteorder.h>
34111
34112@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
34113 struct transaction_callback_data d;
34114 struct fw_transaction t;
34115
34116+ pax_track_stack();
34117+
34118 init_completion(&d.done);
34119 d.payload = payload;
34120 fw_send_request(card, &t, tcode, destination_id, generation, speed,
34121diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34122index 7ff6e75..a2965d9 100644
34123--- a/drivers/firewire/core.h
34124+++ b/drivers/firewire/core.h
34125@@ -86,6 +86,7 @@ struct fw_card_driver {
34126
34127 int (*stop_iso)(struct fw_iso_context *ctx);
34128 };
34129+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34130
34131 void fw_card_initialize(struct fw_card *card,
34132 const struct fw_card_driver *driver, struct device *device);
34133diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34134index 3a2ccb0..82fd7c4 100644
34135--- a/drivers/firmware/dmi_scan.c
34136+++ b/drivers/firmware/dmi_scan.c
34137@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
34138 }
34139 }
34140 else {
34141- /*
34142- * no iounmap() for that ioremap(); it would be a no-op, but
34143- * it's so early in setup that sucker gets confused into doing
34144- * what it shouldn't if we actually call it.
34145- */
34146 p = dmi_ioremap(0xF0000, 0x10000);
34147 if (p == NULL)
34148 goto error;
34149@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34150 if (buf == NULL)
34151 return -1;
34152
34153- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34154+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34155
34156 iounmap(buf);
34157 return 0;
34158diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
34159index 9e4f59d..110e24e 100644
34160--- a/drivers/firmware/edd.c
34161+++ b/drivers/firmware/edd.c
34162@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
34163 return ret;
34164 }
34165
34166-static struct sysfs_ops edd_attr_ops = {
34167+static const struct sysfs_ops edd_attr_ops = {
34168 .show = edd_attr_show,
34169 };
34170
34171diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34172index f4f709d..082f06e 100644
34173--- a/drivers/firmware/efivars.c
34174+++ b/drivers/firmware/efivars.c
34175@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
34176 return ret;
34177 }
34178
34179-static struct sysfs_ops efivar_attr_ops = {
34180+static const struct sysfs_ops efivar_attr_ops = {
34181 .show = efivar_attr_show,
34182 .store = efivar_attr_store,
34183 };
34184diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
34185index 051d1eb..0a5d4e7 100644
34186--- a/drivers/firmware/iscsi_ibft.c
34187+++ b/drivers/firmware/iscsi_ibft.c
34188@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
34189 return ret;
34190 }
34191
34192-static struct sysfs_ops ibft_attr_ops = {
34193+static const struct sysfs_ops ibft_attr_ops = {
34194 .show = ibft_show_attribute,
34195 };
34196
34197diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
34198index 56f9234..8c58c7b 100644
34199--- a/drivers/firmware/memmap.c
34200+++ b/drivers/firmware/memmap.c
34201@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
34202 NULL
34203 };
34204
34205-static struct sysfs_ops memmap_attr_ops = {
34206+static const struct sysfs_ops memmap_attr_ops = {
34207 .show = memmap_attr_show,
34208 };
34209
34210diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
34211index b16c9a8..2af7d3f 100644
34212--- a/drivers/gpio/vr41xx_giu.c
34213+++ b/drivers/gpio/vr41xx_giu.c
34214@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34215 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34216 maskl, pendl, maskh, pendh);
34217
34218- atomic_inc(&irq_err_count);
34219+ atomic_inc_unchecked(&irq_err_count);
34220
34221 return -EINVAL;
34222 }
34223diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
34224index bea6efc..3dc0f42 100644
34225--- a/drivers/gpu/drm/drm_crtc.c
34226+++ b/drivers/gpu/drm/drm_crtc.c
34227@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34228 */
34229 if ((out_resp->count_modes >= mode_count) && mode_count) {
34230 copied = 0;
34231- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
34232+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
34233 list_for_each_entry(mode, &connector->modes, head) {
34234 drm_crtc_convert_to_umode(&u_mode, mode);
34235 if (copy_to_user(mode_ptr + copied,
34236@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34237
34238 if ((out_resp->count_props >= props_count) && props_count) {
34239 copied = 0;
34240- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
34241- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
34242+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
34243+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
34244 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
34245 if (connector->property_ids[i] != 0) {
34246 if (put_user(connector->property_ids[i],
34247@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
34248
34249 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
34250 copied = 0;
34251- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
34252+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
34253 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
34254 if (connector->encoder_ids[i] != 0) {
34255 if (put_user(connector->encoder_ids[i],
34256@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
34257 }
34258
34259 for (i = 0; i < crtc_req->count_connectors; i++) {
34260- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
34261+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
34262 if (get_user(out_id, &set_connectors_ptr[i])) {
34263 ret = -EFAULT;
34264 goto out;
34265@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34266 out_resp->flags = property->flags;
34267
34268 if ((out_resp->count_values >= value_count) && value_count) {
34269- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
34270+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
34271 for (i = 0; i < value_count; i++) {
34272 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
34273 ret = -EFAULT;
34274@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34275 if (property->flags & DRM_MODE_PROP_ENUM) {
34276 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
34277 copied = 0;
34278- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
34279+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
34280 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
34281
34282 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
34283@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
34284 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
34285 copied = 0;
34286 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
34287- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
34288+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
34289
34290 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
34291 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
34292@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
34293 blob = obj_to_blob(obj);
34294
34295 if (out_resp->length == blob->length) {
34296- blob_ptr = (void *)(unsigned long)out_resp->data;
34297+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
34298 if (copy_to_user(blob_ptr, blob->data, blob->length)){
34299 ret = -EFAULT;
34300 goto done;
34301diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34302index 1b8745d..92fdbf6 100644
34303--- a/drivers/gpu/drm/drm_crtc_helper.c
34304+++ b/drivers/gpu/drm/drm_crtc_helper.c
34305@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34306 struct drm_crtc *tmp;
34307 int crtc_mask = 1;
34308
34309- WARN(!crtc, "checking null crtc?");
34310+ BUG_ON(!crtc);
34311
34312 dev = crtc->dev;
34313
34314@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
34315
34316 adjusted_mode = drm_mode_duplicate(dev, mode);
34317
34318+ pax_track_stack();
34319+
34320 crtc->enabled = drm_helper_crtc_in_use(crtc);
34321
34322 if (!crtc->enabled)
34323diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34324index 0e27d98..dec8768 100644
34325--- a/drivers/gpu/drm/drm_drv.c
34326+++ b/drivers/gpu/drm/drm_drv.c
34327@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
34328 char *kdata = NULL;
34329
34330 atomic_inc(&dev->ioctl_count);
34331- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34332+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34333 ++file_priv->ioctl_count;
34334
34335 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34336diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34337index 519161e..98c840c 100644
34338--- a/drivers/gpu/drm/drm_fops.c
34339+++ b/drivers/gpu/drm/drm_fops.c
34340@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
34341 }
34342
34343 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34344- atomic_set(&dev->counts[i], 0);
34345+ atomic_set_unchecked(&dev->counts[i], 0);
34346
34347 dev->sigdata.lock = NULL;
34348
34349@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
34350
34351 retcode = drm_open_helper(inode, filp, dev);
34352 if (!retcode) {
34353- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34354+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34355 spin_lock(&dev->count_lock);
34356- if (!dev->open_count++) {
34357+ if (local_inc_return(&dev->open_count) == 1) {
34358 spin_unlock(&dev->count_lock);
34359 retcode = drm_setup(dev);
34360 goto out;
34361@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
34362
34363 lock_kernel();
34364
34365- DRM_DEBUG("open_count = %d\n", dev->open_count);
34366+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
34367
34368 if (dev->driver->preclose)
34369 dev->driver->preclose(dev, file_priv);
34370@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
34371 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34372 task_pid_nr(current),
34373 (long)old_encode_dev(file_priv->minor->device),
34374- dev->open_count);
34375+ local_read(&dev->open_count));
34376
34377 /* Release any auth tokens that might point to this file_priv,
34378 (do that under the drm_global_mutex) */
34379@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
34380 * End inline drm_release
34381 */
34382
34383- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34384+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34385 spin_lock(&dev->count_lock);
34386- if (!--dev->open_count) {
34387+ if (local_dec_and_test(&dev->open_count)) {
34388 if (atomic_read(&dev->ioctl_count)) {
34389 DRM_ERROR("Device busy: %d\n",
34390 atomic_read(&dev->ioctl_count));
34391diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
34392index 8bf3770..79422805 100644
34393--- a/drivers/gpu/drm/drm_gem.c
34394+++ b/drivers/gpu/drm/drm_gem.c
34395@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
34396 spin_lock_init(&dev->object_name_lock);
34397 idr_init(&dev->object_name_idr);
34398 atomic_set(&dev->object_count, 0);
34399- atomic_set(&dev->object_memory, 0);
34400+ atomic_set_unchecked(&dev->object_memory, 0);
34401 atomic_set(&dev->pin_count, 0);
34402- atomic_set(&dev->pin_memory, 0);
34403+ atomic_set_unchecked(&dev->pin_memory, 0);
34404 atomic_set(&dev->gtt_count, 0);
34405- atomic_set(&dev->gtt_memory, 0);
34406+ atomic_set_unchecked(&dev->gtt_memory, 0);
34407
34408 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
34409 if (!mm) {
34410@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
34411 goto fput;
34412 }
34413 atomic_inc(&dev->object_count);
34414- atomic_add(obj->size, &dev->object_memory);
34415+ atomic_add_unchecked(obj->size, &dev->object_memory);
34416 return obj;
34417 fput:
34418 fput(obj->filp);
34419@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
34420
34421 fput(obj->filp);
34422 atomic_dec(&dev->object_count);
34423- atomic_sub(obj->size, &dev->object_memory);
34424+ atomic_sub_unchecked(obj->size, &dev->object_memory);
34425 kfree(obj);
34426 }
34427 EXPORT_SYMBOL(drm_gem_object_free);
34428diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34429index f0f6c6b..34af322 100644
34430--- a/drivers/gpu/drm/drm_info.c
34431+++ b/drivers/gpu/drm/drm_info.c
34432@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34433 struct drm_local_map *map;
34434 struct drm_map_list *r_list;
34435
34436- /* Hardcoded from _DRM_FRAME_BUFFER,
34437- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34438- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34439- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34440+ static const char * const types[] = {
34441+ [_DRM_FRAME_BUFFER] = "FB",
34442+ [_DRM_REGISTERS] = "REG",
34443+ [_DRM_SHM] = "SHM",
34444+ [_DRM_AGP] = "AGP",
34445+ [_DRM_SCATTER_GATHER] = "SG",
34446+ [_DRM_CONSISTENT] = "PCI",
34447+ [_DRM_GEM] = "GEM" };
34448 const char *type;
34449 int i;
34450
34451@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34452 map = r_list->map;
34453 if (!map)
34454 continue;
34455- if (map->type < 0 || map->type > 5)
34456+ if (map->type >= ARRAY_SIZE(types))
34457 type = "??";
34458 else
34459 type = types[map->type];
34460@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
34461 struct drm_device *dev = node->minor->dev;
34462
34463 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
34464- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
34465+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
34466 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
34467- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
34468- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
34469+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
34470+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
34471 seq_printf(m, "%d gtt total\n", dev->gtt_total);
34472 return 0;
34473 }
34474@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34475 mutex_lock(&dev->struct_mutex);
34476 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
34477 atomic_read(&dev->vma_count),
34478+#ifdef CONFIG_GRKERNSEC_HIDESYM
34479+ NULL, 0);
34480+#else
34481 high_memory, (u64)virt_to_phys(high_memory));
34482+#endif
34483
34484 list_for_each_entry(pt, &dev->vmalist, head) {
34485 vma = pt->vma;
34486@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
34487 continue;
34488 seq_printf(m,
34489 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
34490- pt->pid, vma->vm_start, vma->vm_end,
34491+ pt->pid,
34492+#ifdef CONFIG_GRKERNSEC_HIDESYM
34493+ 0, 0,
34494+#else
34495+ vma->vm_start, vma->vm_end,
34496+#endif
34497 vma->vm_flags & VM_READ ? 'r' : '-',
34498 vma->vm_flags & VM_WRITE ? 'w' : '-',
34499 vma->vm_flags & VM_EXEC ? 'x' : '-',
34500 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34501 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34502 vma->vm_flags & VM_IO ? 'i' : '-',
34503+#ifdef CONFIG_GRKERNSEC_HIDESYM
34504+ 0);
34505+#else
34506 vma->vm_pgoff);
34507+#endif
34508
34509 #if defined(__i386__)
34510 pgprot = pgprot_val(vma->vm_page_prot);
34511diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34512index 282d9fd..71e5f11 100644
34513--- a/drivers/gpu/drm/drm_ioc32.c
34514+++ b/drivers/gpu/drm/drm_ioc32.c
34515@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34516 request = compat_alloc_user_space(nbytes);
34517 if (!access_ok(VERIFY_WRITE, request, nbytes))
34518 return -EFAULT;
34519- list = (struct drm_buf_desc *) (request + 1);
34520+ list = (struct drm_buf_desc __user *) (request + 1);
34521
34522 if (__put_user(count, &request->count)
34523 || __put_user(list, &request->list))
34524@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34525 request = compat_alloc_user_space(nbytes);
34526 if (!access_ok(VERIFY_WRITE, request, nbytes))
34527 return -EFAULT;
34528- list = (struct drm_buf_pub *) (request + 1);
34529+ list = (struct drm_buf_pub __user *) (request + 1);
34530
34531 if (__put_user(count, &request->count)
34532 || __put_user(list, &request->list))
34533diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34534index 9b9ff46..4ea724c 100644
34535--- a/drivers/gpu/drm/drm_ioctl.c
34536+++ b/drivers/gpu/drm/drm_ioctl.c
34537@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34538 stats->data[i].value =
34539 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34540 else
34541- stats->data[i].value = atomic_read(&dev->counts[i]);
34542+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34543 stats->data[i].type = dev->types[i];
34544 }
34545
34546diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34547index e2f70a5..c703e86 100644
34548--- a/drivers/gpu/drm/drm_lock.c
34549+++ b/drivers/gpu/drm/drm_lock.c
34550@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34551 if (drm_lock_take(&master->lock, lock->context)) {
34552 master->lock.file_priv = file_priv;
34553 master->lock.lock_time = jiffies;
34554- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34555+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34556 break; /* Got lock */
34557 }
34558
34559@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34560 return -EINVAL;
34561 }
34562
34563- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34564+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34565
34566 /* kernel_context_switch isn't used by any of the x86 drm
34567 * modules but is required by the Sparc driver.
34568diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34569index 7d1d88c..b9131b2 100644
34570--- a/drivers/gpu/drm/i810/i810_dma.c
34571+++ b/drivers/gpu/drm/i810/i810_dma.c
34572@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34573 dma->buflist[vertex->idx],
34574 vertex->discard, vertex->used);
34575
34576- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34577- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34578+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34579+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34580 sarea_priv->last_enqueue = dev_priv->counter - 1;
34581 sarea_priv->last_dispatch = (int)hw_status[5];
34582
34583@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34584 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34585 mc->last_render);
34586
34587- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34588- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34589+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34590+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34591 sarea_priv->last_enqueue = dev_priv->counter - 1;
34592 sarea_priv->last_dispatch = (int)hw_status[5];
34593
34594diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34595index 21e2691..7321edd 100644
34596--- a/drivers/gpu/drm/i810/i810_drv.h
34597+++ b/drivers/gpu/drm/i810/i810_drv.h
34598@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34599 int page_flipping;
34600
34601 wait_queue_head_t irq_queue;
34602- atomic_t irq_received;
34603- atomic_t irq_emitted;
34604+ atomic_unchecked_t irq_received;
34605+ atomic_unchecked_t irq_emitted;
34606
34607 int front_offset;
34608 } drm_i810_private_t;
34609diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
34610index da82afe..48a45de 100644
34611--- a/drivers/gpu/drm/i830/i830_drv.h
34612+++ b/drivers/gpu/drm/i830/i830_drv.h
34613@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
34614 int page_flipping;
34615
34616 wait_queue_head_t irq_queue;
34617- atomic_t irq_received;
34618- atomic_t irq_emitted;
34619+ atomic_unchecked_t irq_received;
34620+ atomic_unchecked_t irq_emitted;
34621
34622 int use_mi_batchbuffer_start;
34623
34624diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
34625index 91ec2bb..6f21fab 100644
34626--- a/drivers/gpu/drm/i830/i830_irq.c
34627+++ b/drivers/gpu/drm/i830/i830_irq.c
34628@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
34629
34630 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
34631
34632- atomic_inc(&dev_priv->irq_received);
34633+ atomic_inc_unchecked(&dev_priv->irq_received);
34634 wake_up_interruptible(&dev_priv->irq_queue);
34635
34636 return IRQ_HANDLED;
34637@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
34638
34639 DRM_DEBUG("%s\n", __func__);
34640
34641- atomic_inc(&dev_priv->irq_emitted);
34642+ atomic_inc_unchecked(&dev_priv->irq_emitted);
34643
34644 BEGIN_LP_RING(2);
34645 OUT_RING(0);
34646 OUT_RING(GFX_OP_USER_INTERRUPT);
34647 ADVANCE_LP_RING();
34648
34649- return atomic_read(&dev_priv->irq_emitted);
34650+ return atomic_read_unchecked(&dev_priv->irq_emitted);
34651 }
34652
34653 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34654@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34655
34656 DRM_DEBUG("%s\n", __func__);
34657
34658- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34659+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34660 return 0;
34661
34662 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
34663@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34664
34665 for (;;) {
34666 __set_current_state(TASK_INTERRUPTIBLE);
34667- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34668+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34669 break;
34670 if ((signed)(end - jiffies) <= 0) {
34671 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
34672@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
34673 I830_WRITE16(I830REG_HWSTAM, 0xffff);
34674 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
34675 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
34676- atomic_set(&dev_priv->irq_received, 0);
34677- atomic_set(&dev_priv->irq_emitted, 0);
34678+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34679+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
34680 init_waitqueue_head(&dev_priv->irq_queue);
34681 }
34682
34683diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
34684index 288fc50..c6092055 100644
34685--- a/drivers/gpu/drm/i915/dvo.h
34686+++ b/drivers/gpu/drm/i915/dvo.h
34687@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
34688 *
34689 * \return singly-linked list of modes or NULL if no modes found.
34690 */
34691- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
34692+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
34693
34694 /**
34695 * Clean up driver-specific bits of the output
34696 */
34697- void (*destroy) (struct intel_dvo_device *dvo);
34698+ void (* const destroy) (struct intel_dvo_device *dvo);
34699
34700 /**
34701 * Debugging hook to dump device registers to log file
34702 */
34703- void (*dump_regs)(struct intel_dvo_device *dvo);
34704+ void (* const dump_regs)(struct intel_dvo_device *dvo);
34705 };
34706
34707-extern struct intel_dvo_dev_ops sil164_ops;
34708-extern struct intel_dvo_dev_ops ch7xxx_ops;
34709-extern struct intel_dvo_dev_ops ivch_ops;
34710-extern struct intel_dvo_dev_ops tfp410_ops;
34711-extern struct intel_dvo_dev_ops ch7017_ops;
34712+extern const struct intel_dvo_dev_ops sil164_ops;
34713+extern const struct intel_dvo_dev_ops ch7xxx_ops;
34714+extern const struct intel_dvo_dev_ops ivch_ops;
34715+extern const struct intel_dvo_dev_ops tfp410_ops;
34716+extern const struct intel_dvo_dev_ops ch7017_ops;
34717
34718 #endif /* _INTEL_DVO_H */
34719diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
34720index 621815b..499d82e 100644
34721--- a/drivers/gpu/drm/i915/dvo_ch7017.c
34722+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
34723@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
34724 }
34725 }
34726
34727-struct intel_dvo_dev_ops ch7017_ops = {
34728+const struct intel_dvo_dev_ops ch7017_ops = {
34729 .init = ch7017_init,
34730 .detect = ch7017_detect,
34731 .mode_valid = ch7017_mode_valid,
34732diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34733index a9b8962..ac769ba 100644
34734--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
34735+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34736@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
34737 }
34738 }
34739
34740-struct intel_dvo_dev_ops ch7xxx_ops = {
34741+const struct intel_dvo_dev_ops ch7xxx_ops = {
34742 .init = ch7xxx_init,
34743 .detect = ch7xxx_detect,
34744 .mode_valid = ch7xxx_mode_valid,
34745diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
34746index aa176f9..ed2930c 100644
34747--- a/drivers/gpu/drm/i915/dvo_ivch.c
34748+++ b/drivers/gpu/drm/i915/dvo_ivch.c
34749@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
34750 }
34751 }
34752
34753-struct intel_dvo_dev_ops ivch_ops= {
34754+const struct intel_dvo_dev_ops ivch_ops= {
34755 .init = ivch_init,
34756 .dpms = ivch_dpms,
34757 .save = ivch_save,
34758diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
34759index e1c1f73..7dbebcf 100644
34760--- a/drivers/gpu/drm/i915/dvo_sil164.c
34761+++ b/drivers/gpu/drm/i915/dvo_sil164.c
34762@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
34763 }
34764 }
34765
34766-struct intel_dvo_dev_ops sil164_ops = {
34767+const struct intel_dvo_dev_ops sil164_ops = {
34768 .init = sil164_init,
34769 .detect = sil164_detect,
34770 .mode_valid = sil164_mode_valid,
34771diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
34772index 16dce84..7e1b6f8 100644
34773--- a/drivers/gpu/drm/i915/dvo_tfp410.c
34774+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
34775@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
34776 }
34777 }
34778
34779-struct intel_dvo_dev_ops tfp410_ops = {
34780+const struct intel_dvo_dev_ops tfp410_ops = {
34781 .init = tfp410_init,
34782 .detect = tfp410_detect,
34783 .mode_valid = tfp410_mode_valid,
34784diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34785index 7e859d6..7d1cf2b 100644
34786--- a/drivers/gpu/drm/i915/i915_debugfs.c
34787+++ b/drivers/gpu/drm/i915/i915_debugfs.c
34788@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34789 I915_READ(GTIMR));
34790 }
34791 seq_printf(m, "Interrupts received: %d\n",
34792- atomic_read(&dev_priv->irq_received));
34793+ atomic_read_unchecked(&dev_priv->irq_received));
34794 if (dev_priv->hw_status_page != NULL) {
34795 seq_printf(m, "Current sequence: %d\n",
34796 i915_get_gem_seqno(dev));
34797diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
34798index 5449239..7e4f68d 100644
34799--- a/drivers/gpu/drm/i915/i915_drv.c
34800+++ b/drivers/gpu/drm/i915/i915_drv.c
34801@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
34802 return i915_resume(dev);
34803 }
34804
34805-static struct vm_operations_struct i915_gem_vm_ops = {
34806+static const struct vm_operations_struct i915_gem_vm_ops = {
34807 .fault = i915_gem_fault,
34808 .open = drm_gem_vm_open,
34809 .close = drm_gem_vm_close,
34810diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34811index 97163f7..c24c7c7 100644
34812--- a/drivers/gpu/drm/i915/i915_drv.h
34813+++ b/drivers/gpu/drm/i915/i915_drv.h
34814@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
34815 /* display clock increase/decrease */
34816 /* pll clock increase/decrease */
34817 /* clock gating init */
34818-};
34819+} __no_const;
34820
34821 typedef struct drm_i915_private {
34822 struct drm_device *dev;
34823@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
34824 int page_flipping;
34825
34826 wait_queue_head_t irq_queue;
34827- atomic_t irq_received;
34828+ atomic_unchecked_t irq_received;
34829 /** Protects user_irq_refcount and irq_mask_reg */
34830 spinlock_t user_irq_lock;
34831 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
34832diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
34833index 27a3074..eb3f959 100644
34834--- a/drivers/gpu/drm/i915/i915_gem.c
34835+++ b/drivers/gpu/drm/i915/i915_gem.c
34836@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
34837
34838 args->aper_size = dev->gtt_total;
34839 args->aper_available_size = (args->aper_size -
34840- atomic_read(&dev->pin_memory));
34841+ atomic_read_unchecked(&dev->pin_memory));
34842
34843 return 0;
34844 }
34845@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
34846
34847 if (obj_priv->gtt_space) {
34848 atomic_dec(&dev->gtt_count);
34849- atomic_sub(obj->size, &dev->gtt_memory);
34850+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
34851
34852 drm_mm_put_block(obj_priv->gtt_space);
34853 obj_priv->gtt_space = NULL;
34854@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
34855 goto search_free;
34856 }
34857 atomic_inc(&dev->gtt_count);
34858- atomic_add(obj->size, &dev->gtt_memory);
34859+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
34860
34861 /* Assert that the object is not currently in any GPU domain. As it
34862 * wasn't in the GTT, there shouldn't be any way it could have been in
34863@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
34864 "%d/%d gtt bytes\n",
34865 atomic_read(&dev->object_count),
34866 atomic_read(&dev->pin_count),
34867- atomic_read(&dev->object_memory),
34868- atomic_read(&dev->pin_memory),
34869- atomic_read(&dev->gtt_memory),
34870+ atomic_read_unchecked(&dev->object_memory),
34871+ atomic_read_unchecked(&dev->pin_memory),
34872+ atomic_read_unchecked(&dev->gtt_memory),
34873 dev->gtt_total);
34874 }
34875 goto err;
34876@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
34877 */
34878 if (obj_priv->pin_count == 1) {
34879 atomic_inc(&dev->pin_count);
34880- atomic_add(obj->size, &dev->pin_memory);
34881+ atomic_add_unchecked(obj->size, &dev->pin_memory);
34882 if (!obj_priv->active &&
34883 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
34884 !list_empty(&obj_priv->list))
34885@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
34886 list_move_tail(&obj_priv->list,
34887 &dev_priv->mm.inactive_list);
34888 atomic_dec(&dev->pin_count);
34889- atomic_sub(obj->size, &dev->pin_memory);
34890+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
34891 }
34892 i915_verify_inactive(dev, __FILE__, __LINE__);
34893 }
34894diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34895index 63f28ad..f5469da 100644
34896--- a/drivers/gpu/drm/i915/i915_irq.c
34897+++ b/drivers/gpu/drm/i915/i915_irq.c
34898@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
34899 int irq_received;
34900 int ret = IRQ_NONE;
34901
34902- atomic_inc(&dev_priv->irq_received);
34903+ atomic_inc_unchecked(&dev_priv->irq_received);
34904
34905 if (IS_IGDNG(dev))
34906 return igdng_irq_handler(dev);
34907@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
34908 {
34909 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34910
34911- atomic_set(&dev_priv->irq_received, 0);
34912+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34913
34914 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
34915 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
34916diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
34917index 5d9c6a7..d1b0e29 100644
34918--- a/drivers/gpu/drm/i915/intel_sdvo.c
34919+++ b/drivers/gpu/drm/i915/intel_sdvo.c
34920@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
34921 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
34922
34923 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
34924- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34925+ pax_open_kernel();
34926+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34927+ pax_close_kernel();
34928
34929 /* Read the regs to test if we can talk to the device */
34930 for (i = 0; i < 0x40; i++) {
34931diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
34932index be6c6b9..8615d9c 100644
34933--- a/drivers/gpu/drm/mga/mga_drv.h
34934+++ b/drivers/gpu/drm/mga/mga_drv.h
34935@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
34936 u32 clear_cmd;
34937 u32 maccess;
34938
34939- atomic_t vbl_received; /**< Number of vblanks received. */
34940+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
34941 wait_queue_head_t fence_queue;
34942- atomic_t last_fence_retired;
34943+ atomic_unchecked_t last_fence_retired;
34944 u32 next_fence_to_post;
34945
34946 unsigned int fb_cpp;
34947diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
34948index daa6041..a28a5da 100644
34949--- a/drivers/gpu/drm/mga/mga_irq.c
34950+++ b/drivers/gpu/drm/mga/mga_irq.c
34951@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
34952 if (crtc != 0)
34953 return 0;
34954
34955- return atomic_read(&dev_priv->vbl_received);
34956+ return atomic_read_unchecked(&dev_priv->vbl_received);
34957 }
34958
34959
34960@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
34961 /* VBLANK interrupt */
34962 if (status & MGA_VLINEPEN) {
34963 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
34964- atomic_inc(&dev_priv->vbl_received);
34965+ atomic_inc_unchecked(&dev_priv->vbl_received);
34966 drm_handle_vblank(dev, 0);
34967 handled = 1;
34968 }
34969@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
34970 MGA_WRITE(MGA_PRIMEND, prim_end);
34971 }
34972
34973- atomic_inc(&dev_priv->last_fence_retired);
34974+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
34975 DRM_WAKEUP(&dev_priv->fence_queue);
34976 handled = 1;
34977 }
34978@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
34979 * using fences.
34980 */
34981 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
34982- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
34983+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
34984 - *sequence) <= (1 << 23)));
34985
34986 *sequence = cur_fence;
34987diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
34988index 4c39a40..b22a9ea 100644
34989--- a/drivers/gpu/drm/r128/r128_cce.c
34990+++ b/drivers/gpu/drm/r128/r128_cce.c
34991@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
34992
34993 /* GH: Simple idle check.
34994 */
34995- atomic_set(&dev_priv->idle_count, 0);
34996+ atomic_set_unchecked(&dev_priv->idle_count, 0);
34997
34998 /* We don't support anything other than bus-mastering ring mode,
34999 * but the ring can be in either AGP or PCI space for the ring
35000diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35001index 3c60829..4faf484 100644
35002--- a/drivers/gpu/drm/r128/r128_drv.h
35003+++ b/drivers/gpu/drm/r128/r128_drv.h
35004@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35005 int is_pci;
35006 unsigned long cce_buffers_offset;
35007
35008- atomic_t idle_count;
35009+ atomic_unchecked_t idle_count;
35010
35011 int page_flipping;
35012 int current_page;
35013 u32 crtc_offset;
35014 u32 crtc_offset_cntl;
35015
35016- atomic_t vbl_received;
35017+ atomic_unchecked_t vbl_received;
35018
35019 u32 color_fmt;
35020 unsigned int front_offset;
35021diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35022index 69810fb..97bf17a 100644
35023--- a/drivers/gpu/drm/r128/r128_irq.c
35024+++ b/drivers/gpu/drm/r128/r128_irq.c
35025@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35026 if (crtc != 0)
35027 return 0;
35028
35029- return atomic_read(&dev_priv->vbl_received);
35030+ return atomic_read_unchecked(&dev_priv->vbl_received);
35031 }
35032
35033 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35034@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35035 /* VBLANK interrupt */
35036 if (status & R128_CRTC_VBLANK_INT) {
35037 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35038- atomic_inc(&dev_priv->vbl_received);
35039+ atomic_inc_unchecked(&dev_priv->vbl_received);
35040 drm_handle_vblank(dev, 0);
35041 return IRQ_HANDLED;
35042 }
35043diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35044index af2665c..51922d2 100644
35045--- a/drivers/gpu/drm/r128/r128_state.c
35046+++ b/drivers/gpu/drm/r128/r128_state.c
35047@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
35048
35049 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
35050 {
35051- if (atomic_read(&dev_priv->idle_count) == 0) {
35052+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
35053 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35054 } else {
35055- atomic_set(&dev_priv->idle_count, 0);
35056+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35057 }
35058 }
35059
35060diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
35061index dd72b91..8644b3c 100644
35062--- a/drivers/gpu/drm/radeon/atom.c
35063+++ b/drivers/gpu/drm/radeon/atom.c
35064@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
35065 char name[512];
35066 int i;
35067
35068+ pax_track_stack();
35069+
35070 ctx->card = card;
35071 ctx->bios = bios;
35072
35073diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35074index 0d79577..efaa7a5 100644
35075--- a/drivers/gpu/drm/radeon/mkregtable.c
35076+++ b/drivers/gpu/drm/radeon/mkregtable.c
35077@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35078 regex_t mask_rex;
35079 regmatch_t match[4];
35080 char buf[1024];
35081- size_t end;
35082+ long end;
35083 int len;
35084 int done = 0;
35085 int r;
35086 unsigned o;
35087 struct offset *offset;
35088 char last_reg_s[10];
35089- int last_reg;
35090+ unsigned long last_reg;
35091
35092 if (regcomp
35093 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35094diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
35095index 6735213..38c2c67 100644
35096--- a/drivers/gpu/drm/radeon/radeon.h
35097+++ b/drivers/gpu/drm/radeon/radeon.h
35098@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
35099 */
35100 struct radeon_fence_driver {
35101 uint32_t scratch_reg;
35102- atomic_t seq;
35103+ atomic_unchecked_t seq;
35104 uint32_t last_seq;
35105 unsigned long count_timeout;
35106 wait_queue_head_t queue;
35107@@ -640,7 +640,7 @@ struct radeon_asic {
35108 uint32_t offset, uint32_t obj_size);
35109 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
35110 void (*bandwidth_update)(struct radeon_device *rdev);
35111-};
35112+} __no_const;
35113
35114 /*
35115 * Asic structures
35116diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
35117index 4e928b9..d8b6008 100644
35118--- a/drivers/gpu/drm/radeon/radeon_atombios.c
35119+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
35120@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
35121 bool linkb;
35122 struct radeon_i2c_bus_rec ddc_bus;
35123
35124+ pax_track_stack();
35125+
35126 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35127
35128 if (data_offset == 0)
35129@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
35130 }
35131 }
35132
35133-struct bios_connector {
35134+static struct bios_connector {
35135 bool valid;
35136 uint16_t line_mux;
35137 uint16_t devices;
35138 int connector_type;
35139 struct radeon_i2c_bus_rec ddc_bus;
35140-};
35141+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35142
35143 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35144 drm_device
35145@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
35146 uint8_t dac;
35147 union atom_supported_devices *supported_devices;
35148 int i, j;
35149- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
35150
35151 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
35152
35153diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
35154index 083a181..ccccae0 100644
35155--- a/drivers/gpu/drm/radeon/radeon_display.c
35156+++ b/drivers/gpu/drm/radeon/radeon_display.c
35157@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
35158
35159 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
35160 error = freq - current_freq;
35161- error = error < 0 ? 0xffffffff : error;
35162+ error = (int32_t)error < 0 ? 0xffffffff : error;
35163 } else
35164 error = abs(current_freq - freq);
35165 vco_diff = abs(vco - best_vco);
35166diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35167index 76e4070..193fa7f 100644
35168--- a/drivers/gpu/drm/radeon/radeon_drv.h
35169+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35170@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
35171
35172 /* SW interrupt */
35173 wait_queue_head_t swi_queue;
35174- atomic_t swi_emitted;
35175+ atomic_unchecked_t swi_emitted;
35176 int vblank_crtc;
35177 uint32_t irq_enable_reg;
35178 uint32_t r500_disp_irq_reg;
35179diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
35180index 3beb26d..6ce9c4a 100644
35181--- a/drivers/gpu/drm/radeon/radeon_fence.c
35182+++ b/drivers/gpu/drm/radeon/radeon_fence.c
35183@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
35184 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
35185 return 0;
35186 }
35187- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
35188+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
35189 if (!rdev->cp.ready) {
35190 /* FIXME: cp is not running assume everythings is done right
35191 * away
35192@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
35193 return r;
35194 }
35195 WREG32(rdev->fence_drv.scratch_reg, 0);
35196- atomic_set(&rdev->fence_drv.seq, 0);
35197+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
35198 INIT_LIST_HEAD(&rdev->fence_drv.created);
35199 INIT_LIST_HEAD(&rdev->fence_drv.emited);
35200 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
35201diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35202index a1bf11d..4a123c0 100644
35203--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35204+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35205@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35206 request = compat_alloc_user_space(sizeof(*request));
35207 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35208 || __put_user(req32.param, &request->param)
35209- || __put_user((void __user *)(unsigned long)req32.value,
35210+ || __put_user((unsigned long)req32.value,
35211 &request->value))
35212 return -EFAULT;
35213
35214diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35215index b79ecc4..8dab92d 100644
35216--- a/drivers/gpu/drm/radeon/radeon_irq.c
35217+++ b/drivers/gpu/drm/radeon/radeon_irq.c
35218@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35219 unsigned int ret;
35220 RING_LOCALS;
35221
35222- atomic_inc(&dev_priv->swi_emitted);
35223- ret = atomic_read(&dev_priv->swi_emitted);
35224+ atomic_inc_unchecked(&dev_priv->swi_emitted);
35225+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35226
35227 BEGIN_RING(4);
35228 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35229@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35230 drm_radeon_private_t *dev_priv =
35231 (drm_radeon_private_t *) dev->dev_private;
35232
35233- atomic_set(&dev_priv->swi_emitted, 0);
35234+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35235 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35236
35237 dev->max_vblank_count = 0x001fffff;
35238diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35239index 4747910..48ca4b3 100644
35240--- a/drivers/gpu/drm/radeon/radeon_state.c
35241+++ b/drivers/gpu/drm/radeon/radeon_state.c
35242@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35243 {
35244 drm_radeon_private_t *dev_priv = dev->dev_private;
35245 drm_radeon_getparam_t *param = data;
35246- int value;
35247+ int value = 0;
35248
35249 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35250
35251diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35252index 1381e06..0e53b17 100644
35253--- a/drivers/gpu/drm/radeon/radeon_ttm.c
35254+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35255@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
35256 DRM_INFO("radeon: ttm finalized\n");
35257 }
35258
35259-static struct vm_operations_struct radeon_ttm_vm_ops;
35260-static const struct vm_operations_struct *ttm_vm_ops = NULL;
35261-
35262-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35263-{
35264- struct ttm_buffer_object *bo;
35265- int r;
35266-
35267- bo = (struct ttm_buffer_object *)vma->vm_private_data;
35268- if (bo == NULL) {
35269- return VM_FAULT_NOPAGE;
35270- }
35271- r = ttm_vm_ops->fault(vma, vmf);
35272- return r;
35273-}
35274-
35275 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35276 {
35277 struct drm_file *file_priv;
35278 struct radeon_device *rdev;
35279- int r;
35280
35281 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
35282 return drm_mmap(filp, vma);
35283@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35284
35285 file_priv = (struct drm_file *)filp->private_data;
35286 rdev = file_priv->minor->dev->dev_private;
35287- if (rdev == NULL) {
35288+ if (!rdev)
35289 return -EINVAL;
35290- }
35291- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35292- if (unlikely(r != 0)) {
35293- return r;
35294- }
35295- if (unlikely(ttm_vm_ops == NULL)) {
35296- ttm_vm_ops = vma->vm_ops;
35297- radeon_ttm_vm_ops = *ttm_vm_ops;
35298- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35299- }
35300- vma->vm_ops = &radeon_ttm_vm_ops;
35301- return 0;
35302+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
35303 }
35304
35305
35306diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35307index b12ff76..0bd0c6e 100644
35308--- a/drivers/gpu/drm/radeon/rs690.c
35309+++ b/drivers/gpu/drm/radeon/rs690.c
35310@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35311 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35312 rdev->pm.sideport_bandwidth.full)
35313 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35314- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
35315+ read_delay_latency.full = rfixed_const(800 * 1000);
35316 read_delay_latency.full = rfixed_div(read_delay_latency,
35317 rdev->pm.igp_sideport_mclk);
35318+ a.full = rfixed_const(370);
35319+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
35320 } else {
35321 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35322 rdev->pm.k8_bandwidth.full)
35323diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
35324index 0ed436e..e6e7ce3 100644
35325--- a/drivers/gpu/drm/ttm/ttm_bo.c
35326+++ b/drivers/gpu/drm/ttm/ttm_bo.c
35327@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
35328 NULL
35329 };
35330
35331-static struct sysfs_ops ttm_bo_global_ops = {
35332+static const struct sysfs_ops ttm_bo_global_ops = {
35333 .show = &ttm_bo_global_show
35334 };
35335
35336diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35337index 1c040d0..f9e4af8 100644
35338--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
35339+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
35340@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35341 {
35342 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
35343 vma->vm_private_data;
35344- struct ttm_bo_device *bdev = bo->bdev;
35345+ struct ttm_bo_device *bdev;
35346 unsigned long bus_base;
35347 unsigned long bus_offset;
35348 unsigned long bus_size;
35349@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35350 unsigned long address = (unsigned long)vmf->virtual_address;
35351 int retval = VM_FAULT_NOPAGE;
35352
35353+ if (!bo)
35354+ return VM_FAULT_NOPAGE;
35355+ bdev = bo->bdev;
35356+
35357 /*
35358 * Work around locking order reversal in fault / nopfn
35359 * between mmap_sem and bo_reserve: Perform a trylock operation
35360diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
35361index b170071..28ae90e 100644
35362--- a/drivers/gpu/drm/ttm/ttm_global.c
35363+++ b/drivers/gpu/drm/ttm/ttm_global.c
35364@@ -36,7 +36,7 @@
35365 struct ttm_global_item {
35366 struct mutex mutex;
35367 void *object;
35368- int refcount;
35369+ atomic_t refcount;
35370 };
35371
35372 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
35373@@ -49,7 +49,7 @@ void ttm_global_init(void)
35374 struct ttm_global_item *item = &glob[i];
35375 mutex_init(&item->mutex);
35376 item->object = NULL;
35377- item->refcount = 0;
35378+ atomic_set(&item->refcount, 0);
35379 }
35380 }
35381
35382@@ -59,7 +59,7 @@ void ttm_global_release(void)
35383 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
35384 struct ttm_global_item *item = &glob[i];
35385 BUG_ON(item->object != NULL);
35386- BUG_ON(item->refcount != 0);
35387+ BUG_ON(atomic_read(&item->refcount) != 0);
35388 }
35389 }
35390
35391@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35392 void *object;
35393
35394 mutex_lock(&item->mutex);
35395- if (item->refcount == 0) {
35396+ if (atomic_read(&item->refcount) == 0) {
35397 item->object = kzalloc(ref->size, GFP_KERNEL);
35398 if (unlikely(item->object == NULL)) {
35399 ret = -ENOMEM;
35400@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
35401 goto out_err;
35402
35403 }
35404- ++item->refcount;
35405+ atomic_inc(&item->refcount);
35406 ref->object = item->object;
35407 object = item->object;
35408 mutex_unlock(&item->mutex);
35409@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
35410 struct ttm_global_item *item = &glob[ref->global_type];
35411
35412 mutex_lock(&item->mutex);
35413- BUG_ON(item->refcount == 0);
35414+ BUG_ON(atomic_read(&item->refcount) == 0);
35415 BUG_ON(ref->object != item->object);
35416- if (--item->refcount == 0) {
35417+ if (atomic_dec_and_test(&item->refcount)) {
35418 ref->release(ref);
35419 item->object = NULL;
35420 }
35421diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
35422index 072c281..d8ef483 100644
35423--- a/drivers/gpu/drm/ttm/ttm_memory.c
35424+++ b/drivers/gpu/drm/ttm/ttm_memory.c
35425@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
35426 NULL
35427 };
35428
35429-static struct sysfs_ops ttm_mem_zone_ops = {
35430+static const struct sysfs_ops ttm_mem_zone_ops = {
35431 .show = &ttm_mem_zone_show,
35432 .store = &ttm_mem_zone_store
35433 };
35434diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35435index cafcb84..b8e66cc 100644
35436--- a/drivers/gpu/drm/via/via_drv.h
35437+++ b/drivers/gpu/drm/via/via_drv.h
35438@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35439 typedef uint32_t maskarray_t[5];
35440
35441 typedef struct drm_via_irq {
35442- atomic_t irq_received;
35443+ atomic_unchecked_t irq_received;
35444 uint32_t pending_mask;
35445 uint32_t enable_mask;
35446 wait_queue_head_t irq_queue;
35447@@ -75,7 +75,7 @@ typedef struct drm_via_private {
35448 struct timeval last_vblank;
35449 int last_vblank_valid;
35450 unsigned usec_per_vblank;
35451- atomic_t vbl_received;
35452+ atomic_unchecked_t vbl_received;
35453 drm_via_state_t hc_state;
35454 char pci_buf[VIA_PCI_BUF_SIZE];
35455 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35456diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35457index 5935b88..127a8a6 100644
35458--- a/drivers/gpu/drm/via/via_irq.c
35459+++ b/drivers/gpu/drm/via/via_irq.c
35460@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35461 if (crtc != 0)
35462 return 0;
35463
35464- return atomic_read(&dev_priv->vbl_received);
35465+ return atomic_read_unchecked(&dev_priv->vbl_received);
35466 }
35467
35468 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35469@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35470
35471 status = VIA_READ(VIA_REG_INTERRUPT);
35472 if (status & VIA_IRQ_VBLANK_PENDING) {
35473- atomic_inc(&dev_priv->vbl_received);
35474- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35475+ atomic_inc_unchecked(&dev_priv->vbl_received);
35476+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35477 do_gettimeofday(&cur_vblank);
35478 if (dev_priv->last_vblank_valid) {
35479 dev_priv->usec_per_vblank =
35480@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35481 dev_priv->last_vblank = cur_vblank;
35482 dev_priv->last_vblank_valid = 1;
35483 }
35484- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35485+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35486 DRM_DEBUG("US per vblank is: %u\n",
35487 dev_priv->usec_per_vblank);
35488 }
35489@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35490
35491 for (i = 0; i < dev_priv->num_irqs; ++i) {
35492 if (status & cur_irq->pending_mask) {
35493- atomic_inc(&cur_irq->irq_received);
35494+ atomic_inc_unchecked(&cur_irq->irq_received);
35495 DRM_WAKEUP(&cur_irq->irq_queue);
35496 handled = 1;
35497 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
35498@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
35499 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35500 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35501 masks[irq][4]));
35502- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35503+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35504 } else {
35505 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35506 (((cur_irq_sequence =
35507- atomic_read(&cur_irq->irq_received)) -
35508+ atomic_read_unchecked(&cur_irq->irq_received)) -
35509 *sequence) <= (1 << 23)));
35510 }
35511 *sequence = cur_irq_sequence;
35512@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
35513 }
35514
35515 for (i = 0; i < dev_priv->num_irqs; ++i) {
35516- atomic_set(&cur_irq->irq_received, 0);
35517+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35518 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35519 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35520 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35521@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35522 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35523 case VIA_IRQ_RELATIVE:
35524 irqwait->request.sequence +=
35525- atomic_read(&cur_irq->irq_received);
35526+ atomic_read_unchecked(&cur_irq->irq_received);
35527 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35528 case VIA_IRQ_ABSOLUTE:
35529 break;
35530diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
35531index aa8688d..6a0140c 100644
35532--- a/drivers/gpu/vga/vgaarb.c
35533+++ b/drivers/gpu/vga/vgaarb.c
35534@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
35535 uc = &priv->cards[i];
35536 }
35537
35538- if (!uc)
35539- return -EINVAL;
35540+ if (!uc) {
35541+ ret_val = -EINVAL;
35542+ goto done;
35543+ }
35544
35545- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
35546- return -EINVAL;
35547+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
35548+ ret_val = -EINVAL;
35549+ goto done;
35550+ }
35551
35552- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
35553- return -EINVAL;
35554+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
35555+ ret_val = -EINVAL;
35556+ goto done;
35557+ }
35558
35559 vga_put(pdev, io_state);
35560
35561diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35562index 11f8069..4783396 100644
35563--- a/drivers/hid/hid-core.c
35564+++ b/drivers/hid/hid-core.c
35565@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
35566
35567 int hid_add_device(struct hid_device *hdev)
35568 {
35569- static atomic_t id = ATOMIC_INIT(0);
35570+ static atomic_unchecked_t id = ATOMIC_INIT(0);
35571 int ret;
35572
35573 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35574@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
35575 /* XXX hack, any other cleaner solution after the driver core
35576 * is converted to allow more than 20 bytes as the device name? */
35577 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35578- hdev->vendor, hdev->product, atomic_inc_return(&id));
35579+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35580
35581 ret = device_add(&hdev->dev);
35582 if (!ret)
35583diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
35584index 8b6ee24..70f657d 100644
35585--- a/drivers/hid/usbhid/hiddev.c
35586+++ b/drivers/hid/usbhid/hiddev.c
35587@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
35588 return put_user(HID_VERSION, (int __user *)arg);
35589
35590 case HIDIOCAPPLICATION:
35591- if (arg < 0 || arg >= hid->maxapplication)
35592+ if (arg >= hid->maxapplication)
35593 return -EINVAL;
35594
35595 for (i = 0; i < hid->maxcollection; i++)
35596diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
35597index 5d5ed69..f40533e 100644
35598--- a/drivers/hwmon/lis3lv02d.c
35599+++ b/drivers/hwmon/lis3lv02d.c
35600@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
35601 * the lid is closed. This leads to interrupts as soon as a little move
35602 * is done.
35603 */
35604- atomic_inc(&lis3_dev.count);
35605+ atomic_inc_unchecked(&lis3_dev.count);
35606
35607 wake_up_interruptible(&lis3_dev.misc_wait);
35608 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
35609@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35610 if (test_and_set_bit(0, &lis3_dev.misc_opened))
35611 return -EBUSY; /* already open */
35612
35613- atomic_set(&lis3_dev.count, 0);
35614+ atomic_set_unchecked(&lis3_dev.count, 0);
35615
35616 /*
35617 * The sensor can generate interrupts for free-fall and direction
35618@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35619 add_wait_queue(&lis3_dev.misc_wait, &wait);
35620 while (true) {
35621 set_current_state(TASK_INTERRUPTIBLE);
35622- data = atomic_xchg(&lis3_dev.count, 0);
35623+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
35624 if (data)
35625 break;
35626
35627@@ -244,7 +244,7 @@ out:
35628 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35629 {
35630 poll_wait(file, &lis3_dev.misc_wait, wait);
35631- if (atomic_read(&lis3_dev.count))
35632+ if (atomic_read_unchecked(&lis3_dev.count))
35633 return POLLIN | POLLRDNORM;
35634 return 0;
35635 }
35636diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
35637index 7cdd76f..fe0efdf 100644
35638--- a/drivers/hwmon/lis3lv02d.h
35639+++ b/drivers/hwmon/lis3lv02d.h
35640@@ -201,7 +201,7 @@ struct lis3lv02d {
35641
35642 struct input_polled_dev *idev; /* input device */
35643 struct platform_device *pdev; /* platform device */
35644- atomic_t count; /* interrupt count after last read */
35645+ atomic_unchecked_t count; /* interrupt count after last read */
35646 int xcalib; /* calibrated null value for x */
35647 int ycalib; /* calibrated null value for y */
35648 int zcalib; /* calibrated null value for z */
35649diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35650index 740785e..5a5c6c6 100644
35651--- a/drivers/hwmon/sht15.c
35652+++ b/drivers/hwmon/sht15.c
35653@@ -112,7 +112,7 @@ struct sht15_data {
35654 int supply_uV;
35655 int supply_uV_valid;
35656 struct work_struct update_supply_work;
35657- atomic_t interrupt_handled;
35658+ atomic_unchecked_t interrupt_handled;
35659 };
35660
35661 /**
35662@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
35663 return ret;
35664
35665 gpio_direction_input(data->pdata->gpio_data);
35666- atomic_set(&data->interrupt_handled, 0);
35667+ atomic_set_unchecked(&data->interrupt_handled, 0);
35668
35669 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35670 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35671 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35672 /* Only relevant if the interrupt hasn't occured. */
35673- if (!atomic_read(&data->interrupt_handled))
35674+ if (!atomic_read_unchecked(&data->interrupt_handled))
35675 schedule_work(&data->read_work);
35676 }
35677 ret = wait_event_timeout(data->wait_queue,
35678@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35679 struct sht15_data *data = d;
35680 /* First disable the interrupt */
35681 disable_irq_nosync(irq);
35682- atomic_inc(&data->interrupt_handled);
35683+ atomic_inc_unchecked(&data->interrupt_handled);
35684 /* Then schedule a reading work struct */
35685 if (data->flag != SHT15_READING_NOTHING)
35686 schedule_work(&data->read_work);
35687@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35688 here as could have gone low in meantime so verify
35689 it hasn't!
35690 */
35691- atomic_set(&data->interrupt_handled, 0);
35692+ atomic_set_unchecked(&data->interrupt_handled, 0);
35693 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35694 /* If still not occured or another handler has been scheduled */
35695 if (gpio_get_value(data->pdata->gpio_data)
35696- || atomic_read(&data->interrupt_handled))
35697+ || atomic_read_unchecked(&data->interrupt_handled))
35698 return;
35699 }
35700 /* Read the data back from the device */
35701diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
35702index 97851c5..cb40626 100644
35703--- a/drivers/hwmon/w83791d.c
35704+++ b/drivers/hwmon/w83791d.c
35705@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
35706 struct i2c_board_info *info);
35707 static int w83791d_remove(struct i2c_client *client);
35708
35709-static int w83791d_read(struct i2c_client *client, u8 register);
35710-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
35711+static int w83791d_read(struct i2c_client *client, u8 reg);
35712+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
35713 static struct w83791d_data *w83791d_update_device(struct device *dev);
35714
35715 #ifdef DEBUG
35716diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35717index 378fcb5..5e91fa8 100644
35718--- a/drivers/i2c/busses/i2c-amd756-s4882.c
35719+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35720@@ -43,7 +43,7 @@
35721 extern struct i2c_adapter amd756_smbus;
35722
35723 static struct i2c_adapter *s4882_adapter;
35724-static struct i2c_algorithm *s4882_algo;
35725+static i2c_algorithm_no_const *s4882_algo;
35726
35727 /* Wrapper access functions for multiplexed SMBus */
35728 static DEFINE_MUTEX(amd756_lock);
35729diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35730index 29015eb..af2d8e9 100644
35731--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35732+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
35733@@ -41,7 +41,7 @@
35734 extern struct i2c_adapter *nforce2_smbus;
35735
35736 static struct i2c_adapter *s4985_adapter;
35737-static struct i2c_algorithm *s4985_algo;
35738+static i2c_algorithm_no_const *s4985_algo;
35739
35740 /* Wrapper access functions for multiplexed SMBus */
35741 static DEFINE_MUTEX(nforce2_lock);
35742diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
35743index 878f8ec..12376fc 100644
35744--- a/drivers/ide/aec62xx.c
35745+++ b/drivers/ide/aec62xx.c
35746@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
35747 .cable_detect = atp86x_cable_detect,
35748 };
35749
35750-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
35751+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
35752 { /* 0: AEC6210 */
35753 .name = DRV_NAME,
35754 .init_chipset = init_chipset_aec62xx,
35755diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
35756index e59b6de..4b4fc65 100644
35757--- a/drivers/ide/alim15x3.c
35758+++ b/drivers/ide/alim15x3.c
35759@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
35760 .dma_sff_read_status = ide_dma_sff_read_status,
35761 };
35762
35763-static const struct ide_port_info ali15x3_chipset __devinitdata = {
35764+static const struct ide_port_info ali15x3_chipset __devinitconst = {
35765 .name = DRV_NAME,
35766 .init_chipset = init_chipset_ali15x3,
35767 .init_hwif = init_hwif_ali15x3,
35768diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
35769index 628cd2e..087a414 100644
35770--- a/drivers/ide/amd74xx.c
35771+++ b/drivers/ide/amd74xx.c
35772@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
35773 .udma_mask = udma, \
35774 }
35775
35776-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
35777+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
35778 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
35779 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
35780 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
35781diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
35782index 837322b..837fd71 100644
35783--- a/drivers/ide/atiixp.c
35784+++ b/drivers/ide/atiixp.c
35785@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
35786 .cable_detect = atiixp_cable_detect,
35787 };
35788
35789-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
35790+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
35791 { /* 0: IXP200/300/400/700 */
35792 .name = DRV_NAME,
35793 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
35794diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
35795index ca0c46f..d55318a 100644
35796--- a/drivers/ide/cmd64x.c
35797+++ b/drivers/ide/cmd64x.c
35798@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
35799 .dma_sff_read_status = ide_dma_sff_read_status,
35800 };
35801
35802-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
35803+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
35804 { /* 0: CMD643 */
35805 .name = DRV_NAME,
35806 .init_chipset = init_chipset_cmd64x,
35807diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
35808index 09f98ed..cebc5bc 100644
35809--- a/drivers/ide/cs5520.c
35810+++ b/drivers/ide/cs5520.c
35811@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
35812 .set_dma_mode = cs5520_set_dma_mode,
35813 };
35814
35815-static const struct ide_port_info cyrix_chipset __devinitdata = {
35816+static const struct ide_port_info cyrix_chipset __devinitconst = {
35817 .name = DRV_NAME,
35818 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
35819 .port_ops = &cs5520_port_ops,
35820diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
35821index 40bf05e..7d58ca0 100644
35822--- a/drivers/ide/cs5530.c
35823+++ b/drivers/ide/cs5530.c
35824@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
35825 .udma_filter = cs5530_udma_filter,
35826 };
35827
35828-static const struct ide_port_info cs5530_chipset __devinitdata = {
35829+static const struct ide_port_info cs5530_chipset __devinitconst = {
35830 .name = DRV_NAME,
35831 .init_chipset = init_chipset_cs5530,
35832 .init_hwif = init_hwif_cs5530,
35833diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
35834index 983d957..53e6172 100644
35835--- a/drivers/ide/cs5535.c
35836+++ b/drivers/ide/cs5535.c
35837@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
35838 .cable_detect = cs5535_cable_detect,
35839 };
35840
35841-static const struct ide_port_info cs5535_chipset __devinitdata = {
35842+static const struct ide_port_info cs5535_chipset __devinitconst = {
35843 .name = DRV_NAME,
35844 .port_ops = &cs5535_port_ops,
35845 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
35846diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
35847index 74fc540..8e933d8 100644
35848--- a/drivers/ide/cy82c693.c
35849+++ b/drivers/ide/cy82c693.c
35850@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
35851 .set_dma_mode = cy82c693_set_dma_mode,
35852 };
35853
35854-static const struct ide_port_info cy82c693_chipset __devinitdata = {
35855+static const struct ide_port_info cy82c693_chipset __devinitconst = {
35856 .name = DRV_NAME,
35857 .init_iops = init_iops_cy82c693,
35858 .port_ops = &cy82c693_port_ops,
35859diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
35860index 7ce68ef..e78197d 100644
35861--- a/drivers/ide/hpt366.c
35862+++ b/drivers/ide/hpt366.c
35863@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
35864 }
35865 };
35866
35867-static const struct hpt_info hpt36x __devinitdata = {
35868+static const struct hpt_info hpt36x __devinitconst = {
35869 .chip_name = "HPT36x",
35870 .chip_type = HPT36x,
35871 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
35872@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
35873 .timings = &hpt36x_timings
35874 };
35875
35876-static const struct hpt_info hpt370 __devinitdata = {
35877+static const struct hpt_info hpt370 __devinitconst = {
35878 .chip_name = "HPT370",
35879 .chip_type = HPT370,
35880 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35881@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
35882 .timings = &hpt37x_timings
35883 };
35884
35885-static const struct hpt_info hpt370a __devinitdata = {
35886+static const struct hpt_info hpt370a __devinitconst = {
35887 .chip_name = "HPT370A",
35888 .chip_type = HPT370A,
35889 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35890@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
35891 .timings = &hpt37x_timings
35892 };
35893
35894-static const struct hpt_info hpt374 __devinitdata = {
35895+static const struct hpt_info hpt374 __devinitconst = {
35896 .chip_name = "HPT374",
35897 .chip_type = HPT374,
35898 .udma_mask = ATA_UDMA5,
35899@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
35900 .timings = &hpt37x_timings
35901 };
35902
35903-static const struct hpt_info hpt372 __devinitdata = {
35904+static const struct hpt_info hpt372 __devinitconst = {
35905 .chip_name = "HPT372",
35906 .chip_type = HPT372,
35907 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35908@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
35909 .timings = &hpt37x_timings
35910 };
35911
35912-static const struct hpt_info hpt372a __devinitdata = {
35913+static const struct hpt_info hpt372a __devinitconst = {
35914 .chip_name = "HPT372A",
35915 .chip_type = HPT372A,
35916 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35917@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
35918 .timings = &hpt37x_timings
35919 };
35920
35921-static const struct hpt_info hpt302 __devinitdata = {
35922+static const struct hpt_info hpt302 __devinitconst = {
35923 .chip_name = "HPT302",
35924 .chip_type = HPT302,
35925 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35926@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
35927 .timings = &hpt37x_timings
35928 };
35929
35930-static const struct hpt_info hpt371 __devinitdata = {
35931+static const struct hpt_info hpt371 __devinitconst = {
35932 .chip_name = "HPT371",
35933 .chip_type = HPT371,
35934 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35935@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
35936 .timings = &hpt37x_timings
35937 };
35938
35939-static const struct hpt_info hpt372n __devinitdata = {
35940+static const struct hpt_info hpt372n __devinitconst = {
35941 .chip_name = "HPT372N",
35942 .chip_type = HPT372N,
35943 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35944@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
35945 .timings = &hpt37x_timings
35946 };
35947
35948-static const struct hpt_info hpt302n __devinitdata = {
35949+static const struct hpt_info hpt302n __devinitconst = {
35950 .chip_name = "HPT302N",
35951 .chip_type = HPT302N,
35952 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35953@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
35954 .timings = &hpt37x_timings
35955 };
35956
35957-static const struct hpt_info hpt371n __devinitdata = {
35958+static const struct hpt_info hpt371n __devinitconst = {
35959 .chip_name = "HPT371N",
35960 .chip_type = HPT371N,
35961 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35962@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
35963 .dma_sff_read_status = ide_dma_sff_read_status,
35964 };
35965
35966-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
35967+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
35968 { /* 0: HPT36x */
35969 .name = DRV_NAME,
35970 .init_chipset = init_chipset_hpt366,
35971diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
35972index 2de76cc..74186a1 100644
35973--- a/drivers/ide/ide-cd.c
35974+++ b/drivers/ide/ide-cd.c
35975@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
35976 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
35977 if ((unsigned long)buf & alignment
35978 || blk_rq_bytes(rq) & q->dma_pad_mask
35979- || object_is_on_stack(buf))
35980+ || object_starts_on_stack(buf))
35981 drive->dma = 0;
35982 }
35983 }
35984diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
35985index fefbdfc..62ff465 100644
35986--- a/drivers/ide/ide-floppy.c
35987+++ b/drivers/ide/ide-floppy.c
35988@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
35989 u8 pc_buf[256], header_len, desc_cnt;
35990 int i, rc = 1, blocks, length;
35991
35992+ pax_track_stack();
35993+
35994 ide_debug_log(IDE_DBG_FUNC, "enter");
35995
35996 drive->bios_cyl = 0;
35997diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
35998index 39d4e01..11538ce 100644
35999--- a/drivers/ide/ide-pci-generic.c
36000+++ b/drivers/ide/ide-pci-generic.c
36001@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
36002 .udma_mask = ATA_UDMA6, \
36003 }
36004
36005-static const struct ide_port_info generic_chipsets[] __devinitdata = {
36006+static const struct ide_port_info generic_chipsets[] __devinitconst = {
36007 /* 0: Unknown */
36008 DECLARE_GENERIC_PCI_DEV(0),
36009
36010diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
36011index 0d266a5..aaca790 100644
36012--- a/drivers/ide/it8172.c
36013+++ b/drivers/ide/it8172.c
36014@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
36015 .set_dma_mode = it8172_set_dma_mode,
36016 };
36017
36018-static const struct ide_port_info it8172_port_info __devinitdata = {
36019+static const struct ide_port_info it8172_port_info __devinitconst = {
36020 .name = DRV_NAME,
36021 .port_ops = &it8172_port_ops,
36022 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
36023diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
36024index 4797616..4be488a 100644
36025--- a/drivers/ide/it8213.c
36026+++ b/drivers/ide/it8213.c
36027@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
36028 .cable_detect = it8213_cable_detect,
36029 };
36030
36031-static const struct ide_port_info it8213_chipset __devinitdata = {
36032+static const struct ide_port_info it8213_chipset __devinitconst = {
36033 .name = DRV_NAME,
36034 .enablebits = { {0x41, 0x80, 0x80} },
36035 .port_ops = &it8213_port_ops,
36036diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
36037index 51aa745..146ee60 100644
36038--- a/drivers/ide/it821x.c
36039+++ b/drivers/ide/it821x.c
36040@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
36041 .cable_detect = it821x_cable_detect,
36042 };
36043
36044-static const struct ide_port_info it821x_chipset __devinitdata = {
36045+static const struct ide_port_info it821x_chipset __devinitconst = {
36046 .name = DRV_NAME,
36047 .init_chipset = init_chipset_it821x,
36048 .init_hwif = init_hwif_it821x,
36049diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
36050index bf2be64..9270098 100644
36051--- a/drivers/ide/jmicron.c
36052+++ b/drivers/ide/jmicron.c
36053@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
36054 .cable_detect = jmicron_cable_detect,
36055 };
36056
36057-static const struct ide_port_info jmicron_chipset __devinitdata = {
36058+static const struct ide_port_info jmicron_chipset __devinitconst = {
36059 .name = DRV_NAME,
36060 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
36061 .port_ops = &jmicron_port_ops,
36062diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
36063index 95327a2..73f78d8 100644
36064--- a/drivers/ide/ns87415.c
36065+++ b/drivers/ide/ns87415.c
36066@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
36067 .dma_sff_read_status = superio_dma_sff_read_status,
36068 };
36069
36070-static const struct ide_port_info ns87415_chipset __devinitdata = {
36071+static const struct ide_port_info ns87415_chipset __devinitconst = {
36072 .name = DRV_NAME,
36073 .init_hwif = init_hwif_ns87415,
36074 .tp_ops = &ns87415_tp_ops,
36075diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
36076index f1d70d6..e1de05b 100644
36077--- a/drivers/ide/opti621.c
36078+++ b/drivers/ide/opti621.c
36079@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
36080 .set_pio_mode = opti621_set_pio_mode,
36081 };
36082
36083-static const struct ide_port_info opti621_chipset __devinitdata = {
36084+static const struct ide_port_info opti621_chipset __devinitconst = {
36085 .name = DRV_NAME,
36086 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
36087 .port_ops = &opti621_port_ops,
36088diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
36089index 65ba823..7311f4d 100644
36090--- a/drivers/ide/pdc202xx_new.c
36091+++ b/drivers/ide/pdc202xx_new.c
36092@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
36093 .udma_mask = udma, \
36094 }
36095
36096-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
36097+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
36098 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
36099 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
36100 };
36101diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
36102index cb812f3..af816ef 100644
36103--- a/drivers/ide/pdc202xx_old.c
36104+++ b/drivers/ide/pdc202xx_old.c
36105@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
36106 .max_sectors = sectors, \
36107 }
36108
36109-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
36110+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
36111 { /* 0: PDC20246 */
36112 .name = DRV_NAME,
36113 .init_chipset = init_chipset_pdc202xx,
36114diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
36115index bf14f39..15c4b98 100644
36116--- a/drivers/ide/piix.c
36117+++ b/drivers/ide/piix.c
36118@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
36119 .udma_mask = udma, \
36120 }
36121
36122-static const struct ide_port_info piix_pci_info[] __devinitdata = {
36123+static const struct ide_port_info piix_pci_info[] __devinitconst = {
36124 /* 0: MPIIX */
36125 { /*
36126 * MPIIX actually has only a single IDE channel mapped to
36127diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
36128index a6414a8..c04173e 100644
36129--- a/drivers/ide/rz1000.c
36130+++ b/drivers/ide/rz1000.c
36131@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
36132 }
36133 }
36134
36135-static const struct ide_port_info rz1000_chipset __devinitdata = {
36136+static const struct ide_port_info rz1000_chipset __devinitconst = {
36137 .name = DRV_NAME,
36138 .host_flags = IDE_HFLAG_NO_DMA,
36139 };
36140diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
36141index d467478..9203942 100644
36142--- a/drivers/ide/sc1200.c
36143+++ b/drivers/ide/sc1200.c
36144@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
36145 .dma_sff_read_status = ide_dma_sff_read_status,
36146 };
36147
36148-static const struct ide_port_info sc1200_chipset __devinitdata = {
36149+static const struct ide_port_info sc1200_chipset __devinitconst = {
36150 .name = DRV_NAME,
36151 .port_ops = &sc1200_port_ops,
36152 .dma_ops = &sc1200_dma_ops,
36153diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
36154index 1104bb3..59c5194 100644
36155--- a/drivers/ide/scc_pata.c
36156+++ b/drivers/ide/scc_pata.c
36157@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
36158 .dma_sff_read_status = scc_dma_sff_read_status,
36159 };
36160
36161-static const struct ide_port_info scc_chipset __devinitdata = {
36162+static const struct ide_port_info scc_chipset __devinitconst = {
36163 .name = "sccIDE",
36164 .init_iops = init_iops_scc,
36165 .init_dma = scc_init_dma,
36166diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
36167index b6554ef..6cc2cc3 100644
36168--- a/drivers/ide/serverworks.c
36169+++ b/drivers/ide/serverworks.c
36170@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
36171 .cable_detect = svwks_cable_detect,
36172 };
36173
36174-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
36175+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
36176 { /* 0: OSB4 */
36177 .name = DRV_NAME,
36178 .init_chipset = init_chipset_svwks,
36179diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
36180index ab3db61..afed580 100644
36181--- a/drivers/ide/setup-pci.c
36182+++ b/drivers/ide/setup-pci.c
36183@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
36184 int ret, i, n_ports = dev2 ? 4 : 2;
36185 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
36186
36187+ pax_track_stack();
36188+
36189 for (i = 0; i < n_ports / 2; i++) {
36190 ret = ide_setup_pci_controller(pdev[i], d, !i);
36191 if (ret < 0)
36192diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
36193index d95df52..0b03a39 100644
36194--- a/drivers/ide/siimage.c
36195+++ b/drivers/ide/siimage.c
36196@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
36197 .udma_mask = ATA_UDMA6, \
36198 }
36199
36200-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
36201+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
36202 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
36203 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
36204 };
36205diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
36206index 3b88eba..ca8699d 100644
36207--- a/drivers/ide/sis5513.c
36208+++ b/drivers/ide/sis5513.c
36209@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
36210 .cable_detect = sis_cable_detect,
36211 };
36212
36213-static const struct ide_port_info sis5513_chipset __devinitdata = {
36214+static const struct ide_port_info sis5513_chipset __devinitconst = {
36215 .name = DRV_NAME,
36216 .init_chipset = init_chipset_sis5513,
36217 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
36218diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
36219index d698da4..fca42a4 100644
36220--- a/drivers/ide/sl82c105.c
36221+++ b/drivers/ide/sl82c105.c
36222@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
36223 .dma_sff_read_status = ide_dma_sff_read_status,
36224 };
36225
36226-static const struct ide_port_info sl82c105_chipset __devinitdata = {
36227+static const struct ide_port_info sl82c105_chipset __devinitconst = {
36228 .name = DRV_NAME,
36229 .init_chipset = init_chipset_sl82c105,
36230 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
36231diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
36232index 1ccfb40..83d5779 100644
36233--- a/drivers/ide/slc90e66.c
36234+++ b/drivers/ide/slc90e66.c
36235@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
36236 .cable_detect = slc90e66_cable_detect,
36237 };
36238
36239-static const struct ide_port_info slc90e66_chipset __devinitdata = {
36240+static const struct ide_port_info slc90e66_chipset __devinitconst = {
36241 .name = DRV_NAME,
36242 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
36243 .port_ops = &slc90e66_port_ops,
36244diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
36245index 05a93d6..5f9e325 100644
36246--- a/drivers/ide/tc86c001.c
36247+++ b/drivers/ide/tc86c001.c
36248@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
36249 .dma_sff_read_status = ide_dma_sff_read_status,
36250 };
36251
36252-static const struct ide_port_info tc86c001_chipset __devinitdata = {
36253+static const struct ide_port_info tc86c001_chipset __devinitconst = {
36254 .name = DRV_NAME,
36255 .init_hwif = init_hwif_tc86c001,
36256 .port_ops = &tc86c001_port_ops,
36257diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
36258index 8773c3b..7907d6c 100644
36259--- a/drivers/ide/triflex.c
36260+++ b/drivers/ide/triflex.c
36261@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
36262 .set_dma_mode = triflex_set_mode,
36263 };
36264
36265-static const struct ide_port_info triflex_device __devinitdata = {
36266+static const struct ide_port_info triflex_device __devinitconst = {
36267 .name = DRV_NAME,
36268 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
36269 .port_ops = &triflex_port_ops,
36270diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
36271index 4b42ca0..e494a98 100644
36272--- a/drivers/ide/trm290.c
36273+++ b/drivers/ide/trm290.c
36274@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
36275 .dma_check = trm290_dma_check,
36276 };
36277
36278-static const struct ide_port_info trm290_chipset __devinitdata = {
36279+static const struct ide_port_info trm290_chipset __devinitconst = {
36280 .name = DRV_NAME,
36281 .init_hwif = init_hwif_trm290,
36282 .tp_ops = &trm290_tp_ops,
36283diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
36284index 028de26..520d5d5 100644
36285--- a/drivers/ide/via82cxxx.c
36286+++ b/drivers/ide/via82cxxx.c
36287@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
36288 .cable_detect = via82cxxx_cable_detect,
36289 };
36290
36291-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
36292+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
36293 .name = DRV_NAME,
36294 .init_chipset = init_chipset_via82cxxx,
36295 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
36296diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
36297index 2cd00b5..14de699 100644
36298--- a/drivers/ieee1394/dv1394.c
36299+++ b/drivers/ieee1394/dv1394.c
36300@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
36301 based upon DIF section and sequence
36302 */
36303
36304-static void inline
36305+static inline void
36306 frame_put_packet (struct frame *f, struct packet *p)
36307 {
36308 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
36309diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
36310index e947d8f..6a966b9 100644
36311--- a/drivers/ieee1394/hosts.c
36312+++ b/drivers/ieee1394/hosts.c
36313@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
36314 }
36315
36316 static struct hpsb_host_driver dummy_driver = {
36317+ .name = "dummy",
36318 .transmit_packet = dummy_transmit_packet,
36319 .devctl = dummy_devctl,
36320 .isoctl = dummy_isoctl
36321diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
36322index ddaab6e..8d37435 100644
36323--- a/drivers/ieee1394/init_ohci1394_dma.c
36324+++ b/drivers/ieee1394/init_ohci1394_dma.c
36325@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
36326 for (func = 0; func < 8; func++) {
36327 u32 class = read_pci_config(num,slot,func,
36328 PCI_CLASS_REVISION);
36329- if ((class == 0xffffffff))
36330+ if (class == 0xffffffff)
36331 continue; /* No device at this func */
36332
36333 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
36334diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
36335index 65c1429..5d8c11f 100644
36336--- a/drivers/ieee1394/ohci1394.c
36337+++ b/drivers/ieee1394/ohci1394.c
36338@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
36339 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
36340
36341 /* Module Parameters */
36342-static int phys_dma = 1;
36343+static int phys_dma;
36344 module_param(phys_dma, int, 0444);
36345-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
36346+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
36347
36348 static void dma_trm_tasklet(unsigned long data);
36349 static void dma_trm_reset(struct dma_trm_ctx *d);
36350diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
36351index f199896..78c9fc8 100644
36352--- a/drivers/ieee1394/sbp2.c
36353+++ b/drivers/ieee1394/sbp2.c
36354@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
36355 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
36356 MODULE_LICENSE("GPL");
36357
36358-static int sbp2_module_init(void)
36359+static int __init sbp2_module_init(void)
36360 {
36361 int ret;
36362
36363diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36364index a5dea6b..0cefe8f 100644
36365--- a/drivers/infiniband/core/cm.c
36366+++ b/drivers/infiniband/core/cm.c
36367@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36368
36369 struct cm_counter_group {
36370 struct kobject obj;
36371- atomic_long_t counter[CM_ATTR_COUNT];
36372+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36373 };
36374
36375 struct cm_counter_attribute {
36376@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36377 struct ib_mad_send_buf *msg = NULL;
36378 int ret;
36379
36380- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36381+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36382 counter[CM_REQ_COUNTER]);
36383
36384 /* Quick state check to discard duplicate REQs. */
36385@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36386 if (!cm_id_priv)
36387 return;
36388
36389- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36390+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36391 counter[CM_REP_COUNTER]);
36392 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36393 if (ret)
36394@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
36395 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36396 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36397 spin_unlock_irq(&cm_id_priv->lock);
36398- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36399+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36400 counter[CM_RTU_COUNTER]);
36401 goto out;
36402 }
36403@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
36404 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36405 dreq_msg->local_comm_id);
36406 if (!cm_id_priv) {
36407- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36408+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36409 counter[CM_DREQ_COUNTER]);
36410 cm_issue_drep(work->port, work->mad_recv_wc);
36411 return -EINVAL;
36412@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
36413 case IB_CM_MRA_REP_RCVD:
36414 break;
36415 case IB_CM_TIMEWAIT:
36416- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36417+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36418 counter[CM_DREQ_COUNTER]);
36419 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36420 goto unlock;
36421@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
36422 cm_free_msg(msg);
36423 goto deref;
36424 case IB_CM_DREQ_RCVD:
36425- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36426+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36427 counter[CM_DREQ_COUNTER]);
36428 goto unlock;
36429 default:
36430@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
36431 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36432 cm_id_priv->msg, timeout)) {
36433 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36434- atomic_long_inc(&work->port->
36435+ atomic_long_inc_unchecked(&work->port->
36436 counter_group[CM_RECV_DUPLICATES].
36437 counter[CM_MRA_COUNTER]);
36438 goto out;
36439@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
36440 break;
36441 case IB_CM_MRA_REQ_RCVD:
36442 case IB_CM_MRA_REP_RCVD:
36443- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36444+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36445 counter[CM_MRA_COUNTER]);
36446 /* fall through */
36447 default:
36448@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
36449 case IB_CM_LAP_IDLE:
36450 break;
36451 case IB_CM_MRA_LAP_SENT:
36452- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36453+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36454 counter[CM_LAP_COUNTER]);
36455 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36456 goto unlock;
36457@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
36458 cm_free_msg(msg);
36459 goto deref;
36460 case IB_CM_LAP_RCVD:
36461- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36462+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36463 counter[CM_LAP_COUNTER]);
36464 goto unlock;
36465 default:
36466@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36467 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36468 if (cur_cm_id_priv) {
36469 spin_unlock_irq(&cm.lock);
36470- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36471+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36472 counter[CM_SIDR_REQ_COUNTER]);
36473 goto out; /* Duplicate message. */
36474 }
36475@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36476 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36477 msg->retries = 1;
36478
36479- atomic_long_add(1 + msg->retries,
36480+ atomic_long_add_unchecked(1 + msg->retries,
36481 &port->counter_group[CM_XMIT].counter[attr_index]);
36482 if (msg->retries)
36483- atomic_long_add(msg->retries,
36484+ atomic_long_add_unchecked(msg->retries,
36485 &port->counter_group[CM_XMIT_RETRIES].
36486 counter[attr_index]);
36487
36488@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36489 }
36490
36491 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36492- atomic_long_inc(&port->counter_group[CM_RECV].
36493+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36494 counter[attr_id - CM_ATTR_ID_OFFSET]);
36495
36496 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36497@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36498 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36499
36500 return sprintf(buf, "%ld\n",
36501- atomic_long_read(&group->counter[cm_attr->index]));
36502+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36503 }
36504
36505-static struct sysfs_ops cm_counter_ops = {
36506+static const struct sysfs_ops cm_counter_ops = {
36507 .show = cm_show_counter
36508 };
36509
36510diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
36511index 8fd3a6f..61d8075 100644
36512--- a/drivers/infiniband/core/cma.c
36513+++ b/drivers/infiniband/core/cma.c
36514@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
36515
36516 req.private_data_len = sizeof(struct cma_hdr) +
36517 conn_param->private_data_len;
36518+ if (req.private_data_len < conn_param->private_data_len)
36519+ return -EINVAL;
36520+
36521 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36522 if (!req.private_data)
36523 return -ENOMEM;
36524@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
36525 memset(&req, 0, sizeof req);
36526 offset = cma_user_data_offset(id_priv->id.ps);
36527 req.private_data_len = offset + conn_param->private_data_len;
36528+ if (req.private_data_len < conn_param->private_data_len)
36529+ return -EINVAL;
36530+
36531 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36532 if (!private_data)
36533 return -ENOMEM;
36534diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36535index 4507043..14ad522 100644
36536--- a/drivers/infiniband/core/fmr_pool.c
36537+++ b/drivers/infiniband/core/fmr_pool.c
36538@@ -97,8 +97,8 @@ struct ib_fmr_pool {
36539
36540 struct task_struct *thread;
36541
36542- atomic_t req_ser;
36543- atomic_t flush_ser;
36544+ atomic_unchecked_t req_ser;
36545+ atomic_unchecked_t flush_ser;
36546
36547 wait_queue_head_t force_wait;
36548 };
36549@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36550 struct ib_fmr_pool *pool = pool_ptr;
36551
36552 do {
36553- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36554+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36555 ib_fmr_batch_release(pool);
36556
36557- atomic_inc(&pool->flush_ser);
36558+ atomic_inc_unchecked(&pool->flush_ser);
36559 wake_up_interruptible(&pool->force_wait);
36560
36561 if (pool->flush_function)
36562@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36563 }
36564
36565 set_current_state(TASK_INTERRUPTIBLE);
36566- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36567+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36568 !kthread_should_stop())
36569 schedule();
36570 __set_current_state(TASK_RUNNING);
36571@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36572 pool->dirty_watermark = params->dirty_watermark;
36573 pool->dirty_len = 0;
36574 spin_lock_init(&pool->pool_lock);
36575- atomic_set(&pool->req_ser, 0);
36576- atomic_set(&pool->flush_ser, 0);
36577+ atomic_set_unchecked(&pool->req_ser, 0);
36578+ atomic_set_unchecked(&pool->flush_ser, 0);
36579 init_waitqueue_head(&pool->force_wait);
36580
36581 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36582@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36583 }
36584 spin_unlock_irq(&pool->pool_lock);
36585
36586- serial = atomic_inc_return(&pool->req_ser);
36587+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36588 wake_up_process(pool->thread);
36589
36590 if (wait_event_interruptible(pool->force_wait,
36591- atomic_read(&pool->flush_ser) - serial >= 0))
36592+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36593 return -EINTR;
36594
36595 return 0;
36596@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36597 } else {
36598 list_add_tail(&fmr->list, &pool->dirty_list);
36599 if (++pool->dirty_len >= pool->dirty_watermark) {
36600- atomic_inc(&pool->req_ser);
36601+ atomic_inc_unchecked(&pool->req_ser);
36602 wake_up_process(pool->thread);
36603 }
36604 }
36605diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
36606index 158a214..1558bb7 100644
36607--- a/drivers/infiniband/core/sysfs.c
36608+++ b/drivers/infiniband/core/sysfs.c
36609@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
36610 return port_attr->show(p, port_attr, buf);
36611 }
36612
36613-static struct sysfs_ops port_sysfs_ops = {
36614+static const struct sysfs_ops port_sysfs_ops = {
36615 .show = port_attr_show
36616 };
36617
36618diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
36619index 5440da0..1194ecb 100644
36620--- a/drivers/infiniband/core/uverbs_marshall.c
36621+++ b/drivers/infiniband/core/uverbs_marshall.c
36622@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36623 dst->grh.sgid_index = src->grh.sgid_index;
36624 dst->grh.hop_limit = src->grh.hop_limit;
36625 dst->grh.traffic_class = src->grh.traffic_class;
36626+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
36627 dst->dlid = src->dlid;
36628 dst->sl = src->sl;
36629 dst->src_path_bits = src->src_path_bits;
36630 dst->static_rate = src->static_rate;
36631 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
36632 dst->port_num = src->port_num;
36633+ dst->reserved = 0;
36634 }
36635 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
36636
36637 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36638 struct ib_qp_attr *src)
36639 {
36640+ dst->qp_state = src->qp_state;
36641 dst->cur_qp_state = src->cur_qp_state;
36642 dst->path_mtu = src->path_mtu;
36643 dst->path_mig_state = src->path_mig_state;
36644@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36645 dst->rnr_retry = src->rnr_retry;
36646 dst->alt_port_num = src->alt_port_num;
36647 dst->alt_timeout = src->alt_timeout;
36648+ memset(dst->reserved, 0, sizeof(dst->reserved));
36649 }
36650 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
36651
36652diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
36653index 100da85..e0d6609 100644
36654--- a/drivers/infiniband/hw/ipath/ipath_fs.c
36655+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
36656@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
36657 struct infinipath_counters counters;
36658 struct ipath_devdata *dd;
36659
36660+ pax_track_stack();
36661+
36662 dd = file->f_path.dentry->d_inode->i_private;
36663 dd->ipath_f_read_counters(dd, &counters);
36664
36665@@ -122,6 +124,8 @@ static const struct file_operations atomic_counters_ops = {
36666 };
36667
36668 static ssize_t flash_read(struct file *file, char __user *buf,
36669+ size_t count, loff_t *ppos) __size_overflow(3);
36670+static ssize_t flash_read(struct file *file, char __user *buf,
36671 size_t count, loff_t *ppos)
36672 {
36673 struct ipath_devdata *dd;
36674diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36675index cbde0cf..afaf55c 100644
36676--- a/drivers/infiniband/hw/nes/nes.c
36677+++ b/drivers/infiniband/hw/nes/nes.c
36678@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36679 LIST_HEAD(nes_adapter_list);
36680 static LIST_HEAD(nes_dev_list);
36681
36682-atomic_t qps_destroyed;
36683+atomic_unchecked_t qps_destroyed;
36684
36685 static unsigned int ee_flsh_adapter;
36686 static unsigned int sysfs_nonidx_addr;
36687@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36688 struct nes_adapter *nesadapter = nesdev->nesadapter;
36689 u32 qp_id;
36690
36691- atomic_inc(&qps_destroyed);
36692+ atomic_inc_unchecked(&qps_destroyed);
36693
36694 /* Free the control structures */
36695
36696diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36697index bcc6abc..9c76b2f 100644
36698--- a/drivers/infiniband/hw/nes/nes.h
36699+++ b/drivers/infiniband/hw/nes/nes.h
36700@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
36701 extern unsigned int wqm_quanta;
36702 extern struct list_head nes_adapter_list;
36703
36704-extern atomic_t cm_connects;
36705-extern atomic_t cm_accepts;
36706-extern atomic_t cm_disconnects;
36707-extern atomic_t cm_closes;
36708-extern atomic_t cm_connecteds;
36709-extern atomic_t cm_connect_reqs;
36710-extern atomic_t cm_rejects;
36711-extern atomic_t mod_qp_timouts;
36712-extern atomic_t qps_created;
36713-extern atomic_t qps_destroyed;
36714-extern atomic_t sw_qps_destroyed;
36715+extern atomic_unchecked_t cm_connects;
36716+extern atomic_unchecked_t cm_accepts;
36717+extern atomic_unchecked_t cm_disconnects;
36718+extern atomic_unchecked_t cm_closes;
36719+extern atomic_unchecked_t cm_connecteds;
36720+extern atomic_unchecked_t cm_connect_reqs;
36721+extern atomic_unchecked_t cm_rejects;
36722+extern atomic_unchecked_t mod_qp_timouts;
36723+extern atomic_unchecked_t qps_created;
36724+extern atomic_unchecked_t qps_destroyed;
36725+extern atomic_unchecked_t sw_qps_destroyed;
36726 extern u32 mh_detected;
36727 extern u32 mh_pauses_sent;
36728 extern u32 cm_packets_sent;
36729@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
36730 extern u32 cm_listens_created;
36731 extern u32 cm_listens_destroyed;
36732 extern u32 cm_backlog_drops;
36733-extern atomic_t cm_loopbacks;
36734-extern atomic_t cm_nodes_created;
36735-extern atomic_t cm_nodes_destroyed;
36736-extern atomic_t cm_accel_dropped_pkts;
36737-extern atomic_t cm_resets_recvd;
36738+extern atomic_unchecked_t cm_loopbacks;
36739+extern atomic_unchecked_t cm_nodes_created;
36740+extern atomic_unchecked_t cm_nodes_destroyed;
36741+extern atomic_unchecked_t cm_accel_dropped_pkts;
36742+extern atomic_unchecked_t cm_resets_recvd;
36743
36744 extern u32 int_mod_timer_init;
36745 extern u32 int_mod_cq_depth_256;
36746diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36747index 73473db..5ed06e8 100644
36748--- a/drivers/infiniband/hw/nes/nes_cm.c
36749+++ b/drivers/infiniband/hw/nes/nes_cm.c
36750@@ -69,11 +69,11 @@ u32 cm_packets_received;
36751 u32 cm_listens_created;
36752 u32 cm_listens_destroyed;
36753 u32 cm_backlog_drops;
36754-atomic_t cm_loopbacks;
36755-atomic_t cm_nodes_created;
36756-atomic_t cm_nodes_destroyed;
36757-atomic_t cm_accel_dropped_pkts;
36758-atomic_t cm_resets_recvd;
36759+atomic_unchecked_t cm_loopbacks;
36760+atomic_unchecked_t cm_nodes_created;
36761+atomic_unchecked_t cm_nodes_destroyed;
36762+atomic_unchecked_t cm_accel_dropped_pkts;
36763+atomic_unchecked_t cm_resets_recvd;
36764
36765 static inline int mini_cm_accelerated(struct nes_cm_core *,
36766 struct nes_cm_node *);
36767@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
36768
36769 static struct nes_cm_core *g_cm_core;
36770
36771-atomic_t cm_connects;
36772-atomic_t cm_accepts;
36773-atomic_t cm_disconnects;
36774-atomic_t cm_closes;
36775-atomic_t cm_connecteds;
36776-atomic_t cm_connect_reqs;
36777-atomic_t cm_rejects;
36778+atomic_unchecked_t cm_connects;
36779+atomic_unchecked_t cm_accepts;
36780+atomic_unchecked_t cm_disconnects;
36781+atomic_unchecked_t cm_closes;
36782+atomic_unchecked_t cm_connecteds;
36783+atomic_unchecked_t cm_connect_reqs;
36784+atomic_unchecked_t cm_rejects;
36785
36786
36787 /**
36788@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36789 cm_node->rem_mac);
36790
36791 add_hte_node(cm_core, cm_node);
36792- atomic_inc(&cm_nodes_created);
36793+ atomic_inc_unchecked(&cm_nodes_created);
36794
36795 return cm_node;
36796 }
36797@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36798 }
36799
36800 atomic_dec(&cm_core->node_cnt);
36801- atomic_inc(&cm_nodes_destroyed);
36802+ atomic_inc_unchecked(&cm_nodes_destroyed);
36803 nesqp = cm_node->nesqp;
36804 if (nesqp) {
36805 nesqp->cm_node = NULL;
36806@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36807
36808 static void drop_packet(struct sk_buff *skb)
36809 {
36810- atomic_inc(&cm_accel_dropped_pkts);
36811+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36812 dev_kfree_skb_any(skb);
36813 }
36814
36815@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36816
36817 int reset = 0; /* whether to send reset in case of err.. */
36818 int passive_state;
36819- atomic_inc(&cm_resets_recvd);
36820+ atomic_inc_unchecked(&cm_resets_recvd);
36821 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36822 " refcnt=%d\n", cm_node, cm_node->state,
36823 atomic_read(&cm_node->ref_count));
36824@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36825 rem_ref_cm_node(cm_node->cm_core, cm_node);
36826 return NULL;
36827 }
36828- atomic_inc(&cm_loopbacks);
36829+ atomic_inc_unchecked(&cm_loopbacks);
36830 loopbackremotenode->loopbackpartner = cm_node;
36831 loopbackremotenode->tcp_cntxt.rcv_wscale =
36832 NES_CM_DEFAULT_RCV_WND_SCALE;
36833@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36834 add_ref_cm_node(cm_node);
36835 } else if (cm_node->state == NES_CM_STATE_TSA) {
36836 rem_ref_cm_node(cm_core, cm_node);
36837- atomic_inc(&cm_accel_dropped_pkts);
36838+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36839 dev_kfree_skb_any(skb);
36840 break;
36841 }
36842@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36843
36844 if ((cm_id) && (cm_id->event_handler)) {
36845 if (issue_disconn) {
36846- atomic_inc(&cm_disconnects);
36847+ atomic_inc_unchecked(&cm_disconnects);
36848 cm_event.event = IW_CM_EVENT_DISCONNECT;
36849 cm_event.status = disconn_status;
36850 cm_event.local_addr = cm_id->local_addr;
36851@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36852 }
36853
36854 if (issue_close) {
36855- atomic_inc(&cm_closes);
36856+ atomic_inc_unchecked(&cm_closes);
36857 nes_disconnect(nesqp, 1);
36858
36859 cm_id->provider_data = nesqp;
36860@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36861
36862 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36863 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36864- atomic_inc(&cm_accepts);
36865+ atomic_inc_unchecked(&cm_accepts);
36866
36867 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36868 atomic_read(&nesvnic->netdev->refcnt));
36869@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36870
36871 struct nes_cm_core *cm_core;
36872
36873- atomic_inc(&cm_rejects);
36874+ atomic_inc_unchecked(&cm_rejects);
36875 cm_node = (struct nes_cm_node *) cm_id->provider_data;
36876 loopback = cm_node->loopbackpartner;
36877 cm_core = cm_node->cm_core;
36878@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36879 ntohl(cm_id->local_addr.sin_addr.s_addr),
36880 ntohs(cm_id->local_addr.sin_port));
36881
36882- atomic_inc(&cm_connects);
36883+ atomic_inc_unchecked(&cm_connects);
36884 nesqp->active_conn = 1;
36885
36886 /* cache the cm_id in the qp */
36887@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36888 if (nesqp->destroyed) {
36889 return;
36890 }
36891- atomic_inc(&cm_connecteds);
36892+ atomic_inc_unchecked(&cm_connecteds);
36893 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36894 " local port 0x%04X. jiffies = %lu.\n",
36895 nesqp->hwqp.qp_id,
36896@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36897
36898 ret = cm_id->event_handler(cm_id, &cm_event);
36899 cm_id->add_ref(cm_id);
36900- atomic_inc(&cm_closes);
36901+ atomic_inc_unchecked(&cm_closes);
36902 cm_event.event = IW_CM_EVENT_CLOSE;
36903 cm_event.status = IW_CM_EVENT_STATUS_OK;
36904 cm_event.provider_data = cm_id->provider_data;
36905@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36906 return;
36907 cm_id = cm_node->cm_id;
36908
36909- atomic_inc(&cm_connect_reqs);
36910+ atomic_inc_unchecked(&cm_connect_reqs);
36911 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36912 cm_node, cm_id, jiffies);
36913
36914@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36915 return;
36916 cm_id = cm_node->cm_id;
36917
36918- atomic_inc(&cm_connect_reqs);
36919+ atomic_inc_unchecked(&cm_connect_reqs);
36920 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36921 cm_node, cm_id, jiffies);
36922
36923diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36924index e593af3..870694a 100644
36925--- a/drivers/infiniband/hw/nes/nes_nic.c
36926+++ b/drivers/infiniband/hw/nes/nes_nic.c
36927@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36928 target_stat_values[++index] = mh_detected;
36929 target_stat_values[++index] = mh_pauses_sent;
36930 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
36931- target_stat_values[++index] = atomic_read(&cm_connects);
36932- target_stat_values[++index] = atomic_read(&cm_accepts);
36933- target_stat_values[++index] = atomic_read(&cm_disconnects);
36934- target_stat_values[++index] = atomic_read(&cm_connecteds);
36935- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
36936- target_stat_values[++index] = atomic_read(&cm_rejects);
36937- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
36938- target_stat_values[++index] = atomic_read(&qps_created);
36939- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
36940- target_stat_values[++index] = atomic_read(&qps_destroyed);
36941- target_stat_values[++index] = atomic_read(&cm_closes);
36942+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
36943+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
36944+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
36945+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
36946+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
36947+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
36948+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
36949+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
36950+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
36951+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
36952+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
36953 target_stat_values[++index] = cm_packets_sent;
36954 target_stat_values[++index] = cm_packets_bounced;
36955 target_stat_values[++index] = cm_packets_created;
36956@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36957 target_stat_values[++index] = cm_listens_created;
36958 target_stat_values[++index] = cm_listens_destroyed;
36959 target_stat_values[++index] = cm_backlog_drops;
36960- target_stat_values[++index] = atomic_read(&cm_loopbacks);
36961- target_stat_values[++index] = atomic_read(&cm_nodes_created);
36962- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
36963- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
36964- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
36965+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
36966+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
36967+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
36968+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
36969+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
36970 target_stat_values[++index] = int_mod_timer_init;
36971 target_stat_values[++index] = int_mod_cq_depth_1;
36972 target_stat_values[++index] = int_mod_cq_depth_4;
36973diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
36974index a680c42..f914deb 100644
36975--- a/drivers/infiniband/hw/nes/nes_verbs.c
36976+++ b/drivers/infiniband/hw/nes/nes_verbs.c
36977@@ -45,9 +45,9 @@
36978
36979 #include <rdma/ib_umem.h>
36980
36981-atomic_t mod_qp_timouts;
36982-atomic_t qps_created;
36983-atomic_t sw_qps_destroyed;
36984+atomic_unchecked_t mod_qp_timouts;
36985+atomic_unchecked_t qps_created;
36986+atomic_unchecked_t sw_qps_destroyed;
36987
36988 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
36989
36990@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
36991 if (init_attr->create_flags)
36992 return ERR_PTR(-EINVAL);
36993
36994- atomic_inc(&qps_created);
36995+ atomic_inc_unchecked(&qps_created);
36996 switch (init_attr->qp_type) {
36997 case IB_QPT_RC:
36998 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
36999@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37000 struct iw_cm_event cm_event;
37001 int ret;
37002
37003- atomic_inc(&sw_qps_destroyed);
37004+ atomic_inc_unchecked(&sw_qps_destroyed);
37005 nesqp->destroyed = 1;
37006
37007 /* Blow away the connection if it exists. */
37008diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37009index ac11be0..3883c04 100644
37010--- a/drivers/input/gameport/gameport.c
37011+++ b/drivers/input/gameport/gameport.c
37012@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
37013 */
37014 static void gameport_init_port(struct gameport *gameport)
37015 {
37016- static atomic_t gameport_no = ATOMIC_INIT(0);
37017+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37018
37019 __module_get(THIS_MODULE);
37020
37021 mutex_init(&gameport->drv_mutex);
37022 device_initialize(&gameport->dev);
37023- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
37024+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37025 gameport->dev.bus = &gameport_bus;
37026 gameport->dev.release = gameport_release_port;
37027 if (gameport->parent)
37028diff --git a/drivers/input/input.c b/drivers/input/input.c
37029index c82ae82..8cfb9cb 100644
37030--- a/drivers/input/input.c
37031+++ b/drivers/input/input.c
37032@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
37033 */
37034 int input_register_device(struct input_dev *dev)
37035 {
37036- static atomic_t input_no = ATOMIC_INIT(0);
37037+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37038 struct input_handler *handler;
37039 const char *path;
37040 int error;
37041@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
37042 dev->setkeycode = input_default_setkeycode;
37043
37044 dev_set_name(&dev->dev, "input%ld",
37045- (unsigned long) atomic_inc_return(&input_no) - 1);
37046+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37047
37048 error = device_add(&dev->dev);
37049 if (error)
37050diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37051index ca13a6b..b032b0c 100644
37052--- a/drivers/input/joystick/sidewinder.c
37053+++ b/drivers/input/joystick/sidewinder.c
37054@@ -30,6 +30,7 @@
37055 #include <linux/kernel.h>
37056 #include <linux/module.h>
37057 #include <linux/slab.h>
37058+#include <linux/sched.h>
37059 #include <linux/init.h>
37060 #include <linux/input.h>
37061 #include <linux/gameport.h>
37062@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
37063 unsigned char buf[SW_LENGTH];
37064 int i;
37065
37066+ pax_track_stack();
37067+
37068 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
37069
37070 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
37071diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37072index 79e3edc..01412b9 100644
37073--- a/drivers/input/joystick/xpad.c
37074+++ b/drivers/input/joystick/xpad.c
37075@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37076
37077 static int xpad_led_probe(struct usb_xpad *xpad)
37078 {
37079- static atomic_t led_seq = ATOMIC_INIT(0);
37080+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37081 long led_no;
37082 struct xpad_led *led;
37083 struct led_classdev *led_cdev;
37084@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37085 if (!led)
37086 return -ENOMEM;
37087
37088- led_no = (long)atomic_inc_return(&led_seq) - 1;
37089+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37090
37091 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37092 led->xpad = xpad;
37093diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37094index 0236f0d..c7327f1 100644
37095--- a/drivers/input/serio/serio.c
37096+++ b/drivers/input/serio/serio.c
37097@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
37098 */
37099 static void serio_init_port(struct serio *serio)
37100 {
37101- static atomic_t serio_no = ATOMIC_INIT(0);
37102+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37103
37104 __module_get(THIS_MODULE);
37105
37106@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
37107 mutex_init(&serio->drv_mutex);
37108 device_initialize(&serio->dev);
37109 dev_set_name(&serio->dev, "serio%ld",
37110- (long)atomic_inc_return(&serio_no) - 1);
37111+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37112 serio->dev.bus = &serio_bus;
37113 serio->dev.release = serio_release_port;
37114 if (serio->parent) {
37115diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
37116index 33dcd8d..2783d25 100644
37117--- a/drivers/isdn/gigaset/common.c
37118+++ b/drivers/isdn/gigaset/common.c
37119@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
37120 cs->commands_pending = 0;
37121 cs->cur_at_seq = 0;
37122 cs->gotfwver = -1;
37123- cs->open_count = 0;
37124+ local_set(&cs->open_count, 0);
37125 cs->dev = NULL;
37126 cs->tty = NULL;
37127 cs->tty_dev = NULL;
37128diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
37129index a2f6125..6a70677 100644
37130--- a/drivers/isdn/gigaset/gigaset.h
37131+++ b/drivers/isdn/gigaset/gigaset.h
37132@@ -34,6 +34,7 @@
37133 #include <linux/tty_driver.h>
37134 #include <linux/list.h>
37135 #include <asm/atomic.h>
37136+#include <asm/local.h>
37137
37138 #define GIG_VERSION {0,5,0,0}
37139 #define GIG_COMPAT {0,4,0,0}
37140@@ -446,7 +447,7 @@ struct cardstate {
37141 spinlock_t cmdlock;
37142 unsigned curlen, cmdbytes;
37143
37144- unsigned open_count;
37145+ local_t open_count;
37146 struct tty_struct *tty;
37147 struct tasklet_struct if_wake_tasklet;
37148 unsigned control_state;
37149diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37150index b3065b8..c7e8cc9 100644
37151--- a/drivers/isdn/gigaset/interface.c
37152+++ b/drivers/isdn/gigaset/interface.c
37153@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37154 return -ERESTARTSYS; // FIXME -EINTR?
37155 tty->driver_data = cs;
37156
37157- ++cs->open_count;
37158-
37159- if (cs->open_count == 1) {
37160+ if (local_inc_return(&cs->open_count) == 1) {
37161 spin_lock_irqsave(&cs->lock, flags);
37162 cs->tty = tty;
37163 spin_unlock_irqrestore(&cs->lock, flags);
37164@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37165
37166 if (!cs->connected)
37167 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37168- else if (!cs->open_count)
37169+ else if (!local_read(&cs->open_count))
37170 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37171 else {
37172- if (!--cs->open_count) {
37173+ if (!local_dec_return(&cs->open_count)) {
37174 spin_lock_irqsave(&cs->lock, flags);
37175 cs->tty = NULL;
37176 spin_unlock_irqrestore(&cs->lock, flags);
37177@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
37178 if (!cs->connected) {
37179 gig_dbg(DEBUG_IF, "not connected");
37180 retval = -ENODEV;
37181- } else if (!cs->open_count)
37182+ } else if (!local_read(&cs->open_count))
37183 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37184 else {
37185 retval = 0;
37186@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
37187 if (!cs->connected) {
37188 gig_dbg(DEBUG_IF, "not connected");
37189 retval = -ENODEV;
37190- } else if (!cs->open_count)
37191+ } else if (!local_read(&cs->open_count))
37192 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37193 else if (cs->mstate != MS_LOCKED) {
37194 dev_warn(cs->dev, "can't write to unlocked device\n");
37195@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
37196 if (!cs->connected) {
37197 gig_dbg(DEBUG_IF, "not connected");
37198 retval = -ENODEV;
37199- } else if (!cs->open_count)
37200+ } else if (!local_read(&cs->open_count))
37201 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37202 else if (cs->mstate != MS_LOCKED) {
37203 dev_warn(cs->dev, "can't write to unlocked device\n");
37204@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
37205
37206 if (!cs->connected)
37207 gig_dbg(DEBUG_IF, "not connected");
37208- else if (!cs->open_count)
37209+ else if (!local_read(&cs->open_count))
37210 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37211 else if (cs->mstate != MS_LOCKED)
37212 dev_warn(cs->dev, "can't write to unlocked device\n");
37213@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
37214
37215 if (!cs->connected)
37216 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37217- else if (!cs->open_count)
37218+ else if (!local_read(&cs->open_count))
37219 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37220 else {
37221 //FIXME
37222@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
37223
37224 if (!cs->connected)
37225 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37226- else if (!cs->open_count)
37227+ else if (!local_read(&cs->open_count))
37228 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37229 else {
37230 //FIXME
37231@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
37232 goto out;
37233 }
37234
37235- if (!cs->open_count) {
37236+ if (!local_read(&cs->open_count)) {
37237 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37238 goto out;
37239 }
37240diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37241index a7c0083..62a7cb6 100644
37242--- a/drivers/isdn/hardware/avm/b1.c
37243+++ b/drivers/isdn/hardware/avm/b1.c
37244@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
37245 }
37246 if (left) {
37247 if (t4file->user) {
37248- if (copy_from_user(buf, dp, left))
37249+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37250 return -EFAULT;
37251 } else {
37252 memcpy(buf, dp, left);
37253@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
37254 }
37255 if (left) {
37256 if (config->user) {
37257- if (copy_from_user(buf, dp, left))
37258+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37259 return -EFAULT;
37260 } else {
37261 memcpy(buf, dp, left);
37262diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
37263index f130724..c373c68 100644
37264--- a/drivers/isdn/hardware/eicon/capidtmf.c
37265+++ b/drivers/isdn/hardware/eicon/capidtmf.c
37266@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
37267 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
37268 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
37269
37270+ pax_track_stack();
37271
37272 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
37273 {
37274diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
37275index 4d425c6..a9be6c4 100644
37276--- a/drivers/isdn/hardware/eicon/capifunc.c
37277+++ b/drivers/isdn/hardware/eicon/capifunc.c
37278@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
37279 IDI_SYNC_REQ req;
37280 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37281
37282+ pax_track_stack();
37283+
37284 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37285
37286 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37287diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
37288index 3029234..ef0d9e2 100644
37289--- a/drivers/isdn/hardware/eicon/diddfunc.c
37290+++ b/drivers/isdn/hardware/eicon/diddfunc.c
37291@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37292 IDI_SYNC_REQ req;
37293 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37294
37295+ pax_track_stack();
37296+
37297 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37298
37299 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37300diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
37301index d36a4c0..11e7d1a 100644
37302--- a/drivers/isdn/hardware/eicon/divasfunc.c
37303+++ b/drivers/isdn/hardware/eicon/divasfunc.c
37304@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37305 IDI_SYNC_REQ req;
37306 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37307
37308+ pax_track_stack();
37309+
37310 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37311
37312 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37313diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
37314index 85784a7..a19ca98 100644
37315--- a/drivers/isdn/hardware/eicon/divasync.h
37316+++ b/drivers/isdn/hardware/eicon/divasync.h
37317@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
37318 } diva_didd_add_adapter_t;
37319 typedef struct _diva_didd_remove_adapter {
37320 IDI_CALL p_request;
37321-} diva_didd_remove_adapter_t;
37322+} __no_const diva_didd_remove_adapter_t;
37323 typedef struct _diva_didd_read_adapter_array {
37324 void * buffer;
37325 dword length;
37326diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
37327index db87d51..7d09acf 100644
37328--- a/drivers/isdn/hardware/eicon/idifunc.c
37329+++ b/drivers/isdn/hardware/eicon/idifunc.c
37330@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37331 IDI_SYNC_REQ req;
37332 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37333
37334+ pax_track_stack();
37335+
37336 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37337
37338 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37339diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
37340index ae89fb8..0fab299 100644
37341--- a/drivers/isdn/hardware/eicon/message.c
37342+++ b/drivers/isdn/hardware/eicon/message.c
37343@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
37344 dword d;
37345 word w;
37346
37347+ pax_track_stack();
37348+
37349 a = plci->adapter;
37350 Id = ((word)plci->Id<<8)|a->Id;
37351 PUT_WORD(&SS_Ind[4],0x0000);
37352@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
37353 word j, n, w;
37354 dword d;
37355
37356+ pax_track_stack();
37357+
37358
37359 for(i=0;i<8;i++) bp_parms[i].length = 0;
37360 for(i=0;i<2;i++) global_config[i].length = 0;
37361@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
37362 const byte llc3[] = {4,3,2,2,6,6,0};
37363 const byte header[] = {0,2,3,3,0,0,0};
37364
37365+ pax_track_stack();
37366+
37367 for(i=0;i<8;i++) bp_parms[i].length = 0;
37368 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
37369 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
37370@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
37371 word appl_number_group_type[MAX_APPL];
37372 PLCI *auxplci;
37373
37374+ pax_track_stack();
37375+
37376 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
37377
37378 if(!a->group_optimization_enabled)
37379diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
37380index a564b75..f3cf8b5 100644
37381--- a/drivers/isdn/hardware/eicon/mntfunc.c
37382+++ b/drivers/isdn/hardware/eicon/mntfunc.c
37383@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
37384 IDI_SYNC_REQ req;
37385 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
37386
37387+ pax_track_stack();
37388+
37389 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
37390
37391 for (x = 0; x < MAX_DESCRIPTORS; x++) {
37392diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
37393index a3bd163..8956575 100644
37394--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
37395+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
37396@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
37397 typedef struct _diva_os_idi_adapter_interface {
37398 diva_init_card_proc_t cleanup_adapter_proc;
37399 diva_cmd_card_proc_t cmd_proc;
37400-} diva_os_idi_adapter_interface_t;
37401+} __no_const diva_os_idi_adapter_interface_t;
37402
37403 typedef struct _diva_os_xdi_adapter {
37404 struct list_head link;
37405diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
37406index adb1e8c..21b590b 100644
37407--- a/drivers/isdn/i4l/isdn_common.c
37408+++ b/drivers/isdn/i4l/isdn_common.c
37409@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
37410 } iocpar;
37411 void __user *argp = (void __user *)arg;
37412
37413+ pax_track_stack();
37414+
37415 #define name iocpar.name
37416 #define bname iocpar.bname
37417 #define iocts iocpar.iocts
37418diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
37419index 90b56ed..5ed3305 100644
37420--- a/drivers/isdn/i4l/isdn_net.c
37421+++ b/drivers/isdn/i4l/isdn_net.c
37422@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
37423 {
37424 isdn_net_local *lp = netdev_priv(dev);
37425 unsigned char *p;
37426- ushort len = 0;
37427+ int len = 0;
37428
37429 switch (lp->p_encap) {
37430 case ISDN_NET_ENCAP_ETHER:
37431diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37432index bf7997a..cf091db 100644
37433--- a/drivers/isdn/icn/icn.c
37434+++ b/drivers/isdn/icn/icn.c
37435@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
37436 if (count > len)
37437 count = len;
37438 if (user) {
37439- if (copy_from_user(msg, buf, count))
37440+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37441 return -EFAULT;
37442 } else
37443 memcpy(msg, buf, count);
37444diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
37445index feb0fa4..f76f830 100644
37446--- a/drivers/isdn/mISDN/socket.c
37447+++ b/drivers/isdn/mISDN/socket.c
37448@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37449 if (dev) {
37450 struct mISDN_devinfo di;
37451
37452+ memset(&di, 0, sizeof(di));
37453 di.id = dev->id;
37454 di.Dprotocols = dev->Dprotocols;
37455 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37456@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
37457 if (dev) {
37458 struct mISDN_devinfo di;
37459
37460+ memset(&di, 0, sizeof(di));
37461 di.id = dev->id;
37462 di.Dprotocols = dev->Dprotocols;
37463 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
37464diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
37465index 485be8b..f0225bc 100644
37466--- a/drivers/isdn/sc/interrupt.c
37467+++ b/drivers/isdn/sc/interrupt.c
37468@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37469 }
37470 else if(callid>=0x0000 && callid<=0x7FFF)
37471 {
37472+ int len;
37473+
37474 pr_debug("%s: Got Incoming Call\n",
37475 sc_adapter[card]->devicename);
37476- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
37477- strcpy(setup.eazmsn,
37478- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
37479+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
37480+ sizeof(setup.phone));
37481+ if (len >= sizeof(setup.phone))
37482+ continue;
37483+ len = strlcpy(setup.eazmsn,
37484+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37485+ sizeof(setup.eazmsn));
37486+ if (len >= sizeof(setup.eazmsn))
37487+ continue;
37488 setup.si1 = 7;
37489 setup.si2 = 0;
37490 setup.plan = 0;
37491@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37492 * Handle a GetMyNumber Rsp
37493 */
37494 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
37495- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
37496+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37497+ rcvmsg.msg_data.byte_array,
37498+ sizeof(rcvmsg.msg_data.byte_array));
37499 continue;
37500 }
37501
37502diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37503index 8744d24..d1f9a9a 100644
37504--- a/drivers/lguest/core.c
37505+++ b/drivers/lguest/core.c
37506@@ -91,9 +91,17 @@ static __init int map_switcher(void)
37507 * it's worked so far. The end address needs +1 because __get_vm_area
37508 * allocates an extra guard page, so we need space for that.
37509 */
37510+
37511+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37512+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37513+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37514+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37515+#else
37516 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37517 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37518 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37519+#endif
37520+
37521 if (!switcher_vma) {
37522 err = -ENOMEM;
37523 printk("lguest: could not map switcher pages high\n");
37524@@ -118,7 +126,7 @@ static __init int map_switcher(void)
37525 * Now the Switcher is mapped at the right address, we can't fail!
37526 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
37527 */
37528- memcpy(switcher_vma->addr, start_switcher_text,
37529+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37530 end_switcher_text - start_switcher_text);
37531
37532 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37533diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
37534index bd16323..ab460f7 100644
37535--- a/drivers/lguest/lguest_user.c
37536+++ b/drivers/lguest/lguest_user.c
37537@@ -194,6 +194,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
37538 * Once our Guest is initialized, the Launcher makes it run by reading
37539 * from /dev/lguest.
37540 */
37541+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
37542 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
37543 {
37544 struct lguest *lg = file->private_data;
37545diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37546index 6ae3888..8b38145 100644
37547--- a/drivers/lguest/x86/core.c
37548+++ b/drivers/lguest/x86/core.c
37549@@ -59,7 +59,7 @@ static struct {
37550 /* Offset from where switcher.S was compiled to where we've copied it */
37551 static unsigned long switcher_offset(void)
37552 {
37553- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37554+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37555 }
37556
37557 /* This cpu's struct lguest_pages. */
37558@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37559 * These copies are pretty cheap, so we do them unconditionally: */
37560 /* Save the current Host top-level page directory.
37561 */
37562+
37563+#ifdef CONFIG_PAX_PER_CPU_PGD
37564+ pages->state.host_cr3 = read_cr3();
37565+#else
37566 pages->state.host_cr3 = __pa(current->mm->pgd);
37567+#endif
37568+
37569 /*
37570 * Set up the Guest's page tables to see this CPU's pages (and no
37571 * other CPU's pages).
37572@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
37573 * compiled-in switcher code and the high-mapped copy we just made.
37574 */
37575 for (i = 0; i < IDT_ENTRIES; i++)
37576- default_idt_entries[i] += switcher_offset();
37577+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37578
37579 /*
37580 * Set up the Switcher's per-cpu areas.
37581@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
37582 * it will be undisturbed when we switch. To change %cs and jump we
37583 * need this structure to feed to Intel's "lcall" instruction.
37584 */
37585- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37586+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37587 lguest_entry.segment = LGUEST_CS;
37588
37589 /*
37590diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37591index 40634b0..4f5855e 100644
37592--- a/drivers/lguest/x86/switcher_32.S
37593+++ b/drivers/lguest/x86/switcher_32.S
37594@@ -87,6 +87,7 @@
37595 #include <asm/page.h>
37596 #include <asm/segment.h>
37597 #include <asm/lguest.h>
37598+#include <asm/processor-flags.h>
37599
37600 // We mark the start of the code to copy
37601 // It's placed in .text tho it's never run here
37602@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37603 // Changes type when we load it: damn Intel!
37604 // For after we switch over our page tables
37605 // That entry will be read-only: we'd crash.
37606+
37607+#ifdef CONFIG_PAX_KERNEXEC
37608+ mov %cr0, %edx
37609+ xor $X86_CR0_WP, %edx
37610+ mov %edx, %cr0
37611+#endif
37612+
37613 movl $(GDT_ENTRY_TSS*8), %edx
37614 ltr %dx
37615
37616@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37617 // Let's clear it again for our return.
37618 // The GDT descriptor of the Host
37619 // Points to the table after two "size" bytes
37620- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37621+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37622 // Clear "used" from type field (byte 5, bit 2)
37623- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37624+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37625+
37626+#ifdef CONFIG_PAX_KERNEXEC
37627+ mov %cr0, %eax
37628+ xor $X86_CR0_WP, %eax
37629+ mov %eax, %cr0
37630+#endif
37631
37632 // Once our page table's switched, the Guest is live!
37633 // The Host fades as we run this final step.
37634@@ -295,13 +309,12 @@ deliver_to_host:
37635 // I consulted gcc, and it gave
37636 // These instructions, which I gladly credit:
37637 leal (%edx,%ebx,8), %eax
37638- movzwl (%eax),%edx
37639- movl 4(%eax), %eax
37640- xorw %ax, %ax
37641- orl %eax, %edx
37642+ movl 4(%eax), %edx
37643+ movw (%eax), %dx
37644 // Now the address of the handler's in %edx
37645 // We call it now: its "iret" drops us home.
37646- jmp *%edx
37647+ ljmp $__KERNEL_CS, $1f
37648+1: jmp *%edx
37649
37650 // Every interrupt can come to us here
37651 // But we must truly tell each apart.
37652diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
37653index 588a5b0..b71db89 100644
37654--- a/drivers/macintosh/macio_asic.c
37655+++ b/drivers/macintosh/macio_asic.c
37656@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
37657 * MacIO is matched against any Apple ID, it's probe() function
37658 * will then decide wether it applies or not
37659 */
37660-static const struct pci_device_id __devinitdata pci_ids [] = { {
37661+static const struct pci_device_id __devinitconst pci_ids [] = { {
37662 .vendor = PCI_VENDOR_ID_APPLE,
37663 .device = PCI_ANY_ID,
37664 .subvendor = PCI_ANY_ID,
37665diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
37666index a348bb0..ecd9b3f 100644
37667--- a/drivers/macintosh/via-pmu-backlight.c
37668+++ b/drivers/macintosh/via-pmu-backlight.c
37669@@ -15,7 +15,7 @@
37670
37671 #define MAX_PMU_LEVEL 0xFF
37672
37673-static struct backlight_ops pmu_backlight_data;
37674+static const struct backlight_ops pmu_backlight_data;
37675 static DEFINE_SPINLOCK(pmu_backlight_lock);
37676 static int sleeping, uses_pmu_bl;
37677 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
37678@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
37679 return bd->props.brightness;
37680 }
37681
37682-static struct backlight_ops pmu_backlight_data = {
37683+static const struct backlight_ops pmu_backlight_data = {
37684 .get_brightness = pmu_backlight_get_brightness,
37685 .update_status = pmu_backlight_update_status,
37686
37687diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
37688index 6f308a4..b5f7ff7 100644
37689--- a/drivers/macintosh/via-pmu.c
37690+++ b/drivers/macintosh/via-pmu.c
37691@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
37692 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
37693 }
37694
37695-static struct platform_suspend_ops pmu_pm_ops = {
37696+static const struct platform_suspend_ops pmu_pm_ops = {
37697 .enter = powerbook_sleep,
37698 .valid = pmu_sleep_valid,
37699 };
37700diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37701index 818b617..4656e38 100644
37702--- a/drivers/md/dm-ioctl.c
37703+++ b/drivers/md/dm-ioctl.c
37704@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37705 cmd == DM_LIST_VERSIONS_CMD)
37706 return 0;
37707
37708- if ((cmd == DM_DEV_CREATE_CMD)) {
37709+ if (cmd == DM_DEV_CREATE_CMD) {
37710 if (!*param->name) {
37711 DMWARN("name not supplied when creating device");
37712 return -EINVAL;
37713diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37714index 6021d0a..a878643 100644
37715--- a/drivers/md/dm-raid1.c
37716+++ b/drivers/md/dm-raid1.c
37717@@ -41,7 +41,7 @@ enum dm_raid1_error {
37718
37719 struct mirror {
37720 struct mirror_set *ms;
37721- atomic_t error_count;
37722+ atomic_unchecked_t error_count;
37723 unsigned long error_type;
37724 struct dm_dev *dev;
37725 sector_t offset;
37726@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37727 * simple way to tell if a device has encountered
37728 * errors.
37729 */
37730- atomic_inc(&m->error_count);
37731+ atomic_inc_unchecked(&m->error_count);
37732
37733 if (test_and_set_bit(error_type, &m->error_type))
37734 return;
37735@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37736 }
37737
37738 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
37739- if (!atomic_read(&new->error_count)) {
37740+ if (!atomic_read_unchecked(&new->error_count)) {
37741 set_default_mirror(new);
37742 break;
37743 }
37744@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37745 struct mirror *m = get_default_mirror(ms);
37746
37747 do {
37748- if (likely(!atomic_read(&m->error_count)))
37749+ if (likely(!atomic_read_unchecked(&m->error_count)))
37750 return m;
37751
37752 if (m-- == ms->mirror)
37753@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
37754 {
37755 struct mirror *default_mirror = get_default_mirror(m->ms);
37756
37757- return !atomic_read(&default_mirror->error_count);
37758+ return !atomic_read_unchecked(&default_mirror->error_count);
37759 }
37760
37761 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37762@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37763 */
37764 if (likely(region_in_sync(ms, region, 1)))
37765 m = choose_mirror(ms, bio->bi_sector);
37766- else if (m && atomic_read(&m->error_count))
37767+ else if (m && atomic_read_unchecked(&m->error_count))
37768 m = NULL;
37769
37770 if (likely(m))
37771@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37772 }
37773
37774 ms->mirror[mirror].ms = ms;
37775- atomic_set(&(ms->mirror[mirror].error_count), 0);
37776+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37777 ms->mirror[mirror].error_type = 0;
37778 ms->mirror[mirror].offset = offset;
37779
37780@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
37781 */
37782 static char device_status_char(struct mirror *m)
37783 {
37784- if (!atomic_read(&(m->error_count)))
37785+ if (!atomic_read_unchecked(&(m->error_count)))
37786 return 'A';
37787
37788 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
37789diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37790index bd58703..9f26571 100644
37791--- a/drivers/md/dm-stripe.c
37792+++ b/drivers/md/dm-stripe.c
37793@@ -20,7 +20,7 @@ struct stripe {
37794 struct dm_dev *dev;
37795 sector_t physical_start;
37796
37797- atomic_t error_count;
37798+ atomic_unchecked_t error_count;
37799 };
37800
37801 struct stripe_c {
37802@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37803 kfree(sc);
37804 return r;
37805 }
37806- atomic_set(&(sc->stripe[i].error_count), 0);
37807+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37808 }
37809
37810 ti->private = sc;
37811@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
37812 DMEMIT("%d ", sc->stripes);
37813 for (i = 0; i < sc->stripes; i++) {
37814 DMEMIT("%s ", sc->stripe[i].dev->name);
37815- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37816+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37817 'D' : 'A';
37818 }
37819 buffer[i] = '\0';
37820@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
37821 */
37822 for (i = 0; i < sc->stripes; i++)
37823 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37824- atomic_inc(&(sc->stripe[i].error_count));
37825- if (atomic_read(&(sc->stripe[i].error_count)) <
37826+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37827+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37828 DM_IO_ERROR_THRESHOLD)
37829 queue_work(kstriped, &sc->kstriped_ws);
37830 }
37831diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
37832index 4b04590..13a77b2 100644
37833--- a/drivers/md/dm-sysfs.c
37834+++ b/drivers/md/dm-sysfs.c
37835@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
37836 NULL,
37837 };
37838
37839-static struct sysfs_ops dm_sysfs_ops = {
37840+static const struct sysfs_ops dm_sysfs_ops = {
37841 .show = dm_attr_show,
37842 };
37843
37844diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37845index 03345bb..332250d 100644
37846--- a/drivers/md/dm-table.c
37847+++ b/drivers/md/dm-table.c
37848@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37849 if (!dev_size)
37850 return 0;
37851
37852- if ((start >= dev_size) || (start + len > dev_size)) {
37853+ if ((start >= dev_size) || (len > dev_size - start)) {
37854 DMWARN("%s: %s too small for target: "
37855 "start=%llu, len=%llu, dev_size=%llu",
37856 dm_device_name(ti->table->md), bdevname(bdev, b),
37857diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37858index c988ac2..c418141 100644
37859--- a/drivers/md/dm.c
37860+++ b/drivers/md/dm.c
37861@@ -165,9 +165,9 @@ struct mapped_device {
37862 /*
37863 * Event handling.
37864 */
37865- atomic_t event_nr;
37866+ atomic_unchecked_t event_nr;
37867 wait_queue_head_t eventq;
37868- atomic_t uevent_seq;
37869+ atomic_unchecked_t uevent_seq;
37870 struct list_head uevent_list;
37871 spinlock_t uevent_lock; /* Protect access to uevent_list */
37872
37873@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
37874 rwlock_init(&md->map_lock);
37875 atomic_set(&md->holders, 1);
37876 atomic_set(&md->open_count, 0);
37877- atomic_set(&md->event_nr, 0);
37878- atomic_set(&md->uevent_seq, 0);
37879+ atomic_set_unchecked(&md->event_nr, 0);
37880+ atomic_set_unchecked(&md->uevent_seq, 0);
37881 INIT_LIST_HEAD(&md->uevent_list);
37882 spin_lock_init(&md->uevent_lock);
37883
37884@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
37885
37886 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37887
37888- atomic_inc(&md->event_nr);
37889+ atomic_inc_unchecked(&md->event_nr);
37890 wake_up(&md->eventq);
37891 }
37892
37893@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37894
37895 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37896 {
37897- return atomic_add_return(1, &md->uevent_seq);
37898+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37899 }
37900
37901 uint32_t dm_get_event_nr(struct mapped_device *md)
37902 {
37903- return atomic_read(&md->event_nr);
37904+ return atomic_read_unchecked(&md->event_nr);
37905 }
37906
37907 int dm_wait_event(struct mapped_device *md, int event_nr)
37908 {
37909 return wait_event_interruptible(md->eventq,
37910- (event_nr != atomic_read(&md->event_nr)));
37911+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37912 }
37913
37914 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37915diff --git a/drivers/md/md.c b/drivers/md/md.c
37916index 4ce6e2f..7a9530a 100644
37917--- a/drivers/md/md.c
37918+++ b/drivers/md/md.c
37919@@ -153,10 +153,10 @@ static int start_readonly;
37920 * start build, activate spare
37921 */
37922 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37923-static atomic_t md_event_count;
37924+static atomic_unchecked_t md_event_count;
37925 void md_new_event(mddev_t *mddev)
37926 {
37927- atomic_inc(&md_event_count);
37928+ atomic_inc_unchecked(&md_event_count);
37929 wake_up(&md_event_waiters);
37930 }
37931 EXPORT_SYMBOL_GPL(md_new_event);
37932@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37933 */
37934 static void md_new_event_inintr(mddev_t *mddev)
37935 {
37936- atomic_inc(&md_event_count);
37937+ atomic_inc_unchecked(&md_event_count);
37938 wake_up(&md_event_waiters);
37939 }
37940
37941@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
37942
37943 rdev->preferred_minor = 0xffff;
37944 rdev->data_offset = le64_to_cpu(sb->data_offset);
37945- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37946+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37947
37948 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37949 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37950@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
37951 else
37952 sb->resync_offset = cpu_to_le64(0);
37953
37954- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37955+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37956
37957 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37958 sb->size = cpu_to_le64(mddev->dev_sectors);
37959@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37960 static ssize_t
37961 errors_show(mdk_rdev_t *rdev, char *page)
37962 {
37963- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37964+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37965 }
37966
37967 static ssize_t
37968@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
37969 char *e;
37970 unsigned long n = simple_strtoul(buf, &e, 10);
37971 if (*buf && (*e == 0 || *e == '\n')) {
37972- atomic_set(&rdev->corrected_errors, n);
37973+ atomic_set_unchecked(&rdev->corrected_errors, n);
37974 return len;
37975 }
37976 return -EINVAL;
37977@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
37978 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
37979 kfree(rdev);
37980 }
37981-static struct sysfs_ops rdev_sysfs_ops = {
37982+static const struct sysfs_ops rdev_sysfs_ops = {
37983 .show = rdev_attr_show,
37984 .store = rdev_attr_store,
37985 };
37986@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
37987 rdev->data_offset = 0;
37988 rdev->sb_events = 0;
37989 atomic_set(&rdev->nr_pending, 0);
37990- atomic_set(&rdev->read_errors, 0);
37991- atomic_set(&rdev->corrected_errors, 0);
37992+ atomic_set_unchecked(&rdev->read_errors, 0);
37993+ atomic_set_unchecked(&rdev->corrected_errors, 0);
37994
37995 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
37996 if (!size) {
37997@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
37998 kfree(mddev);
37999 }
38000
38001-static struct sysfs_ops md_sysfs_ops = {
38002+static const struct sysfs_ops md_sysfs_ops = {
38003 .show = md_attr_show,
38004 .store = md_attr_store,
38005 };
38006@@ -4482,7 +4482,8 @@ out:
38007 err = 0;
38008 blk_integrity_unregister(disk);
38009 md_new_event(mddev);
38010- sysfs_notify_dirent(mddev->sysfs_state);
38011+ if (mddev->sysfs_state)
38012+ sysfs_notify_dirent(mddev->sysfs_state);
38013 return err;
38014 }
38015
38016@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38017
38018 spin_unlock(&pers_lock);
38019 seq_printf(seq, "\n");
38020- mi->event = atomic_read(&md_event_count);
38021+ mi->event = atomic_read_unchecked(&md_event_count);
38022 return 0;
38023 }
38024 if (v == (void*)2) {
38025@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38026 chunk_kb ? "KB" : "B");
38027 if (bitmap->file) {
38028 seq_printf(seq, ", file: ");
38029- seq_path(seq, &bitmap->file->f_path, " \t\n");
38030+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
38031 }
38032
38033 seq_printf(seq, "\n");
38034@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38035 else {
38036 struct seq_file *p = file->private_data;
38037 p->private = mi;
38038- mi->event = atomic_read(&md_event_count);
38039+ mi->event = atomic_read_unchecked(&md_event_count);
38040 }
38041 return error;
38042 }
38043@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38044 /* always allow read */
38045 mask = POLLIN | POLLRDNORM;
38046
38047- if (mi->event != atomic_read(&md_event_count))
38048+ if (mi->event != atomic_read_unchecked(&md_event_count))
38049 mask |= POLLERR | POLLPRI;
38050 return mask;
38051 }
38052@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
38053 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38054 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38055 (int)part_stat_read(&disk->part0, sectors[1]) -
38056- atomic_read(&disk->sync_io);
38057+ atomic_read_unchecked(&disk->sync_io);
38058 /* sync IO will cause sync_io to increase before the disk_stats
38059 * as sync_io is counted when a request starts, and
38060 * disk_stats is counted when it completes.
38061diff --git a/drivers/md/md.h b/drivers/md/md.h
38062index 87430fe..0024a4c 100644
38063--- a/drivers/md/md.h
38064+++ b/drivers/md/md.h
38065@@ -94,10 +94,10 @@ struct mdk_rdev_s
38066 * only maintained for arrays that
38067 * support hot removal
38068 */
38069- atomic_t read_errors; /* number of consecutive read errors that
38070+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38071 * we have tried to ignore.
38072 */
38073- atomic_t corrected_errors; /* number of corrected read errors,
38074+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38075 * for reporting to userspace and storing
38076 * in superblock.
38077 */
38078@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
38079
38080 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38081 {
38082- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38083+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38084 }
38085
38086 struct mdk_personality
38087diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38088index 968cb14..f0ad2e4 100644
38089--- a/drivers/md/raid1.c
38090+++ b/drivers/md/raid1.c
38091@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
38092 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
38093 continue;
38094 rdev = conf->mirrors[d].rdev;
38095- atomic_add(s, &rdev->corrected_errors);
38096+ atomic_add_unchecked(s, &rdev->corrected_errors);
38097 if (sync_page_io(rdev->bdev,
38098 sect + rdev->data_offset,
38099 s<<9,
38100@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
38101 /* Well, this device is dead */
38102 md_error(mddev, rdev);
38103 else {
38104- atomic_add(s, &rdev->corrected_errors);
38105+ atomic_add_unchecked(s, &rdev->corrected_errors);
38106 printk(KERN_INFO
38107 "raid1:%s: read error corrected "
38108 "(%d sectors at %llu on %s)\n",
38109diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38110index 1b4e232..cf0f534b 100644
38111--- a/drivers/md/raid10.c
38112+++ b/drivers/md/raid10.c
38113@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
38114 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
38115 set_bit(R10BIO_Uptodate, &r10_bio->state);
38116 else {
38117- atomic_add(r10_bio->sectors,
38118+ atomic_add_unchecked(r10_bio->sectors,
38119 &conf->mirrors[d].rdev->corrected_errors);
38120 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
38121 md_error(r10_bio->mddev,
38122@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
38123 test_bit(In_sync, &rdev->flags)) {
38124 atomic_inc(&rdev->nr_pending);
38125 rcu_read_unlock();
38126- atomic_add(s, &rdev->corrected_errors);
38127+ atomic_add_unchecked(s, &rdev->corrected_errors);
38128 if (sync_page_io(rdev->bdev,
38129 r10_bio->devs[sl].addr +
38130 sect + rdev->data_offset,
38131diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38132index 883215d..675bf47 100644
38133--- a/drivers/md/raid5.c
38134+++ b/drivers/md/raid5.c
38135@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
38136 bi->bi_next = NULL;
38137 if ((rw & WRITE) &&
38138 test_bit(R5_ReWrite, &sh->dev[i].flags))
38139- atomic_add(STRIPE_SECTORS,
38140+ atomic_add_unchecked(STRIPE_SECTORS,
38141 &rdev->corrected_errors);
38142 generic_make_request(bi);
38143 } else {
38144@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
38145 clear_bit(R5_ReadError, &sh->dev[i].flags);
38146 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38147 }
38148- if (atomic_read(&conf->disks[i].rdev->read_errors))
38149- atomic_set(&conf->disks[i].rdev->read_errors, 0);
38150+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
38151+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
38152 } else {
38153 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
38154 int retry = 0;
38155 rdev = conf->disks[i].rdev;
38156
38157 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38158- atomic_inc(&rdev->read_errors);
38159+ atomic_inc_unchecked(&rdev->read_errors);
38160 if (conf->mddev->degraded >= conf->max_degraded)
38161 printk_rl(KERN_WARNING
38162 "raid5:%s: read error not correctable "
38163@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38164 (unsigned long long)(sh->sector
38165 + rdev->data_offset),
38166 bdn);
38167- else if (atomic_read(&rdev->read_errors)
38168+ else if (atomic_read_unchecked(&rdev->read_errors)
38169 > conf->max_nr_stripes)
38170 printk(KERN_WARNING
38171 "raid5:%s: Too many read errors, failing device %s.\n",
38172@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
38173 sector_t r_sector;
38174 struct stripe_head sh2;
38175
38176+ pax_track_stack();
38177
38178 chunk_offset = sector_div(new_sector, sectors_per_chunk);
38179 stripe = new_sector;
38180diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
38181index 05bde9c..2f31d40 100644
38182--- a/drivers/media/common/saa7146_hlp.c
38183+++ b/drivers/media/common/saa7146_hlp.c
38184@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
38185
38186 int x[32], y[32], w[32], h[32];
38187
38188+ pax_track_stack();
38189+
38190 /* clear out memory */
38191 memset(&line_list[0], 0x00, sizeof(u32)*32);
38192 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
38193diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38194index cb22da5..82b686e 100644
38195--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38196+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
38197@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
38198 u8 buf[HOST_LINK_BUF_SIZE];
38199 int i;
38200
38201+ pax_track_stack();
38202+
38203 dprintk("%s\n", __func__);
38204
38205 /* check if we have space for a link buf in the rx_buffer */
38206@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
38207 unsigned long timeout;
38208 int written;
38209
38210+ pax_track_stack();
38211+
38212 dprintk("%s\n", __func__);
38213
38214 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
38215diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
38216index 2fe05d0..a3289c4 100644
38217--- a/drivers/media/dvb/dvb-core/dvb_demux.h
38218+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
38219@@ -71,7 +71,7 @@ struct dvb_demux_feed {
38220 union {
38221 dmx_ts_cb ts;
38222 dmx_section_cb sec;
38223- } cb;
38224+ } __no_const cb;
38225
38226 struct dvb_demux *demux;
38227 void *priv;
38228diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
38229index 94159b9..376bd8e 100644
38230--- a/drivers/media/dvb/dvb-core/dvbdev.c
38231+++ b/drivers/media/dvb/dvb-core/dvbdev.c
38232@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38233 const struct dvb_device *template, void *priv, int type)
38234 {
38235 struct dvb_device *dvbdev;
38236- struct file_operations *dvbdevfops;
38237+ file_operations_no_const *dvbdevfops;
38238 struct device *clsdev;
38239 int minor;
38240 int id;
38241diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
38242index 2a53dd0..db8c07a 100644
38243--- a/drivers/media/dvb/dvb-usb/cxusb.c
38244+++ b/drivers/media/dvb/dvb-usb/cxusb.c
38245@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38246 struct dib0700_adapter_state {
38247 int (*set_param_save) (struct dvb_frontend *,
38248 struct dvb_frontend_parameters *);
38249-};
38250+} __no_const;
38251
38252 static int dib7070_set_param_override(struct dvb_frontend *fe,
38253 struct dvb_frontend_parameters *fep)
38254diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
38255index db7f7f7..f55e96f 100644
38256--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
38257+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
38258@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
38259
38260 u8 buf[260];
38261
38262+ pax_track_stack();
38263+
38264 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
38265 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
38266
38267diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38268index 524acf5..5ffc403 100644
38269--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
38270+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
38271@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
38272
38273 struct dib0700_adapter_state {
38274 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
38275-};
38276+} __no_const;
38277
38278 /* Hauppauge Nova-T 500 (aka Bristol)
38279 * has a LNA on GPIO0 which is enabled by setting 1 */
38280diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
38281index ba91735..4261d84 100644
38282--- a/drivers/media/dvb/frontends/dib3000.h
38283+++ b/drivers/media/dvb/frontends/dib3000.h
38284@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38285 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38286 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38287 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38288-};
38289+} __no_const;
38290
38291 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38292 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38293diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
38294index c709ce6..b3fe620 100644
38295--- a/drivers/media/dvb/frontends/or51211.c
38296+++ b/drivers/media/dvb/frontends/or51211.c
38297@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
38298 u8 tudata[585];
38299 int i;
38300
38301+ pax_track_stack();
38302+
38303 dprintk("Firmware is %zd bytes\n",fw->size);
38304
38305 /* Get eprom data */
38306diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38307index 482d0f3..ee1e202 100644
38308--- a/drivers/media/radio/radio-cadet.c
38309+++ b/drivers/media/radio/radio-cadet.c
38310@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38311 while (i < count && dev->rdsin != dev->rdsout)
38312 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38313
38314- if (copy_to_user(data, readbuf, i))
38315+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
38316 return -EFAULT;
38317 return i;
38318 }
38319diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
38320index 6dd51e2..0359b92 100644
38321--- a/drivers/media/video/cx18/cx18-driver.c
38322+++ b/drivers/media/video/cx18/cx18-driver.c
38323@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
38324
38325 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
38326
38327-static atomic_t cx18_instance = ATOMIC_INIT(0);
38328+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
38329
38330 /* Parameter declarations */
38331 static int cardtype[CX18_MAX_CARDS];
38332@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
38333 struct i2c_client c;
38334 u8 eedata[256];
38335
38336+ pax_track_stack();
38337+
38338 memset(&c, 0, sizeof(c));
38339 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
38340 c.adapter = &cx->i2c_adap[0];
38341@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
38342 struct cx18 *cx;
38343
38344 /* FIXME - module parameter arrays constrain max instances */
38345- i = atomic_inc_return(&cx18_instance) - 1;
38346+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
38347 if (i >= CX18_MAX_CARDS) {
38348 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
38349 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
38350diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
38351index 463ec34..2f4625a 100644
38352--- a/drivers/media/video/ivtv/ivtv-driver.c
38353+++ b/drivers/media/video/ivtv/ivtv-driver.c
38354@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
38355 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
38356
38357 /* ivtv instance counter */
38358-static atomic_t ivtv_instance = ATOMIC_INIT(0);
38359+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
38360
38361 /* Parameter declarations */
38362 static int cardtype[IVTV_MAX_CARDS];
38363diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
38364index 5fc4ac0..652a54a 100644
38365--- a/drivers/media/video/omap24xxcam.c
38366+++ b/drivers/media/video/omap24xxcam.c
38367@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
38368 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
38369
38370 do_gettimeofday(&vb->ts);
38371- vb->field_count = atomic_add_return(2, &fh->field_count);
38372+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
38373 if (csr & csr_error) {
38374 vb->state = VIDEOBUF_ERROR;
38375 if (!atomic_read(&fh->cam->in_reset)) {
38376diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
38377index 2ce67f5..cf26a5b 100644
38378--- a/drivers/media/video/omap24xxcam.h
38379+++ b/drivers/media/video/omap24xxcam.h
38380@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
38381 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
38382 struct videobuf_queue vbq;
38383 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
38384- atomic_t field_count; /* field counter for videobuf_buffer */
38385+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
38386 /* accessing cam here doesn't need serialisation: it's constant */
38387 struct omap24xxcam_device *cam;
38388 };
38389diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38390index 299afa4..eb47459 100644
38391--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38392+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
38393@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
38394 u8 *eeprom;
38395 struct tveeprom tvdata;
38396
38397+ pax_track_stack();
38398+
38399 memset(&tvdata,0,sizeof(tvdata));
38400
38401 eeprom = pvr2_eeprom_fetch(hdw);
38402diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38403index 5b152ff..3320638 100644
38404--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38405+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
38406@@ -195,7 +195,7 @@ struct pvr2_hdw {
38407
38408 /* I2C stuff */
38409 struct i2c_adapter i2c_adap;
38410- struct i2c_algorithm i2c_algo;
38411+ i2c_algorithm_no_const i2c_algo;
38412 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
38413 int i2c_cx25840_hack_state;
38414 int i2c_linked;
38415diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
38416index 1eabff6..8e2313a 100644
38417--- a/drivers/media/video/saa7134/saa6752hs.c
38418+++ b/drivers/media/video/saa7134/saa6752hs.c
38419@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
38420 unsigned char localPAT[256];
38421 unsigned char localPMT[256];
38422
38423+ pax_track_stack();
38424+
38425 /* Set video format - must be done first as it resets other settings */
38426 set_reg8(client, 0x41, h->video_format);
38427
38428diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
38429index 9c1d3ac..b1b49e9 100644
38430--- a/drivers/media/video/saa7164/saa7164-cmd.c
38431+++ b/drivers/media/video/saa7164/saa7164-cmd.c
38432@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
38433 wait_queue_head_t *q = 0;
38434 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38435
38436+ pax_track_stack();
38437+
38438 /* While any outstand message on the bus exists... */
38439 do {
38440
38441@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
38442 u8 tmp[512];
38443 dprintk(DBGLVL_CMD, "%s()\n", __func__);
38444
38445+ pax_track_stack();
38446+
38447 while (loop) {
38448
38449 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
38450diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
38451index b085496..cde0270 100644
38452--- a/drivers/media/video/usbvideo/ibmcam.c
38453+++ b/drivers/media/video/usbvideo/ibmcam.c
38454@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
38455 static int __init ibmcam_init(void)
38456 {
38457 struct usbvideo_cb cbTbl;
38458- memset(&cbTbl, 0, sizeof(cbTbl));
38459- cbTbl.probe = ibmcam_probe;
38460- cbTbl.setupOnOpen = ibmcam_setup_on_open;
38461- cbTbl.videoStart = ibmcam_video_start;
38462- cbTbl.videoStop = ibmcam_video_stop;
38463- cbTbl.processData = ibmcam_ProcessIsocData;
38464- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38465- cbTbl.adjustPicture = ibmcam_adjust_picture;
38466- cbTbl.getFPS = ibmcam_calculate_fps;
38467+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
38468+ *(void **)&cbTbl.probe = ibmcam_probe;
38469+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
38470+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
38471+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
38472+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
38473+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38474+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
38475+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
38476 return usbvideo_register(
38477 &cams,
38478 MAX_IBMCAM,
38479diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
38480index 31d57f2..600b735 100644
38481--- a/drivers/media/video/usbvideo/konicawc.c
38482+++ b/drivers/media/video/usbvideo/konicawc.c
38483@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
38484 int error;
38485
38486 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38487- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38488+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38489
38490 cam->input = input_dev = input_allocate_device();
38491 if (!input_dev) {
38492@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
38493 struct usbvideo_cb cbTbl;
38494 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
38495 DRIVER_DESC "\n");
38496- memset(&cbTbl, 0, sizeof(cbTbl));
38497- cbTbl.probe = konicawc_probe;
38498- cbTbl.setupOnOpen = konicawc_setup_on_open;
38499- cbTbl.processData = konicawc_process_isoc;
38500- cbTbl.getFPS = konicawc_calculate_fps;
38501- cbTbl.setVideoMode = konicawc_set_video_mode;
38502- cbTbl.startDataPump = konicawc_start_data;
38503- cbTbl.stopDataPump = konicawc_stop_data;
38504- cbTbl.adjustPicture = konicawc_adjust_picture;
38505- cbTbl.userFree = konicawc_free_uvd;
38506+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
38507+ *(void **)&cbTbl.probe = konicawc_probe;
38508+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
38509+ *(void **)&cbTbl.processData = konicawc_process_isoc;
38510+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
38511+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
38512+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
38513+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
38514+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
38515+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
38516 return usbvideo_register(
38517 &cams,
38518 MAX_CAMERAS,
38519diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
38520index 803d3e4..c4d1b96 100644
38521--- a/drivers/media/video/usbvideo/quickcam_messenger.c
38522+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
38523@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
38524 int error;
38525
38526 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38527- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38528+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38529
38530 cam->input = input_dev = input_allocate_device();
38531 if (!input_dev) {
38532diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
38533index fbd1b63..292f9f0 100644
38534--- a/drivers/media/video/usbvideo/ultracam.c
38535+++ b/drivers/media/video/usbvideo/ultracam.c
38536@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
38537 {
38538 struct usbvideo_cb cbTbl;
38539 memset(&cbTbl, 0, sizeof(cbTbl));
38540- cbTbl.probe = ultracam_probe;
38541- cbTbl.setupOnOpen = ultracam_setup_on_open;
38542- cbTbl.videoStart = ultracam_video_start;
38543- cbTbl.videoStop = ultracam_video_stop;
38544- cbTbl.processData = ultracam_ProcessIsocData;
38545- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38546- cbTbl.adjustPicture = ultracam_adjust_picture;
38547- cbTbl.getFPS = ultracam_calculate_fps;
38548+ *(void **)&cbTbl.probe = ultracam_probe;
38549+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
38550+ *(void **)&cbTbl.videoStart = ultracam_video_start;
38551+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
38552+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
38553+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38554+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
38555+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
38556 return usbvideo_register(
38557 &cams,
38558 MAX_CAMERAS,
38559diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
38560index dea8b32..34f6878 100644
38561--- a/drivers/media/video/usbvideo/usbvideo.c
38562+++ b/drivers/media/video/usbvideo/usbvideo.c
38563@@ -697,15 +697,15 @@ int usbvideo_register(
38564 __func__, cams, base_size, num_cams);
38565
38566 /* Copy callbacks, apply defaults for those that are not set */
38567- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
38568+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
38569 if (cams->cb.getFrame == NULL)
38570- cams->cb.getFrame = usbvideo_GetFrame;
38571+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
38572 if (cams->cb.disconnect == NULL)
38573- cams->cb.disconnect = usbvideo_Disconnect;
38574+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
38575 if (cams->cb.startDataPump == NULL)
38576- cams->cb.startDataPump = usbvideo_StartDataPump;
38577+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
38578 if (cams->cb.stopDataPump == NULL)
38579- cams->cb.stopDataPump = usbvideo_StopDataPump;
38580+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
38581
38582 cams->num_cameras = num_cams;
38583 cams->cam = (struct uvd *) &cams[1];
38584diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
38585index c66985b..7fa143a 100644
38586--- a/drivers/media/video/usbvideo/usbvideo.h
38587+++ b/drivers/media/video/usbvideo/usbvideo.h
38588@@ -268,7 +268,7 @@ struct usbvideo_cb {
38589 int (*startDataPump)(struct uvd *uvd);
38590 void (*stopDataPump)(struct uvd *uvd);
38591 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
38592-};
38593+} __no_const;
38594
38595 struct usbvideo {
38596 int num_cameras; /* As allocated */
38597diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
38598index e0f91e4..37554ea 100644
38599--- a/drivers/media/video/usbvision/usbvision-core.c
38600+++ b/drivers/media/video/usbvision/usbvision-core.c
38601@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
38602 unsigned char rv, gv, bv;
38603 static unsigned char *Y, *U, *V;
38604
38605+ pax_track_stack();
38606+
38607 frame = usbvision->curFrame;
38608 imageSize = frame->frmwidth * frame->frmheight;
38609 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
38610diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
38611index 0d06e7c..3d17d24 100644
38612--- a/drivers/media/video/v4l2-device.c
38613+++ b/drivers/media/video/v4l2-device.c
38614@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38615 EXPORT_SYMBOL_GPL(v4l2_device_register);
38616
38617 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
38618- atomic_t *instance)
38619+ atomic_unchecked_t *instance)
38620 {
38621- int num = atomic_inc_return(instance) - 1;
38622+ int num = atomic_inc_return_unchecked(instance) - 1;
38623 int len = strlen(basename);
38624
38625 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
38626diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
38627index 032ebae..4ebd8e8 100644
38628--- a/drivers/media/video/videobuf-dma-sg.c
38629+++ b/drivers/media/video/videobuf-dma-sg.c
38630@@ -631,6 +631,9 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
38631
38632 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38633 char __user *data, size_t count,
38634+ int nonblocking ) __size_overflow(3);
38635+static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38636+ char __user *data, size_t count,
38637 int nonblocking )
38638 {
38639 struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
38640@@ -693,6 +696,8 @@ void *videobuf_sg_alloc(size_t size)
38641 {
38642 struct videobuf_queue q;
38643
38644+ pax_track_stack();
38645+
38646 /* Required to make generic handler to call __videobuf_alloc */
38647 q.int_ops = &sg_ops;
38648
38649diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
38650index 35f3900..aa7c2f1 100644
38651--- a/drivers/media/video/videobuf-vmalloc.c
38652+++ b/drivers/media/video/videobuf-vmalloc.c
38653@@ -330,6 +330,9 @@ error:
38654
38655 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38656 char __user *data, size_t count,
38657+ int nonblocking ) __size_overflow(3);
38658+static int __videobuf_copy_to_user ( struct videobuf_queue *q,
38659+ char __user *data, size_t count,
38660 int nonblocking )
38661 {
38662 struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
38663diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38664index b6992b7..9fa7547 100644
38665--- a/drivers/message/fusion/mptbase.c
38666+++ b/drivers/message/fusion/mptbase.c
38667@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
38668 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38669 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38670
38671+#ifdef CONFIG_GRKERNSEC_HIDESYM
38672+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38673+ NULL, NULL);
38674+#else
38675 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38676 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38677+#endif
38678+
38679 /*
38680 * Rounding UP to nearest 4-kB boundary here...
38681 */
38682diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38683index 83873e3..e360e9a 100644
38684--- a/drivers/message/fusion/mptsas.c
38685+++ b/drivers/message/fusion/mptsas.c
38686@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38687 return 0;
38688 }
38689
38690+static inline void
38691+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38692+{
38693+ if (phy_info->port_details) {
38694+ phy_info->port_details->rphy = rphy;
38695+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38696+ ioc->name, rphy));
38697+ }
38698+
38699+ if (rphy) {
38700+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38701+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38702+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38703+ ioc->name, rphy, rphy->dev.release));
38704+ }
38705+}
38706+
38707 /* no mutex */
38708 static void
38709 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38710@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38711 return NULL;
38712 }
38713
38714-static inline void
38715-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38716-{
38717- if (phy_info->port_details) {
38718- phy_info->port_details->rphy = rphy;
38719- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38720- ioc->name, rphy));
38721- }
38722-
38723- if (rphy) {
38724- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38725- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38726- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38727- ioc->name, rphy, rphy->dev.release));
38728- }
38729-}
38730-
38731 static inline struct sas_port *
38732 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38733 {
38734diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38735index bd096ca..332cf76 100644
38736--- a/drivers/message/fusion/mptscsih.c
38737+++ b/drivers/message/fusion/mptscsih.c
38738@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38739
38740 h = shost_priv(SChost);
38741
38742- if (h) {
38743- if (h->info_kbuf == NULL)
38744- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38745- return h->info_kbuf;
38746- h->info_kbuf[0] = '\0';
38747+ if (!h)
38748+ return NULL;
38749
38750- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38751- h->info_kbuf[size-1] = '\0';
38752- }
38753+ if (h->info_kbuf == NULL)
38754+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38755+ return h->info_kbuf;
38756+ h->info_kbuf[0] = '\0';
38757+
38758+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38759+ h->info_kbuf[size-1] = '\0';
38760
38761 return h->info_kbuf;
38762 }
38763diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
38764index efba702..59b2c0f 100644
38765--- a/drivers/message/i2o/i2o_config.c
38766+++ b/drivers/message/i2o/i2o_config.c
38767@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
38768 struct i2o_message *msg;
38769 unsigned int iop;
38770
38771+ pax_track_stack();
38772+
38773 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
38774 return -EFAULT;
38775
38776diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38777index 7045c45..c07b170 100644
38778--- a/drivers/message/i2o/i2o_proc.c
38779+++ b/drivers/message/i2o/i2o_proc.c
38780@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
38781 "Array Controller Device"
38782 };
38783
38784-static char *chtostr(u8 * chars, int n)
38785-{
38786- char tmp[256];
38787- tmp[0] = 0;
38788- return strncat(tmp, (char *)chars, n);
38789-}
38790-
38791 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38792 char *group)
38793 {
38794@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38795
38796 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38797 seq_printf(seq, "%-#8x", ddm_table.module_id);
38798- seq_printf(seq, "%-29s",
38799- chtostr(ddm_table.module_name_version, 28));
38800+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38801 seq_printf(seq, "%9d ", ddm_table.data_size);
38802 seq_printf(seq, "%8d", ddm_table.code_size);
38803
38804@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38805
38806 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38807 seq_printf(seq, "%-#8x", dst->module_id);
38808- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
38809- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
38810+ seq_printf(seq, "%-.28s", dst->module_name_version);
38811+ seq_printf(seq, "%-.8s", dst->date);
38812 seq_printf(seq, "%8d ", dst->module_size);
38813 seq_printf(seq, "%8d ", dst->mpb_size);
38814 seq_printf(seq, "0x%04x", dst->module_flags);
38815@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38816 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38817 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38818 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38819- seq_printf(seq, "Vendor info : %s\n",
38820- chtostr((u8 *) (work32 + 2), 16));
38821- seq_printf(seq, "Product info : %s\n",
38822- chtostr((u8 *) (work32 + 6), 16));
38823- seq_printf(seq, "Description : %s\n",
38824- chtostr((u8 *) (work32 + 10), 16));
38825- seq_printf(seq, "Product rev. : %s\n",
38826- chtostr((u8 *) (work32 + 14), 8));
38827+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38828+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38829+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38830+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38831
38832 seq_printf(seq, "Serial number : ");
38833 print_serial_number(seq, (u8 *) (work32 + 16),
38834@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38835 }
38836
38837 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38838- seq_printf(seq, "Module name : %s\n",
38839- chtostr(result.module_name, 24));
38840- seq_printf(seq, "Module revision : %s\n",
38841- chtostr(result.module_rev, 8));
38842+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38843+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38844
38845 seq_printf(seq, "Serial number : ");
38846 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38847@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38848 return 0;
38849 }
38850
38851- seq_printf(seq, "Device name : %s\n",
38852- chtostr(result.device_name, 64));
38853- seq_printf(seq, "Service name : %s\n",
38854- chtostr(result.service_name, 64));
38855- seq_printf(seq, "Physical name : %s\n",
38856- chtostr(result.physical_location, 64));
38857- seq_printf(seq, "Instance number : %s\n",
38858- chtostr(result.instance_number, 4));
38859+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38860+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38861+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38862+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38863
38864 return 0;
38865 }
38866diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38867index 27cf4af..b1205b8 100644
38868--- a/drivers/message/i2o/iop.c
38869+++ b/drivers/message/i2o/iop.c
38870@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38871
38872 spin_lock_irqsave(&c->context_list_lock, flags);
38873
38874- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38875- atomic_inc(&c->context_list_counter);
38876+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38877+ atomic_inc_unchecked(&c->context_list_counter);
38878
38879- entry->context = atomic_read(&c->context_list_counter);
38880+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38881
38882 list_add(&entry->list, &c->context_list);
38883
38884@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38885
38886 #if BITS_PER_LONG == 64
38887 spin_lock_init(&c->context_list_lock);
38888- atomic_set(&c->context_list_counter, 0);
38889+ atomic_set_unchecked(&c->context_list_counter, 0);
38890 INIT_LIST_HEAD(&c->context_list);
38891 #endif
38892
38893diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
38894index 78e3e85..66c9a0d 100644
38895--- a/drivers/mfd/ab3100-core.c
38896+++ b/drivers/mfd/ab3100-core.c
38897@@ -777,7 +777,7 @@ struct ab_family_id {
38898 char *name;
38899 };
38900
38901-static const struct ab_family_id ids[] __initdata = {
38902+static const struct ab_family_id ids[] __initconst = {
38903 /* AB3100 */
38904 {
38905 .id = 0xc0,
38906diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
38907index 8d8c932..8104515 100644
38908--- a/drivers/mfd/wm8350-i2c.c
38909+++ b/drivers/mfd/wm8350-i2c.c
38910@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
38911 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
38912 int ret;
38913
38914+ pax_track_stack();
38915+
38916 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
38917 return -EINVAL;
38918
38919diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38920index e4ff50b..4cc3f04 100644
38921--- a/drivers/misc/kgdbts.c
38922+++ b/drivers/misc/kgdbts.c
38923@@ -118,7 +118,7 @@
38924 } while (0)
38925 #define MAX_CONFIG_LEN 40
38926
38927-static struct kgdb_io kgdbts_io_ops;
38928+static const struct kgdb_io kgdbts_io_ops;
38929 static char get_buf[BUFMAX];
38930 static int get_buf_cnt;
38931 static char put_buf[BUFMAX];
38932@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
38933 module_put(THIS_MODULE);
38934 }
38935
38936-static struct kgdb_io kgdbts_io_ops = {
38937+static const struct kgdb_io kgdbts_io_ops = {
38938 .name = "kgdbts",
38939 .read_char = kgdbts_get_char,
38940 .write_char = kgdbts_put_char,
38941diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38942index 37e7cfc..67cfb76 100644
38943--- a/drivers/misc/sgi-gru/gruhandles.c
38944+++ b/drivers/misc/sgi-gru/gruhandles.c
38945@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38946
38947 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38948 {
38949- atomic_long_inc(&mcs_op_statistics[op].count);
38950- atomic_long_add(clks, &mcs_op_statistics[op].total);
38951+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38952+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
38953 if (mcs_op_statistics[op].max < clks)
38954 mcs_op_statistics[op].max = clks;
38955 }
38956diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38957index 3f2375c..467c6e6 100644
38958--- a/drivers/misc/sgi-gru/gruprocfs.c
38959+++ b/drivers/misc/sgi-gru/gruprocfs.c
38960@@ -32,9 +32,9 @@
38961
38962 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38963
38964-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38965+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38966 {
38967- unsigned long val = atomic_long_read(v);
38968+ unsigned long val = atomic_long_read_unchecked(v);
38969
38970 if (val)
38971 seq_printf(s, "%16lu %s\n", val, id);
38972@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38973 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
38974
38975 for (op = 0; op < mcsop_last; op++) {
38976- count = atomic_long_read(&mcs_op_statistics[op].count);
38977- total = atomic_long_read(&mcs_op_statistics[op].total);
38978+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38979+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38980 max = mcs_op_statistics[op].max;
38981 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38982 count ? total / count : 0, max);
38983diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38984index 46990bc..4a251b5 100644
38985--- a/drivers/misc/sgi-gru/grutables.h
38986+++ b/drivers/misc/sgi-gru/grutables.h
38987@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
38988 * GRU statistics.
38989 */
38990 struct gru_stats_s {
38991- atomic_long_t vdata_alloc;
38992- atomic_long_t vdata_free;
38993- atomic_long_t gts_alloc;
38994- atomic_long_t gts_free;
38995- atomic_long_t vdata_double_alloc;
38996- atomic_long_t gts_double_allocate;
38997- atomic_long_t assign_context;
38998- atomic_long_t assign_context_failed;
38999- atomic_long_t free_context;
39000- atomic_long_t load_user_context;
39001- atomic_long_t load_kernel_context;
39002- atomic_long_t lock_kernel_context;
39003- atomic_long_t unlock_kernel_context;
39004- atomic_long_t steal_user_context;
39005- atomic_long_t steal_kernel_context;
39006- atomic_long_t steal_context_failed;
39007- atomic_long_t nopfn;
39008- atomic_long_t break_cow;
39009- atomic_long_t asid_new;
39010- atomic_long_t asid_next;
39011- atomic_long_t asid_wrap;
39012- atomic_long_t asid_reuse;
39013- atomic_long_t intr;
39014- atomic_long_t intr_mm_lock_failed;
39015- atomic_long_t call_os;
39016- atomic_long_t call_os_offnode_reference;
39017- atomic_long_t call_os_check_for_bug;
39018- atomic_long_t call_os_wait_queue;
39019- atomic_long_t user_flush_tlb;
39020- atomic_long_t user_unload_context;
39021- atomic_long_t user_exception;
39022- atomic_long_t set_context_option;
39023- atomic_long_t migrate_check;
39024- atomic_long_t migrated_retarget;
39025- atomic_long_t migrated_unload;
39026- atomic_long_t migrated_unload_delay;
39027- atomic_long_t migrated_nopfn_retarget;
39028- atomic_long_t migrated_nopfn_unload;
39029- atomic_long_t tlb_dropin;
39030- atomic_long_t tlb_dropin_fail_no_asid;
39031- atomic_long_t tlb_dropin_fail_upm;
39032- atomic_long_t tlb_dropin_fail_invalid;
39033- atomic_long_t tlb_dropin_fail_range_active;
39034- atomic_long_t tlb_dropin_fail_idle;
39035- atomic_long_t tlb_dropin_fail_fmm;
39036- atomic_long_t tlb_dropin_fail_no_exception;
39037- atomic_long_t tlb_dropin_fail_no_exception_war;
39038- atomic_long_t tfh_stale_on_fault;
39039- atomic_long_t mmu_invalidate_range;
39040- atomic_long_t mmu_invalidate_page;
39041- atomic_long_t mmu_clear_flush_young;
39042- atomic_long_t flush_tlb;
39043- atomic_long_t flush_tlb_gru;
39044- atomic_long_t flush_tlb_gru_tgh;
39045- atomic_long_t flush_tlb_gru_zero_asid;
39046+ atomic_long_unchecked_t vdata_alloc;
39047+ atomic_long_unchecked_t vdata_free;
39048+ atomic_long_unchecked_t gts_alloc;
39049+ atomic_long_unchecked_t gts_free;
39050+ atomic_long_unchecked_t vdata_double_alloc;
39051+ atomic_long_unchecked_t gts_double_allocate;
39052+ atomic_long_unchecked_t assign_context;
39053+ atomic_long_unchecked_t assign_context_failed;
39054+ atomic_long_unchecked_t free_context;
39055+ atomic_long_unchecked_t load_user_context;
39056+ atomic_long_unchecked_t load_kernel_context;
39057+ atomic_long_unchecked_t lock_kernel_context;
39058+ atomic_long_unchecked_t unlock_kernel_context;
39059+ atomic_long_unchecked_t steal_user_context;
39060+ atomic_long_unchecked_t steal_kernel_context;
39061+ atomic_long_unchecked_t steal_context_failed;
39062+ atomic_long_unchecked_t nopfn;
39063+ atomic_long_unchecked_t break_cow;
39064+ atomic_long_unchecked_t asid_new;
39065+ atomic_long_unchecked_t asid_next;
39066+ atomic_long_unchecked_t asid_wrap;
39067+ atomic_long_unchecked_t asid_reuse;
39068+ atomic_long_unchecked_t intr;
39069+ atomic_long_unchecked_t intr_mm_lock_failed;
39070+ atomic_long_unchecked_t call_os;
39071+ atomic_long_unchecked_t call_os_offnode_reference;
39072+ atomic_long_unchecked_t call_os_check_for_bug;
39073+ atomic_long_unchecked_t call_os_wait_queue;
39074+ atomic_long_unchecked_t user_flush_tlb;
39075+ atomic_long_unchecked_t user_unload_context;
39076+ atomic_long_unchecked_t user_exception;
39077+ atomic_long_unchecked_t set_context_option;
39078+ atomic_long_unchecked_t migrate_check;
39079+ atomic_long_unchecked_t migrated_retarget;
39080+ atomic_long_unchecked_t migrated_unload;
39081+ atomic_long_unchecked_t migrated_unload_delay;
39082+ atomic_long_unchecked_t migrated_nopfn_retarget;
39083+ atomic_long_unchecked_t migrated_nopfn_unload;
39084+ atomic_long_unchecked_t tlb_dropin;
39085+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39086+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39087+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39088+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39089+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39090+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39091+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39092+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
39093+ atomic_long_unchecked_t tfh_stale_on_fault;
39094+ atomic_long_unchecked_t mmu_invalidate_range;
39095+ atomic_long_unchecked_t mmu_invalidate_page;
39096+ atomic_long_unchecked_t mmu_clear_flush_young;
39097+ atomic_long_unchecked_t flush_tlb;
39098+ atomic_long_unchecked_t flush_tlb_gru;
39099+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39100+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39101
39102- atomic_long_t copy_gpa;
39103+ atomic_long_unchecked_t copy_gpa;
39104
39105- atomic_long_t mesq_receive;
39106- atomic_long_t mesq_receive_none;
39107- atomic_long_t mesq_send;
39108- atomic_long_t mesq_send_failed;
39109- atomic_long_t mesq_noop;
39110- atomic_long_t mesq_send_unexpected_error;
39111- atomic_long_t mesq_send_lb_overflow;
39112- atomic_long_t mesq_send_qlimit_reached;
39113- atomic_long_t mesq_send_amo_nacked;
39114- atomic_long_t mesq_send_put_nacked;
39115- atomic_long_t mesq_qf_not_full;
39116- atomic_long_t mesq_qf_locked;
39117- atomic_long_t mesq_qf_noop_not_full;
39118- atomic_long_t mesq_qf_switch_head_failed;
39119- atomic_long_t mesq_qf_unexpected_error;
39120- atomic_long_t mesq_noop_unexpected_error;
39121- atomic_long_t mesq_noop_lb_overflow;
39122- atomic_long_t mesq_noop_qlimit_reached;
39123- atomic_long_t mesq_noop_amo_nacked;
39124- atomic_long_t mesq_noop_put_nacked;
39125+ atomic_long_unchecked_t mesq_receive;
39126+ atomic_long_unchecked_t mesq_receive_none;
39127+ atomic_long_unchecked_t mesq_send;
39128+ atomic_long_unchecked_t mesq_send_failed;
39129+ atomic_long_unchecked_t mesq_noop;
39130+ atomic_long_unchecked_t mesq_send_unexpected_error;
39131+ atomic_long_unchecked_t mesq_send_lb_overflow;
39132+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39133+ atomic_long_unchecked_t mesq_send_amo_nacked;
39134+ atomic_long_unchecked_t mesq_send_put_nacked;
39135+ atomic_long_unchecked_t mesq_qf_not_full;
39136+ atomic_long_unchecked_t mesq_qf_locked;
39137+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39138+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39139+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39140+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39141+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39142+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39143+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39144+ atomic_long_unchecked_t mesq_noop_put_nacked;
39145
39146 };
39147
39148@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39149 cchop_deallocate, tghop_invalidate, mcsop_last};
39150
39151 struct mcs_op_statistic {
39152- atomic_long_t count;
39153- atomic_long_t total;
39154+ atomic_long_unchecked_t count;
39155+ atomic_long_unchecked_t total;
39156 unsigned long max;
39157 };
39158
39159@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39160
39161 #define STAT(id) do { \
39162 if (gru_options & OPT_STATS) \
39163- atomic_long_inc(&gru_stats.id); \
39164+ atomic_long_inc_unchecked(&gru_stats.id); \
39165 } while (0)
39166
39167 #ifdef CONFIG_SGI_GRU_DEBUG
39168diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39169index 2275126..12a9dbfb 100644
39170--- a/drivers/misc/sgi-xp/xp.h
39171+++ b/drivers/misc/sgi-xp/xp.h
39172@@ -289,7 +289,7 @@ struct xpc_interface {
39173 xpc_notify_func, void *);
39174 void (*received) (short, int, void *);
39175 enum xp_retval (*partid_to_nasids) (short, void *);
39176-};
39177+} __no_const;
39178
39179 extern struct xpc_interface xpc_interface;
39180
39181diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39182index b94d5f7..7f494c5 100644
39183--- a/drivers/misc/sgi-xp/xpc.h
39184+++ b/drivers/misc/sgi-xp/xpc.h
39185@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39186 void (*received_payload) (struct xpc_channel *, void *);
39187 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39188 };
39189+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39190
39191 /* struct xpc_partition act_state values (for XPC HB) */
39192
39193@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39194 /* found in xpc_main.c */
39195 extern struct device *xpc_part;
39196 extern struct device *xpc_chan;
39197-extern struct xpc_arch_operations xpc_arch_ops;
39198+extern xpc_arch_operations_no_const xpc_arch_ops;
39199 extern int xpc_disengage_timelimit;
39200 extern int xpc_disengage_timedout;
39201 extern int xpc_activate_IRQ_rcvd;
39202diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39203index fd3688a..7e211a4 100644
39204--- a/drivers/misc/sgi-xp/xpc_main.c
39205+++ b/drivers/misc/sgi-xp/xpc_main.c
39206@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
39207 .notifier_call = xpc_system_die,
39208 };
39209
39210-struct xpc_arch_operations xpc_arch_ops;
39211+xpc_arch_operations_no_const xpc_arch_ops;
39212
39213 /*
39214 * Timer function to enforce the timelimit on the partition disengage.
39215diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
39216index 8b70e03..700bda6 100644
39217--- a/drivers/misc/sgi-xp/xpc_sn2.c
39218+++ b/drivers/misc/sgi-xp/xpc_sn2.c
39219@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
39220 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
39221 }
39222
39223-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
39224+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
39225 .setup_partitions = xpc_setup_partitions_sn2,
39226 .teardown_partitions = xpc_teardown_partitions_sn2,
39227 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
39228@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
39229 int ret;
39230 size_t buf_size;
39231
39232- xpc_arch_ops = xpc_arch_ops_sn2;
39233+ pax_open_kernel();
39234+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
39235+ pax_close_kernel();
39236
39237 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
39238 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
39239diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
39240index 8e08d71..7cb8c9b 100644
39241--- a/drivers/misc/sgi-xp/xpc_uv.c
39242+++ b/drivers/misc/sgi-xp/xpc_uv.c
39243@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
39244 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
39245 }
39246
39247-static struct xpc_arch_operations xpc_arch_ops_uv = {
39248+static const struct xpc_arch_operations xpc_arch_ops_uv = {
39249 .setup_partitions = xpc_setup_partitions_uv,
39250 .teardown_partitions = xpc_teardown_partitions_uv,
39251 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
39252@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
39253 int
39254 xpc_init_uv(void)
39255 {
39256- xpc_arch_ops = xpc_arch_ops_uv;
39257+ pax_open_kernel();
39258+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
39259+ pax_close_kernel();
39260
39261 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
39262 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
39263diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
39264index 6fd20b42..650efe3 100644
39265--- a/drivers/mmc/host/sdhci-pci.c
39266+++ b/drivers/mmc/host/sdhci-pci.c
39267@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
39268 .probe = via_probe,
39269 };
39270
39271-static const struct pci_device_id pci_ids[] __devinitdata = {
39272+static const struct pci_device_id pci_ids[] __devinitconst = {
39273 {
39274 .vendor = PCI_VENDOR_ID_RICOH,
39275 .device = PCI_DEVICE_ID_RICOH_R5C822,
39276diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
39277index e7563a9..5f90ce5 100644
39278--- a/drivers/mtd/chips/cfi_cmdset_0001.c
39279+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
39280@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
39281 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
39282 unsigned long timeo = jiffies + HZ;
39283
39284+ pax_track_stack();
39285+
39286 /* Prevent setting state FL_SYNCING for chip in suspended state. */
39287 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
39288 goto sleep;
39289@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
39290 unsigned long initial_adr;
39291 int initial_len = len;
39292
39293+ pax_track_stack();
39294+
39295 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
39296 adr += chip->start;
39297 initial_adr = adr;
39298@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
39299 int retries = 3;
39300 int ret;
39301
39302+ pax_track_stack();
39303+
39304 adr += chip->start;
39305
39306 retry:
39307diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
39308index 0667a67..3ab97ed 100644
39309--- a/drivers/mtd/chips/cfi_cmdset_0020.c
39310+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
39311@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
39312 unsigned long cmd_addr;
39313 struct cfi_private *cfi = map->fldrv_priv;
39314
39315+ pax_track_stack();
39316+
39317 adr += chip->start;
39318
39319 /* Ensure cmd read/writes are aligned. */
39320@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
39321 DECLARE_WAITQUEUE(wait, current);
39322 int wbufsize, z;
39323
39324+ pax_track_stack();
39325+
39326 /* M58LW064A requires bus alignment for buffer wriets -- saw */
39327 if (adr & (map_bankwidth(map)-1))
39328 return -EINVAL;
39329@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
39330 DECLARE_WAITQUEUE(wait, current);
39331 int ret = 0;
39332
39333+ pax_track_stack();
39334+
39335 adr += chip->start;
39336
39337 /* Let's determine this according to the interleave only once */
39338@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
39339 unsigned long timeo = jiffies + HZ;
39340 DECLARE_WAITQUEUE(wait, current);
39341
39342+ pax_track_stack();
39343+
39344 adr += chip->start;
39345
39346 /* Let's determine this according to the interleave only once */
39347@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
39348 unsigned long timeo = jiffies + HZ;
39349 DECLARE_WAITQUEUE(wait, current);
39350
39351+ pax_track_stack();
39352+
39353 adr += chip->start;
39354
39355 /* Let's determine this according to the interleave only once */
39356diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39357index 5bf5f46..c5de373 100644
39358--- a/drivers/mtd/devices/doc2000.c
39359+++ b/drivers/mtd/devices/doc2000.c
39360@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39361
39362 /* The ECC will not be calculated correctly if less than 512 is written */
39363 /* DBB-
39364- if (len != 0x200 && eccbuf)
39365+ if (len != 0x200)
39366 printk(KERN_WARNING
39367 "ECC needs a full sector write (adr: %lx size %lx)\n",
39368 (long) to, (long) len);
39369diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
39370index 0990f78..bb4e8a4 100644
39371--- a/drivers/mtd/devices/doc2001.c
39372+++ b/drivers/mtd/devices/doc2001.c
39373@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
39374 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
39375
39376 /* Don't allow read past end of device */
39377- if (from >= this->totlen)
39378+ if (from >= this->totlen || !len)
39379 return -EINVAL;
39380
39381 /* Don't allow a single read to cross a 512-byte block boundary */
39382diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
39383index e56d6b4..f07e6cf 100644
39384--- a/drivers/mtd/ftl.c
39385+++ b/drivers/mtd/ftl.c
39386@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
39387 loff_t offset;
39388 uint16_t srcunitswap = cpu_to_le16(srcunit);
39389
39390+ pax_track_stack();
39391+
39392 eun = &part->EUNInfo[srcunit];
39393 xfer = &part->XferInfo[xferunit];
39394 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
39395diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
39396index 8aca552..146446e 100755
39397--- a/drivers/mtd/inftlcore.c
39398+++ b/drivers/mtd/inftlcore.c
39399@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
39400 struct inftl_oob oob;
39401 size_t retlen;
39402
39403+ pax_track_stack();
39404+
39405 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
39406 "pending=%d)\n", inftl, thisVUC, pendingblock);
39407
39408diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
39409index 32e82ae..ed50953 100644
39410--- a/drivers/mtd/inftlmount.c
39411+++ b/drivers/mtd/inftlmount.c
39412@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
39413 struct INFTLPartition *ip;
39414 size_t retlen;
39415
39416+ pax_track_stack();
39417+
39418 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
39419
39420 /*
39421diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
39422index 79bf40f..fe5f8fd 100644
39423--- a/drivers/mtd/lpddr/qinfo_probe.c
39424+++ b/drivers/mtd/lpddr/qinfo_probe.c
39425@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
39426 {
39427 map_word pfow_val[4];
39428
39429+ pax_track_stack();
39430+
39431 /* Check identification string */
39432 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
39433 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
39434diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
39435index 726a1b8..f46b460 100644
39436--- a/drivers/mtd/mtdchar.c
39437+++ b/drivers/mtd/mtdchar.c
39438@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
39439 u_long size;
39440 struct mtd_info_user info;
39441
39442+ pax_track_stack();
39443+
39444 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
39445
39446 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
39447diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
39448index 1002e18..26d82d5 100644
39449--- a/drivers/mtd/nftlcore.c
39450+++ b/drivers/mtd/nftlcore.c
39451@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
39452 int inplace = 1;
39453 size_t retlen;
39454
39455+ pax_track_stack();
39456+
39457 memset(BlockMap, 0xff, sizeof(BlockMap));
39458 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
39459
39460diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39461index 8b22b18..6fada85 100644
39462--- a/drivers/mtd/nftlmount.c
39463+++ b/drivers/mtd/nftlmount.c
39464@@ -23,6 +23,7 @@
39465 #include <asm/errno.h>
39466 #include <linux/delay.h>
39467 #include <linux/slab.h>
39468+#include <linux/sched.h>
39469 #include <linux/mtd/mtd.h>
39470 #include <linux/mtd/nand.h>
39471 #include <linux/mtd/nftl.h>
39472@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
39473 struct mtd_info *mtd = nftl->mbd.mtd;
39474 unsigned int i;
39475
39476+ pax_track_stack();
39477+
39478 /* Assume logical EraseSize == physical erasesize for starting the scan.
39479 We'll sort it out later if we find a MediaHeader which says otherwise */
39480 /* Actually, we won't. The new DiskOnChip driver has already scanned
39481diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
39482index 14cec04..09d8519 100644
39483--- a/drivers/mtd/ubi/build.c
39484+++ b/drivers/mtd/ubi/build.c
39485@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
39486 static int __init bytes_str_to_int(const char *str)
39487 {
39488 char *endp;
39489- unsigned long result;
39490+ unsigned long result, scale = 1;
39491
39492 result = simple_strtoul(str, &endp, 0);
39493 if (str == endp || result >= INT_MAX) {
39494@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
39495
39496 switch (*endp) {
39497 case 'G':
39498- result *= 1024;
39499+ scale *= 1024;
39500 case 'M':
39501- result *= 1024;
39502+ scale *= 1024;
39503 case 'K':
39504- result *= 1024;
39505+ scale *= 1024;
39506 if (endp[1] == 'i' && endp[2] == 'B')
39507 endp += 2;
39508 case '\0':
39509@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
39510 return -EINVAL;
39511 }
39512
39513- return result;
39514+ if (result*scale >= INT_MAX) {
39515+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
39516+ str);
39517+ return -EINVAL;
39518+ }
39519+
39520+ return result*scale;
39521 }
39522
39523 /**
39524diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
39525index ab68886..ca405e8 100644
39526--- a/drivers/net/atlx/atl2.c
39527+++ b/drivers/net/atlx/atl2.c
39528@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
39529 */
39530
39531 #define ATL2_PARAM(X, desc) \
39532- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39533+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39534 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
39535 MODULE_PARM_DESC(X, desc);
39536 #else
39537diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
39538index a60cd80..0ed11ef 100644
39539--- a/drivers/net/benet/Makefile
39540+++ b/drivers/net/benet/Makefile
39541@@ -1,7 +1,9 @@
39542 #
39543-# Makefile to build the network driver for ServerEngine's BladeEngine.
39544+# Makefile to build the be2net network driver
39545 #
39546
39547+EXTRA_CFLAGS += -DCONFIG_PALAU
39548+
39549 obj-$(CONFIG_BE2NET) += be2net.o
39550
39551-be2net-y := be_main.o be_cmds.o be_ethtool.o
39552+be2net-y := be_main.o be_cmds.o be_ethtool.o be_compat.o be_misc.o
39553diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
39554index 5c74ff0..7382603 100644
39555--- a/drivers/net/benet/be.h
39556+++ b/drivers/net/benet/be.h
39557@@ -1,18 +1,18 @@
39558 /*
39559- * Copyright (C) 2005 - 2009 ServerEngines
39560+ * Copyright (C) 2005 - 2011 Emulex
39561 * All rights reserved.
39562 *
39563 * This program is free software; you can redistribute it and/or
39564 * modify it under the terms of the GNU General Public License version 2
39565- * as published by the Free Software Foundation. The full GNU General
39566+ * as published by the Free Software Foundation. The full GNU General
39567 * Public License is included in this distribution in the file called COPYING.
39568 *
39569 * Contact Information:
39570- * linux-drivers@serverengines.com
39571+ * linux-drivers@emulex.com
39572 *
39573- * ServerEngines
39574- * 209 N. Fair Oaks Ave
39575- * Sunnyvale, CA 94085
39576+ * Emulex
39577+ * 3333 Susan Street
39578+ * Costa Mesa, CA 92626
39579 */
39580
39581 #ifndef BE_H
39582@@ -29,32 +29,53 @@
39583 #include <linux/workqueue.h>
39584 #include <linux/interrupt.h>
39585 #include <linux/firmware.h>
39586+#include <linux/jhash.h>
39587+#ifndef CONFIG_PALAU
39588+#include <linux/inet_lro.h>
39589+#endif
39590
39591+#ifdef CONFIG_PALAU
39592+#include "be_compat.h"
39593+#endif
39594 #include "be_hw.h"
39595
39596-#define DRV_VER "2.101.205"
39597+#ifdef CONFIG_PALAU
39598+#include "version.h"
39599+#define DRV_VER STR_BE_MAJOR "." STR_BE_MINOR "."\
39600+ STR_BE_BUILD "." STR_BE_BRANCH
39601+#else
39602+#define DRV_VER "2.0.348"
39603+#endif
39604 #define DRV_NAME "be2net"
39605-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39606-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
39607-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
39608-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39609-#define DRV_DESC BE_NAME "Driver"
39610+#define BE_NAME "Emulex BladeEngine2"
39611+#define BE3_NAME "Emulex BladeEngine3"
39612+#define OC_NAME "Emulex OneConnect"
39613+#define OC_NAME_BE OC_NAME "(be3)"
39614+#define OC_NAME_LANCER OC_NAME "(Lancer)"
39615+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
39616
39617-#define BE_VENDOR_ID 0x19a2
39618+#define BE_VENDOR_ID 0x19a2
39619+#define EMULEX_VENDOR_ID 0x10df
39620 #define BE_DEVICE_ID1 0x211
39621 #define BE_DEVICE_ID2 0x221
39622-#define OC_DEVICE_ID1 0x700
39623-#define OC_DEVICE_ID2 0x701
39624-#define OC_DEVICE_ID3 0x710
39625+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
39626+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
39627+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
39628+
39629+#define OC_SUBSYS_DEVICE_ID1 0xE602
39630+#define OC_SUBSYS_DEVICE_ID2 0xE642
39631+#define OC_SUBSYS_DEVICE_ID3 0xE612
39632+#define OC_SUBSYS_DEVICE_ID4 0xE652
39633
39634 static inline char *nic_name(struct pci_dev *pdev)
39635 {
39636 switch (pdev->device) {
39637 case OC_DEVICE_ID1:
39638- case OC_DEVICE_ID2:
39639 return OC_NAME;
39640+ case OC_DEVICE_ID2:
39641+ return OC_NAME_BE;
39642 case OC_DEVICE_ID3:
39643- return OC_NAME1;
39644+ return OC_NAME_LANCER;
39645 case BE_DEVICE_ID2:
39646 return BE3_NAME;
39647 default:
39648@@ -63,7 +84,7 @@ static inline char *nic_name(struct pci_dev *pdev)
39649 }
39650
39651 /* Number of bytes of an RX frame that are copied to skb->data */
39652-#define BE_HDR_LEN 64
39653+#define BE_HDR_LEN ((u16) 64)
39654 #define BE_MAX_JUMBO_FRAME_SIZE 9018
39655 #define BE_MIN_MTU 256
39656
39657@@ -79,10 +100,24 @@ static inline char *nic_name(struct pci_dev *pdev)
39658 #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
39659 #define MCC_CQ_LEN 256
39660
39661+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
39662+
39663+#define MAX_RX_QS (MAX_RSS_QS + 1)
39664+
39665+#ifdef MQ_TX
39666+#define MAX_TX_QS 8
39667+#else
39668+#define MAX_TX_QS 1
39669+#endif
39670+
39671+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RSS qs + 1 def Rx + Tx */
39672 #define BE_NAPI_WEIGHT 64
39673-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39674+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39675 #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
39676
39677+#define BE_MAX_LRO_DESCRIPTORS 16
39678+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
39679+
39680 #define FW_VER_LEN 32
39681
39682 struct be_dma_mem {
39683@@ -127,6 +162,11 @@ static inline void *queue_tail_node(struct be_queue_info *q)
39684 return q->dma_mem.va + q->tail * q->entry_size;
39685 }
39686
39687+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
39688+{
39689+ return q->dma_mem.va + index * q->entry_size;
39690+}
39691+
39692 static inline void queue_head_inc(struct be_queue_info *q)
39693 {
39694 index_inc(&q->head, q->len);
39695@@ -137,6 +177,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
39696 index_inc(&q->tail, q->len);
39697 }
39698
39699+
39700 struct be_eq_obj {
39701 struct be_queue_info q;
39702 char desc[32];
39703@@ -146,6 +187,7 @@ struct be_eq_obj {
39704 u16 min_eqd; /* in usecs */
39705 u16 max_eqd; /* in usecs */
39706 u16 cur_eqd; /* in usecs */
39707+ u8 eq_idx;
39708
39709 struct napi_struct napi;
39710 };
39711@@ -153,49 +195,20 @@ struct be_eq_obj {
39712 struct be_mcc_obj {
39713 struct be_queue_info q;
39714 struct be_queue_info cq;
39715+ bool rearm_cq;
39716 };
39717
39718-struct be_drvr_stats {
39719+struct be_tx_stats {
39720 u32 be_tx_reqs; /* number of TX requests initiated */
39721 u32 be_tx_stops; /* number of times TX Q was stopped */
39722- u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
39723 u32 be_tx_wrbs; /* number of tx WRBs used */
39724- u32 be_tx_events; /* number of tx completion events */
39725 u32 be_tx_compl; /* number of tx completion entries processed */
39726 ulong be_tx_jiffies;
39727 u64 be_tx_bytes;
39728 u64 be_tx_bytes_prev;
39729 u64 be_tx_pkts;
39730 u32 be_tx_rate;
39731-
39732- u32 cache_barrier[16];
39733-
39734- u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
39735- u32 be_polls; /* number of times NAPI called poll function */
39736- u32 be_rx_events; /* number of ucast rx completion events */
39737- u32 be_rx_compl; /* number of rx completion entries processed */
39738- ulong be_rx_jiffies;
39739- u64 be_rx_bytes;
39740- u64 be_rx_bytes_prev;
39741- u64 be_rx_pkts;
39742- u32 be_rx_rate;
39743- /* number of non ether type II frames dropped where
39744- * frame len > length field of Mac Hdr */
39745- u32 be_802_3_dropped_frames;
39746- /* number of non ether type II frames malformed where
39747- * in frame len < length field of Mac Hdr */
39748- u32 be_802_3_malformed_frames;
39749- u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
39750- ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39751- u32 be_rx_frags;
39752- u32 be_prev_rx_frags;
39753- u32 be_rx_fps; /* Rx frags per second */
39754-};
39755-
39756-struct be_stats_obj {
39757- struct be_drvr_stats drvr_stats;
39758- struct net_device_stats net_stats;
39759- struct be_dma_mem cmd;
39760+ u32 be_ipv6_ext_hdr_tx_drop;
39761 };
39762
39763 struct be_tx_obj {
39764@@ -203,23 +216,124 @@ struct be_tx_obj {
39765 struct be_queue_info cq;
39766 /* Remember the skbs that were transmitted */
39767 struct sk_buff *sent_skb_list[TX_Q_LEN];
39768+ struct be_tx_stats stats;
39769 };
39770
39771 /* Struct to remember the pages posted for rx frags */
39772 struct be_rx_page_info {
39773 struct page *page;
39774- dma_addr_t bus;
39775+ DEFINE_DMA_UNMAP_ADDR(bus);
39776 u16 page_offset;
39777 bool last_page_user;
39778 };
39779
39780+struct be_rx_stats {
39781+ u32 rx_post_fail;/* number of ethrx buffer alloc failures */
39782+ u32 rx_polls; /* number of times NAPI called poll function */
39783+ u32 rx_events; /* number of ucast rx completion events */
39784+ u32 rx_compl; /* number of rx completion entries processed */
39785+ ulong rx_jiffies;
39786+ u64 rx_bytes;
39787+ u64 rx_bytes_prev;
39788+ u64 rx_pkts;
39789+ u32 rx_rate;
39790+ u32 rx_mcast_pkts;
39791+ u32 rxcp_err; /* Num rx completion entries w/ err set. */
39792+ ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39793+ u32 rx_frags;
39794+ u32 prev_rx_frags;
39795+ u32 rx_fps; /* Rx frags per second */
39796+ u32 rx_drops_no_frags;
39797+};
39798+
39799+struct be_rx_compl_info {
39800+ u32 rss_hash;
39801+ u16 vlan_tag;
39802+ u16 pkt_size;
39803+ u16 rxq_idx;
39804+ u16 port;
39805+ u8 vlanf;
39806+ u8 num_rcvd;
39807+ u8 err;
39808+ u8 ipf;
39809+ u8 tcpf;
39810+ u8 udpf;
39811+ u8 ip_csum;
39812+ u8 l4_csum;
39813+ u8 ipv6;
39814+ u8 vtm;
39815+ u8 pkt_type;
39816+};
39817+
39818 struct be_rx_obj {
39819+ struct be_adapter *adapter;
39820 struct be_queue_info q;
39821 struct be_queue_info cq;
39822- struct be_rx_page_info page_info_tbl[RX_Q_LEN];
39823+ struct be_rx_compl_info rxcp;
39824+ struct be_rx_page_info *page_info_tbl;
39825+ struct net_lro_mgr lro_mgr;
39826+ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
39827+ struct be_eq_obj rx_eq;
39828+ struct be_rx_stats stats;
39829+ u8 rss_id;
39830+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
39831+ u16 prev_frag_idx;
39832+ u32 cache_line_barrier[16];
39833 };
39834
39835-#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
39836+struct be_drv_stats {
39837+ u32 be_on_die_temperature;
39838+ u32 be_tx_events;
39839+ u32 eth_red_drops;
39840+ u32 rx_drops_no_pbuf;
39841+ u32 rx_drops_no_txpb;
39842+ u32 rx_drops_no_erx_descr;
39843+ u32 rx_drops_no_tpre_descr;
39844+ u32 rx_drops_too_many_frags;
39845+ u32 rx_drops_invalid_ring;
39846+ u32 forwarded_packets;
39847+ u32 rx_drops_mtu;
39848+ u32 rx_crc_errors;
39849+ u32 rx_alignment_symbol_errors;
39850+ u32 rx_pause_frames;
39851+ u32 rx_priority_pause_frames;
39852+ u32 rx_control_frames;
39853+ u32 rx_in_range_errors;
39854+ u32 rx_out_range_errors;
39855+ u32 rx_frame_too_long;
39856+ u32 rx_address_match_errors;
39857+ u32 rx_dropped_too_small;
39858+ u32 rx_dropped_too_short;
39859+ u32 rx_dropped_header_too_small;
39860+ u32 rx_dropped_tcp_length;
39861+ u32 rx_dropped_runt;
39862+ u32 rx_ip_checksum_errs;
39863+ u32 rx_tcp_checksum_errs;
39864+ u32 rx_udp_checksum_errs;
39865+ u32 rx_switched_unicast_packets;
39866+ u32 rx_switched_multicast_packets;
39867+ u32 rx_switched_broadcast_packets;
39868+ u32 tx_pauseframes;
39869+ u32 tx_priority_pauseframes;
39870+ u32 tx_controlframes;
39871+ u32 rxpp_fifo_overflow_drop;
39872+ u32 rx_input_fifo_overflow_drop;
39873+ u32 pmem_fifo_overflow_drop;
39874+ u32 jabber_events;
39875+};
39876+
39877+struct be_vf_cfg {
39878+ unsigned char vf_mac_addr[ETH_ALEN];
39879+ u32 vf_if_handle;
39880+ u32 vf_pmac_id;
39881+ u16 vf_def_vid;
39882+ u16 vf_vlan_tag;
39883+ u32 vf_tx_rate;
39884+};
39885+
39886+#define BE_INVALID_PMAC_ID 0xffffffff
39887+#define BE_FLAGS_DCBX (1 << 16)
39888+
39889 struct be_adapter {
39890 struct pci_dev *pdev;
39891 struct net_device *netdev;
39892@@ -228,7 +342,7 @@ struct be_adapter {
39893 u8 __iomem *db; /* Door Bell */
39894 u8 __iomem *pcicfg; /* PCI config space */
39895
39896- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
39897+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
39898 struct be_dma_mem mbox_mem;
39899 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
39900 * is stored for freeing purpose */
39901@@ -238,66 +352,121 @@ struct be_adapter {
39902 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
39903 spinlock_t mcc_cq_lock;
39904
39905- struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
39906- bool msix_enabled;
39907+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
39908+ u32 num_msix_vec;
39909 bool isr_registered;
39910
39911 /* TX Rings */
39912 struct be_eq_obj tx_eq;
39913- struct be_tx_obj tx_obj;
39914+ struct be_tx_obj tx_obj[MAX_TX_QS];
39915+ u8 num_tx_qs;
39916+ u8 prio_tc_map[MAX_TX_QS]; /* prio_tc_map[prio] => tc-id */
39917+ u8 tc_txq_map[MAX_TX_QS]; /* tc_txq_map[tc-id] => txq index */
39918
39919 u32 cache_line_break[8];
39920
39921 /* Rx rings */
39922- struct be_eq_obj rx_eq;
39923- struct be_rx_obj rx_obj;
39924+ struct be_rx_obj rx_obj[MAX_RX_QS]; /* one default non-rss Q */
39925+ u32 num_rx_qs;
39926+
39927+ struct be_dma_mem stats_cmd;
39928+ struct net_device_stats net_stats;
39929+ struct be_drv_stats drv_stats;
39930 u32 big_page_size; /* Compounded page size shared by rx wrbs */
39931- bool rx_post_starved; /* Zero rx frags have been posted to BE */
39932
39933 struct vlan_group *vlan_grp;
39934- u16 num_vlans;
39935+ u16 vlans_added;
39936+ u16 max_vlans; /* Number of vlans supported */
39937 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
39938+ u8 vlan_prio_bmap; /* Available priority BitMap */
39939+ u16 recommended_prio; /* Recommended Priority */
39940+ struct be_dma_mem rx_filter;
39941
39942- struct be_stats_obj stats;
39943 /* Work queue used to perform periodic tasks like getting statistics */
39944 struct delayed_work work;
39945+ u16 work_counter;
39946
39947- /* Ethtool knobs and info */
39948- bool rx_csum; /* BE card must perform rx-checksumming */
39949+ u32 flags;
39950+ bool rx_csum; /* BE card must perform rx-checksumming */
39951+ u32 max_rx_coal;
39952 char fw_ver[FW_VER_LEN];
39953 u32 if_handle; /* Used to configure filtering */
39954 u32 pmac_id; /* MAC addr handle used by BE card */
39955+ u32 beacon_state; /* for set_phys_id */
39956
39957- bool link_up;
39958+ bool eeh_err;
39959+ int link_status;
39960 u32 port_num;
39961+ u32 hba_port_num;
39962 bool promiscuous;
39963- u32 cap;
39964+ bool wol;
39965+ u32 function_mode;
39966+ u32 function_caps;
39967 u32 rx_fc; /* Rx flow control */
39968 u32 tx_fc; /* Tx flow control */
39969+ bool ue_detected;
39970+ bool stats_cmd_sent;
39971+ bool gro_supported;
39972+ int link_speed;
39973+ u8 port_type;
39974+ u8 transceiver;
39975+ u8 autoneg;
39976 u8 generation; /* BladeEngine ASIC generation */
39977+ u32 flash_status;
39978+ struct completion flash_compl;
39979+
39980+ u8 eq_next_idx;
39981+ bool be3_native;
39982+ u16 num_vfs;
39983+ struct be_vf_cfg *vf_cfg;
39984+ u8 is_virtfn;
39985+ u16 pvid;
39986+ u32 sli_family;
39987+ u8 port_name[4];
39988+ char model_number[32];
39989 };
39990
39991 /* BladeEngine Generation numbers */
39992 #define BE_GEN2 2
39993 #define BE_GEN3 3
39994
39995-extern const struct ethtool_ops be_ethtool_ops;
39996+#define ON 1
39997+#define OFF 0
39998+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
39999+#define lancer_A0_chip(adapter) \
40000+ (adapter->sli_family == LANCER_A0_SLI_FAMILY)
40001
40002-#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
40003+extern struct ethtool_ops be_ethtool_ops;
40004
40005-static inline unsigned int be_pci_func(struct be_adapter *adapter)
40006-{
40007- return PCI_FUNC(adapter->pdev->devfn);
40008-}
40009+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
40010+#define tx_stats(txo) (&txo->stats)
40011+#define rx_stats(rxo) (&rxo->stats)
40012
40013+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
40014+#define BE_SET_NETDEV_OPS(netdev, ops) be_netdev_ops_init(netdev, ops)
40015+#else
40016 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
40017+#endif
40018+
40019+#define for_all_rx_queues(adapter, rxo, i) \
40020+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
40021+ i++, rxo++)
40022+
40023+/* Just skip the first default non-rss queue */
40024+#define for_all_rss_queues(adapter, rxo, i) \
40025+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
40026+ i++, rxo++)
40027+
40028+#define for_all_tx_queues(adapter, txo, i) \
40029+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
40030+ i++, txo++)
40031
40032 #define PAGE_SHIFT_4K 12
40033 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
40034
40035 /* Returns number of pages spanned by the data starting at the given addr */
40036-#define PAGES_4K_SPANNED(_address, size) \
40037- ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40038+#define PAGES_4K_SPANNED(_address, size) \
40039+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
40040 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
40041
40042 /* Byte offset into the page corresponding to given address */
40043@@ -305,7 +474,7 @@ static inline unsigned int be_pci_func(struct be_adapter *adapter)
40044 ((size_t)(addr) & (PAGE_SIZE_4K-1))
40045
40046 /* Returns bit offset within a DWORD of a bitfield */
40047-#define AMAP_BIT_OFFSET(_struct, field) \
40048+#define AMAP_BIT_OFFSET(_struct, field) \
40049 (((size_t)&(((_struct *)0)->field))%32)
40050
40051 /* Returns the bit mask of the field that is NOT shifted into location. */
40052@@ -356,6 +525,11 @@ static inline void swap_dws(void *wrb, int len)
40053 #endif /* __BIG_ENDIAN */
40054 }
40055
40056+static inline bool vlan_configured(struct be_adapter *adapter)
40057+{
40058+ return adapter->vlan_grp && adapter->vlans_added;
40059+}
40060+
40061 static inline u8 is_tcp_pkt(struct sk_buff *skb)
40062 {
40063 u8 val = 0;
40064@@ -380,9 +554,65 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
40065 return val;
40066 }
40067
40068+static inline u8 is_ipv6_ext_hdr(struct sk_buff *skb)
40069+{
40070+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40071+ if (ip_hdr(skb)->version == 6)
40072+ return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
40073+ else
40074+#endif
40075+ return 0;
40076+}
40077+
40078+static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
40079+{
40080+ u32 sli_intf;
40081+
40082+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
40083+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
40084+}
40085+
40086+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
40087+{
40088+ u32 addr;
40089+
40090+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
40091+
40092+ mac[5] = (u8)(addr & 0xFF);
40093+ mac[4] = (u8)((addr >> 8) & 0xFF);
40094+ mac[3] = (u8)((addr >> 16) & 0xFF);
40095+ /* Use the OUI programmed in hardware */
40096+ memcpy(mac, adapter->netdev->dev_addr, 3);
40097+}
40098+
40099+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
40100+ struct sk_buff *skb)
40101+{
40102+ u8 vlan_prio = 0;
40103+ u16 vlan_tag = 0;
40104+
40105+ vlan_tag = vlan_tx_tag_get(skb);
40106+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
40107+ /* If vlan priority provided by OS is NOT in available bmap */
40108+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
40109+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
40110+ adapter->recommended_prio;
40111+
40112+ return vlan_tag;
40113+}
40114+
40115+#define be_physfn(adapter) (!adapter->is_virtfn)
40116+
40117 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
40118 u16 num_popped);
40119-extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
40120+extern void be_link_status_update(struct be_adapter *adapter, int link_status);
40121 extern void netdev_stats_update(struct be_adapter *adapter);
40122+extern void be_parse_stats(struct be_adapter *adapter);
40123 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
40124+
40125+#ifdef CONFIG_PALAU
40126+extern void be_sysfs_create_group(struct be_adapter *adapter);
40127+extern void be_sysfs_remove_group(struct be_adapter *adapter);
40128+#endif
40129+
40130 #endif /* BE_H */
40131diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
40132index 28a0eda..b4ca89c 100644
40133--- a/drivers/net/benet/be_cmds.c
40134+++ b/drivers/net/benet/be_cmds.c
40135@@ -1,30 +1,45 @@
40136 /*
40137- * Copyright (C) 2005 - 2009 ServerEngines
40138+ * Copyright (C) 2005 - 2011 Emulex
40139 * All rights reserved.
40140 *
40141 * This program is free software; you can redistribute it and/or
40142 * modify it under the terms of the GNU General Public License version 2
40143- * as published by the Free Software Foundation. The full GNU General
40144+ * as published by the Free Software Foundation. The full GNU General
40145 * Public License is included in this distribution in the file called COPYING.
40146 *
40147 * Contact Information:
40148- * linux-drivers@serverengines.com
40149+ * linux-drivers@emulex.com
40150 *
40151- * ServerEngines
40152- * 209 N. Fair Oaks Ave
40153- * Sunnyvale, CA 94085
40154+ * Emulex
40155+ * 3333 Susan Street
40156+ * Costa Mesa, CA 92626
40157 */
40158
40159 #include "be.h"
40160 #include "be_cmds.h"
40161
40162+/* Must be a power of 2 or else MODULO will BUG_ON */
40163+static int be_get_temp_freq = 64;
40164+
40165+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40166+{
40167+ return wrb->payload.embedded_payload;
40168+}
40169+
40170 static void be_mcc_notify(struct be_adapter *adapter)
40171 {
40172 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40173 u32 val = 0;
40174
40175+ if (adapter->eeh_err) {
40176+ dev_info(&adapter->pdev->dev, "Error in Card Detected! Cannot issue commands\n");
40177+ return;
40178+ }
40179+
40180 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
40181 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
40182+
40183+ wmb();
40184 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40185 }
40186
40187@@ -59,21 +74,67 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
40188
40189 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
40190 CQE_STATUS_COMPL_MASK;
40191+
40192+ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
40193+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
40194+ adapter->flash_status = compl_status;
40195+ complete(&adapter->flash_compl);
40196+ }
40197+
40198 if (compl_status == MCC_STATUS_SUCCESS) {
40199- if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
40200- struct be_cmd_resp_get_stats *resp =
40201- adapter->stats.cmd.va;
40202- be_dws_le_to_cpu(&resp->hw_stats,
40203- sizeof(resp->hw_stats));
40204+ if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
40205+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
40206+ if (adapter->generation == BE_GEN3) {
40207+ struct be_cmd_resp_get_stats_v1 *resp =
40208+ adapter->stats_cmd.va;
40209+
40210+ be_dws_le_to_cpu(&resp->hw_stats,
40211+ sizeof(resp->hw_stats));
40212+ } else {
40213+ struct be_cmd_resp_get_stats_v0 *resp =
40214+ adapter->stats_cmd.va;
40215+
40216+ be_dws_le_to_cpu(&resp->hw_stats,
40217+ sizeof(resp->hw_stats));
40218+ }
40219+ be_parse_stats(adapter);
40220 netdev_stats_update(adapter);
40221+ adapter->stats_cmd_sent = false;
40222+ }
40223+ if (compl->tag0 ==
40224+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
40225+ struct be_mcc_wrb *mcc_wrb =
40226+ queue_index_node(&adapter->mcc_obj.q,
40227+ compl->tag1);
40228+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
40229+ embedded_payload(mcc_wrb);
40230+ adapter->drv_stats.be_on_die_temperature =
40231+ resp->on_die_temperature;
40232+ }
40233+ } else {
40234+ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
40235+ be_get_temp_freq = 0;
40236+
40237+ if (compl->tag1 == MCC_WRB_PASS_THRU)
40238+ goto done;
40239+
40240+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
40241+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
40242+ goto done;
40243+
40244+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
40245+ dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
40246+ "permitted to execute this cmd (opcode %d)\n",
40247+ compl->tag0);
40248+ } else {
40249+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40250+ CQE_STATUS_EXTD_MASK;
40251+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
40252+ "status %d, extd-status %d\n",
40253+ compl->tag0, compl_status, extd_status);
40254 }
40255- } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
40256- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
40257- CQE_STATUS_EXTD_MASK;
40258- dev_warn(&adapter->pdev->dev,
40259- "Error in cmd completion: status(compl/extd)=%d/%d\n",
40260- compl_status, extd_status);
40261 }
40262+done:
40263 return compl_status;
40264 }
40265
40266@@ -82,7 +143,70 @@ static void be_async_link_state_process(struct be_adapter *adapter,
40267 struct be_async_event_link_state *evt)
40268 {
40269 be_link_status_update(adapter,
40270- evt->port_link_status == ASYNC_EVENT_LINK_UP);
40271+ ((evt->port_link_status & ~ASYNC_EVENT_LOGICAL) ==
40272+ ASYNC_EVENT_LINK_UP ? LINK_UP : LINK_DOWN));
40273+}
40274+
40275+/* Grp5 CoS Priority evt */
40276+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
40277+ struct be_async_event_grp5_cos_priority *evt)
40278+{
40279+ if (evt->valid) {
40280+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
40281+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
40282+ adapter->recommended_prio =
40283+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
40284+ }
40285+}
40286+
40287+/* Grp5 QOS Speed evt */
40288+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
40289+ struct be_async_event_grp5_qos_link_speed *evt)
40290+{
40291+ if (evt->physical_port == adapter->hba_port_num) {
40292+ /* qos_link_speed is in units of 10 Mbps */
40293+ adapter->link_speed = evt->qos_link_speed * 10;
40294+ }
40295+}
40296+
40297+/*Grp5 PVID evt*/
40298+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
40299+ struct be_async_event_grp5_pvid_state *evt)
40300+{
40301+ if (evt->enabled)
40302+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK ;
40303+ else
40304+ adapter->pvid = 0;
40305+}
40306+
40307+static void be_async_grp5_evt_process(struct be_adapter *adapter,
40308+ u32 trailer, struct be_mcc_compl *evt)
40309+{
40310+ u8 event_type = 0;
40311+
40312+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
40313+ ASYNC_TRAILER_EVENT_TYPE_MASK;
40314+
40315+ switch (event_type) {
40316+ case ASYNC_EVENT_COS_PRIORITY:
40317+ be_async_grp5_cos_priority_process(adapter,
40318+ (struct be_async_event_grp5_cos_priority *)evt);
40319+ break;
40320+ case ASYNC_EVENT_QOS_SPEED:
40321+ be_async_grp5_qos_speed_process(adapter,
40322+ (struct be_async_event_grp5_qos_link_speed *)evt);
40323+ break;
40324+ case ASYNC_EVENT_PVID_STATE:
40325+ be_async_grp5_pvid_state_process(adapter,
40326+ (struct be_async_event_grp5_pvid_state *)evt);
40327+ break;
40328+ case GRP5_TYPE_PRIO_TC_MAP:
40329+ memcpy(adapter->prio_tc_map, evt, MAX_TX_QS);
40330+ break;
40331+ default:
40332+ printk(KERN_WARNING "Unknown grp5 event!\n");
40333+ break;
40334+ }
40335 }
40336
40337 static inline bool is_link_state_evt(u32 trailer)
40338@@ -92,6 +216,13 @@ static inline bool is_link_state_evt(u32 trailer)
40339 ASYNC_EVENT_CODE_LINK_STATE);
40340 }
40341
40342+static inline bool is_grp5_evt(u32 trailer)
40343+{
40344+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
40345+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
40346+ ASYNC_EVENT_CODE_GRP_5);
40347+}
40348+
40349 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40350 {
40351 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
40352@@ -104,46 +235,67 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
40353 return NULL;
40354 }
40355
40356-int be_process_mcc(struct be_adapter *adapter)
40357+void be_async_mcc_enable(struct be_adapter *adapter)
40358+{
40359+ spin_lock_bh(&adapter->mcc_cq_lock);
40360+
40361+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
40362+ adapter->mcc_obj.rearm_cq = true;
40363+
40364+ spin_unlock_bh(&adapter->mcc_cq_lock);
40365+}
40366+
40367+void be_async_mcc_disable(struct be_adapter *adapter)
40368+{
40369+ adapter->mcc_obj.rearm_cq = false;
40370+}
40371+
40372+int be_process_mcc(struct be_adapter *adapter, int *status)
40373 {
40374 struct be_mcc_compl *compl;
40375- int num = 0, status = 0;
40376+ int num = 0;
40377+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40378
40379 spin_lock_bh(&adapter->mcc_cq_lock);
40380 while ((compl = be_mcc_compl_get(adapter))) {
40381 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
40382 /* Interpret flags as an async trailer */
40383- BUG_ON(!is_link_state_evt(compl->flags));
40384-
40385- /* Interpret compl as a async link evt */
40386- be_async_link_state_process(adapter,
40387+ if (is_link_state_evt(compl->flags))
40388+ be_async_link_state_process(adapter,
40389 (struct be_async_event_link_state *) compl);
40390+ else if (is_grp5_evt(compl->flags))
40391+ be_async_grp5_evt_process(adapter,
40392+ compl->flags, compl);
40393+
40394 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
40395- status = be_mcc_compl_process(adapter, compl);
40396- atomic_dec(&adapter->mcc_obj.q.used);
40397+ *status = be_mcc_compl_process(adapter, compl);
40398+ atomic_dec(&mcc_obj->q.used);
40399 }
40400 be_mcc_compl_use(compl);
40401 num++;
40402 }
40403
40404- if (num)
40405- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
40406-
40407 spin_unlock_bh(&adapter->mcc_cq_lock);
40408- return status;
40409+ return num;
40410 }
40411
40412 /* Wait till no more pending mcc requests are present */
40413 static int be_mcc_wait_compl(struct be_adapter *adapter)
40414 {
40415 #define mcc_timeout 120000 /* 12s timeout */
40416- int i, status;
40417+ int i, num, status = 0;
40418+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
40419+
40420+ if (adapter->eeh_err)
40421+ return -EIO;
40422+
40423 for (i = 0; i < mcc_timeout; i++) {
40424- status = be_process_mcc(adapter);
40425- if (status)
40426- return status;
40427+ num = be_process_mcc(adapter, &status);
40428+ if (num)
40429+ be_cq_notify(adapter, mcc_obj->cq.id,
40430+ mcc_obj->rearm_cq, num);
40431
40432- if (atomic_read(&adapter->mcc_obj.q.used) == 0)
40433+ if (atomic_read(&mcc_obj->q.used) == 0)
40434 break;
40435 udelay(100);
40436 }
40437@@ -151,7 +303,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
40438 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
40439 return -1;
40440 }
40441- return 0;
40442+ return status;
40443 }
40444
40445 /* Notify MCC requests and wait for completion */
40446@@ -163,23 +315,34 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
40447
40448 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
40449 {
40450- int cnt = 0, wait = 5;
40451+ int msecs = 0;
40452 u32 ready;
40453
40454+ if (adapter->eeh_err) {
40455+ dev_err(&adapter->pdev->dev, "Error detected in card.Cannot issue commands\n");
40456+ return -EIO;
40457+ }
40458 do {
40459- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
40460+ ready = ioread32(db);
40461+ if (ready == 0xffffffff) {
40462+ dev_err(&adapter->pdev->dev,
40463+ "pci slot disconnected\n");
40464+ return -1;
40465+ }
40466+
40467+ ready &= MPU_MAILBOX_DB_RDY_MASK;
40468 if (ready)
40469 break;
40470
40471- if (cnt > 4000000) {
40472+ if (msecs > 4000) {
40473 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
40474+ be_detect_dump_ue(adapter);
40475 return -1;
40476 }
40477
40478- if (cnt > 50)
40479- wait = 200;
40480- cnt += wait;
40481- udelay(wait);
40482+ set_current_state(TASK_UNINTERRUPTIBLE);
40483+ schedule_timeout(msecs_to_jiffies(1));
40484+ msecs++;
40485 } while (true);
40486
40487 return 0;
40488@@ -198,6 +361,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40489 struct be_mcc_mailbox *mbox = mbox_mem->va;
40490 struct be_mcc_compl *compl = &mbox->compl;
40491
40492+ /* wait for ready to be set */
40493+ status = be_mbox_db_ready_wait(adapter, db);
40494+ if (status != 0)
40495+ return status;
40496+
40497 val |= MPU_MAILBOX_DB_HI_MASK;
40498 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
40499 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
40500@@ -232,7 +400,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
40501
40502 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
40503 {
40504- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40505+ u32 sem;
40506+
40507+ if (lancer_chip(adapter))
40508+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
40509+ else
40510+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40511
40512 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
40513 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
40514@@ -245,30 +418,29 @@ int be_cmd_POST(struct be_adapter *adapter)
40515 {
40516 u16 stage;
40517 int status, timeout = 0;
40518+ struct device *dev = &adapter->pdev->dev;
40519
40520 do {
40521 status = be_POST_stage_get(adapter, &stage);
40522 if (status) {
40523- dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
40524- stage);
40525+ dev_err(dev, "POST error; stage=0x%x\n", stage);
40526 return -1;
40527 } else if (stage != POST_STAGE_ARMFW_RDY) {
40528 set_current_state(TASK_INTERRUPTIBLE);
40529- schedule_timeout(2 * HZ);
40530+ if (schedule_timeout(2 * HZ)) {
40531+ dev_err(dev, "POST cmd aborted\n");
40532+ return -EINTR;
40533+ }
40534 timeout += 2;
40535 } else {
40536 return 0;
40537 }
40538- } while (timeout < 20);
40539+ } while (timeout < 40);
40540
40541- dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
40542+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
40543 return -1;
40544 }
40545
40546-static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40547-{
40548- return wrb->payload.embedded_payload;
40549-}
40550
40551 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40552 {
40553@@ -277,7 +449,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40554
40555 /* Don't touch the hdr after it's prepared */
40556 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40557- bool embedded, u8 sge_cnt)
40558+ bool embedded, u8 sge_cnt, u32 opcode)
40559 {
40560 if (embedded)
40561 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
40562@@ -285,7 +457,8 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40563 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
40564 MCC_WRB_SGE_CNT_SHIFT;
40565 wrb->payload_length = payload_len;
40566- be_dws_cpu_to_le(wrb, 20);
40567+ wrb->tag0 = opcode;
40568+ be_dws_cpu_to_le(wrb, 8);
40569 }
40570
40571 /* Don't touch the hdr after it's prepared */
40572@@ -295,6 +468,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
40573 req_hdr->opcode = opcode;
40574 req_hdr->subsystem = subsystem;
40575 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
40576+ req_hdr->version = 0;
40577 }
40578
40579 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
40580@@ -349,7 +523,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40581 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40582 struct be_mcc_wrb *wrb;
40583
40584- BUG_ON(atomic_read(&mccq->used) >= mccq->len);
40585+ if (atomic_read(&mccq->used) >= mccq->len) {
40586+ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
40587+ return NULL;
40588+ }
40589+
40590 wrb = queue_head_node(mccq);
40591 queue_head_inc(mccq);
40592 atomic_inc(&mccq->used);
40593@@ -357,6 +535,59 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40594 return wrb;
40595 }
40596
40597+/* Tell fw we're about to start firing cmds by writing a
40598+ * special pattern across the wrb hdr; uses mbox
40599+ */
40600+int be_cmd_fw_init(struct be_adapter *adapter)
40601+{
40602+ u8 *wrb;
40603+ int status;
40604+
40605+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40606+ return -1;
40607+
40608+ wrb = (u8 *)wrb_from_mbox(adapter);
40609+ *wrb++ = 0xFF;
40610+ *wrb++ = 0x12;
40611+ *wrb++ = 0x34;
40612+ *wrb++ = 0xFF;
40613+ *wrb++ = 0xFF;
40614+ *wrb++ = 0x56;
40615+ *wrb++ = 0x78;
40616+ *wrb = 0xFF;
40617+
40618+ status = be_mbox_notify_wait(adapter);
40619+
40620+ mutex_unlock(&adapter->mbox_lock);
40621+ return status;
40622+}
40623+
40624+/* Tell fw we're done with firing cmds by writing a
40625+ * special pattern across the wrb hdr; uses mbox
40626+ */
40627+int be_cmd_fw_clean(struct be_adapter *adapter)
40628+{
40629+ u8 *wrb;
40630+ int status;
40631+
40632+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40633+ return -1;
40634+
40635+ wrb = (u8 *)wrb_from_mbox(adapter);
40636+ *wrb++ = 0xFF;
40637+ *wrb++ = 0xAA;
40638+ *wrb++ = 0xBB;
40639+ *wrb++ = 0xFF;
40640+ *wrb++ = 0xFF;
40641+ *wrb++ = 0xCC;
40642+ *wrb++ = 0xDD;
40643+ *wrb = 0xFF;
40644+
40645+ status = be_mbox_notify_wait(adapter);
40646+
40647+ mutex_unlock(&adapter->mbox_lock);
40648+ return status;
40649+}
40650 int be_cmd_eq_create(struct be_adapter *adapter,
40651 struct be_queue_info *eq, int eq_delay)
40652 {
40653@@ -365,20 +596,19 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40654 struct be_dma_mem *q_mem = &eq->dma_mem;
40655 int status;
40656
40657- spin_lock(&adapter->mbox_lock);
40658+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40659+ return -1;
40660
40661 wrb = wrb_from_mbox(adapter);
40662 req = embedded_payload(wrb);
40663
40664- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40665+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
40666
40667 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40668 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
40669
40670 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40671
40672- AMAP_SET_BITS(struct amap_eq_context, func, req->context,
40673- be_pci_func(adapter));
40674 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
40675 /* 4byte eqe*/
40676 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
40677@@ -397,7 +627,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40678 eq->created = true;
40679 }
40680
40681- spin_unlock(&adapter->mbox_lock);
40682+ mutex_unlock(&adapter->mbox_lock);
40683 return status;
40684 }
40685
40686@@ -409,12 +639,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40687 struct be_cmd_req_mac_query *req;
40688 int status;
40689
40690- spin_lock(&adapter->mbox_lock);
40691+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40692+ return -1;
40693
40694 wrb = wrb_from_mbox(adapter);
40695 req = embedded_payload(wrb);
40696
40697- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40698+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40699+ OPCODE_COMMON_NTWK_MAC_QUERY);
40700
40701 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40702 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
40703@@ -433,13 +665,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40704 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
40705 }
40706
40707- spin_unlock(&adapter->mbox_lock);
40708+ mutex_unlock(&adapter->mbox_lock);
40709 return status;
40710 }
40711
40712 /* Uses synchronous MCCQ */
40713 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40714- u32 if_id, u32 *pmac_id)
40715+ u32 if_id, u32 *pmac_id, u32 domain)
40716 {
40717 struct be_mcc_wrb *wrb;
40718 struct be_cmd_req_pmac_add *req;
40719@@ -448,13 +680,19 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40720 spin_lock_bh(&adapter->mcc_lock);
40721
40722 wrb = wrb_from_mccq(adapter);
40723+ if (!wrb) {
40724+ status = -EBUSY;
40725+ goto err;
40726+ }
40727 req = embedded_payload(wrb);
40728
40729- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40730+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40731+ OPCODE_COMMON_NTWK_PMAC_ADD);
40732
40733 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40734 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
40735
40736+ req->hdr.domain = domain;
40737 req->if_id = cpu_to_le32(if_id);
40738 memcpy(req->mac_address, mac_addr, ETH_ALEN);
40739
40740@@ -464,12 +702,13 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40741 *pmac_id = le32_to_cpu(resp->pmac_id);
40742 }
40743
40744+err:
40745 spin_unlock_bh(&adapter->mcc_lock);
40746 return status;
40747 }
40748
40749 /* Uses synchronous MCCQ */
40750-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40751+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
40752 {
40753 struct be_mcc_wrb *wrb;
40754 struct be_cmd_req_pmac_del *req;
40755@@ -478,20 +717,26 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40756 spin_lock_bh(&adapter->mcc_lock);
40757
40758 wrb = wrb_from_mccq(adapter);
40759+ if (!wrb) {
40760+ status = -EBUSY;
40761+ goto err;
40762+ }
40763 req = embedded_payload(wrb);
40764
40765- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40766+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40767+ OPCODE_COMMON_NTWK_PMAC_DEL);
40768
40769 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40770 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
40771
40772+ req->hdr.domain = dom;
40773 req->if_id = cpu_to_le32(if_id);
40774 req->pmac_id = cpu_to_le32(pmac_id);
40775
40776 status = be_mcc_notify_wait(adapter);
40777
40778+err:
40779 spin_unlock_bh(&adapter->mcc_lock);
40780-
40781 return status;
40782 }
40783
40784@@ -506,29 +751,51 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40785 void *ctxt;
40786 int status;
40787
40788- spin_lock(&adapter->mbox_lock);
40789+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40790+ return -1;
40791
40792 wrb = wrb_from_mbox(adapter);
40793 req = embedded_payload(wrb);
40794 ctxt = &req->context;
40795
40796- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40797+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40798+ OPCODE_COMMON_CQ_CREATE);
40799
40800 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40801 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
40802
40803 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40804
40805- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
40806- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
40807- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
40808- __ilog2_u32(cq->len/256));
40809- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
40810- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
40811- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
40812- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
40813- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
40814- AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
40815+ if (lancer_chip(adapter)) {
40816+ req->hdr.version = 2;
40817+ req->page_size = 1; /* 1 for 4K */
40818+ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
40819+ coalesce_wm);
40820+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
40821+ no_delay);
40822+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
40823+ __ilog2_u32(cq->len/256));
40824+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
40825+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
40826+ ctxt, 1);
40827+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
40828+ ctxt, eq->id);
40829+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
40830+ } else {
40831+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
40832+ coalesce_wm);
40833+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
40834+ ctxt, no_delay);
40835+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
40836+ __ilog2_u32(cq->len/256));
40837+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
40838+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
40839+ ctxt, sol_evts);
40840+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
40841+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
40842+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
40843+ }
40844+
40845 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40846
40847 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40848@@ -540,8 +807,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40849 cq->created = true;
40850 }
40851
40852- spin_unlock(&adapter->mbox_lock);
40853-
40854+ mutex_unlock(&adapter->mbox_lock);
40855 return status;
40856 }
40857
40858@@ -553,7 +819,68 @@ static u32 be_encoded_q_len(int q_len)
40859 return len_encoded;
40860 }
40861
40862-int be_cmd_mccq_create(struct be_adapter *adapter,
40863+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
40864+ struct be_queue_info *mccq,
40865+ struct be_queue_info *cq)
40866+{
40867+ struct be_mcc_wrb *wrb;
40868+ struct be_cmd_req_mcc_ext_create *req;
40869+ struct be_dma_mem *q_mem = &mccq->dma_mem;
40870+ void *ctxt;
40871+ int status;
40872+
40873+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40874+ return -1;
40875+
40876+ wrb = wrb_from_mbox(adapter);
40877+ req = embedded_payload(wrb);
40878+ ctxt = &req->context;
40879+
40880+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40881+ OPCODE_COMMON_MCC_CREATE_EXT);
40882+
40883+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40884+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
40885+
40886+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40887+ if (lancer_chip(adapter)) {
40888+ req->hdr.version = 1;
40889+ req->cq_id = cpu_to_le16(cq->id);
40890+
40891+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
40892+ be_encoded_q_len(mccq->len));
40893+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
40894+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
40895+ ctxt, cq->id);
40896+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
40897+ ctxt, 1);
40898+
40899+ } else {
40900+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40901+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40902+ be_encoded_q_len(mccq->len));
40903+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40904+ }
40905+
40906+ /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
40907+ req->async_event_bitmap[0] |= cpu_to_le32(0x00000022);
40908+
40909+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
40910+
40911+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40912+
40913+ status = be_mbox_notify_wait(adapter);
40914+ if (!status) {
40915+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
40916+ mccq->id = le16_to_cpu(resp->id);
40917+ mccq->created = true;
40918+ }
40919+
40920+ mutex_unlock(&adapter->mbox_lock);
40921+ return status;
40922+}
40923+
40924+int be_cmd_mccq_org_create(struct be_adapter *adapter,
40925 struct be_queue_info *mccq,
40926 struct be_queue_info *cq)
40927 {
40928@@ -563,24 +890,25 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
40929 void *ctxt;
40930 int status;
40931
40932- spin_lock(&adapter->mbox_lock);
40933+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40934+ return -1;
40935
40936 wrb = wrb_from_mbox(adapter);
40937 req = embedded_payload(wrb);
40938 ctxt = &req->context;
40939
40940- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40941+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40942+ OPCODE_COMMON_MCC_CREATE);
40943
40944 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40945 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
40946
40947- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
40948+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40949
40950- AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
40951- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
40952- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
40953- be_encoded_q_len(mccq->len));
40954- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
40955+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40956+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40957+ be_encoded_q_len(mccq->len));
40958+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40959
40960 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40961
40962@@ -592,75 +920,93 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
40963 mccq->id = le16_to_cpu(resp->id);
40964 mccq->created = true;
40965 }
40966- spin_unlock(&adapter->mbox_lock);
40967
40968+ mutex_unlock(&adapter->mbox_lock);
40969 return status;
40970 }
40971
40972-int be_cmd_txq_create(struct be_adapter *adapter,
40973- struct be_queue_info *txq,
40974+int be_cmd_mccq_create(struct be_adapter *adapter,
40975+ struct be_queue_info *mccq,
40976 struct be_queue_info *cq)
40977 {
40978+ int status;
40979+
40980+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
40981+ if (status && !lancer_chip(adapter)) {
40982+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
40983+ "or newer to avoid conflicting priorities between NIC "
40984+ "and FCoE traffic");
40985+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
40986+ }
40987+ return status;
40988+}
40989+
40990+int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq,
40991+ struct be_queue_info *cq, u8 *tc_id)
40992+{
40993 struct be_mcc_wrb *wrb;
40994 struct be_cmd_req_eth_tx_create *req;
40995 struct be_dma_mem *q_mem = &txq->dma_mem;
40996- void *ctxt;
40997 int status;
40998
40999- spin_lock(&adapter->mbox_lock);
41000+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41001+ return -1;
41002
41003 wrb = wrb_from_mbox(adapter);
41004 req = embedded_payload(wrb);
41005- ctxt = &req->context;
41006-
41007- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41008
41009+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_TX_CREATE);
41010 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
41011 sizeof(*req));
41012
41013- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41014+ if (adapter->flags & BE_FLAGS_DCBX || lancer_chip(adapter)) {
41015+ req->hdr.version = 1;
41016+ req->if_id = cpu_to_le16(adapter->if_handle);
41017+ }
41018+ if (adapter->flags & BE_FLAGS_DCBX)
41019+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_PRIORITY);
41020+ else
41021+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_STANDARD);
41022 req->ulp_num = BE_ULP1_NUM;
41023- req->type = BE_ETH_TX_RING_TYPE_STANDARD;
41024-
41025- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
41026- be_encoded_q_len(txq->len));
41027- AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
41028- be_pci_func(adapter));
41029- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
41030- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
41031-
41032- be_dws_cpu_to_le(ctxt, sizeof(req->context));
41033-
41034+ req->cq_id = cpu_to_le16(cq->id);
41035+ req->queue_size = be_encoded_q_len(txq->len);
41036+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
41037 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
41038
41039 status = be_mbox_notify_wait(adapter);
41040 if (!status) {
41041 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
41042 txq->id = le16_to_cpu(resp->cid);
41043+ if (adapter->flags & BE_FLAGS_DCBX)
41044+ *tc_id = resp->tc_id;
41045 txq->created = true;
41046 }
41047
41048- spin_unlock(&adapter->mbox_lock);
41049-
41050+ mutex_unlock(&adapter->mbox_lock);
41051 return status;
41052 }
41053
41054-/* Uses mbox */
41055+/* Uses MCC */
41056 int be_cmd_rxq_create(struct be_adapter *adapter,
41057 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
41058- u16 max_frame_size, u32 if_id, u32 rss)
41059+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
41060 {
41061 struct be_mcc_wrb *wrb;
41062 struct be_cmd_req_eth_rx_create *req;
41063 struct be_dma_mem *q_mem = &rxq->dma_mem;
41064 int status;
41065
41066- spin_lock(&adapter->mbox_lock);
41067+ spin_lock_bh(&adapter->mcc_lock);
41068
41069- wrb = wrb_from_mbox(adapter);
41070+ wrb = wrb_from_mccq(adapter);
41071+ if (!wrb) {
41072+ status = -EBUSY;
41073+ goto err;
41074+ }
41075 req = embedded_payload(wrb);
41076
41077- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41078+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41079+ OPCODE_ETH_RX_CREATE);
41080
41081 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
41082 sizeof(*req));
41083@@ -673,15 +1019,16 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
41084 req->max_frame_size = cpu_to_le16(max_frame_size);
41085 req->rss_queue = cpu_to_le32(rss);
41086
41087- status = be_mbox_notify_wait(adapter);
41088+ status = be_mcc_notify_wait(adapter);
41089 if (!status) {
41090 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
41091 rxq->id = le16_to_cpu(resp->id);
41092 rxq->created = true;
41093+ *rss_id = resp->rss_id;
41094 }
41095
41096- spin_unlock(&adapter->mbox_lock);
41097-
41098+err:
41099+ spin_unlock_bh(&adapter->mcc_lock);
41100 return status;
41101 }
41102
41103@@ -696,13 +1043,12 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41104 u8 subsys = 0, opcode = 0;
41105 int status;
41106
41107- spin_lock(&adapter->mbox_lock);
41108+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41109+ return -1;
41110
41111 wrb = wrb_from_mbox(adapter);
41112 req = embedded_payload(wrb);
41113
41114- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41115-
41116 switch (queue_type) {
41117 case QTYPE_EQ:
41118 subsys = CMD_SUBSYSTEM_COMMON;
41119@@ -727,13 +1073,47 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41120 default:
41121 BUG();
41122 }
41123+
41124+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
41125+
41126 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
41127 req->id = cpu_to_le16(q->id);
41128
41129 status = be_mbox_notify_wait(adapter);
41130+ if (!status)
41131+ q->created = false;
41132
41133- spin_unlock(&adapter->mbox_lock);
41134+ mutex_unlock(&adapter->mbox_lock);
41135+ return status;
41136+}
41137
41138+/* Uses MCC */
41139+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
41140+{
41141+ struct be_mcc_wrb *wrb;
41142+ struct be_cmd_req_q_destroy *req;
41143+ int status;
41144+
41145+ spin_lock_bh(&adapter->mcc_lock);
41146+
41147+ wrb = wrb_from_mccq(adapter);
41148+ if (!wrb) {
41149+ status = -EBUSY;
41150+ goto err;
41151+ }
41152+ req = embedded_payload(wrb);
41153+
41154+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
41155+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
41156+ sizeof(*req));
41157+ req->id = cpu_to_le16(q->id);
41158+
41159+ status = be_mcc_notify_wait(adapter);
41160+ if (!status)
41161+ q->created = false;
41162+
41163+err:
41164+ spin_unlock_bh(&adapter->mcc_lock);
41165 return status;
41166 }
41167
41168@@ -741,22 +1121,26 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
41169 * Uses mbox
41170 */
41171 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41172- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
41173+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
41174+ u32 domain)
41175 {
41176 struct be_mcc_wrb *wrb;
41177 struct be_cmd_req_if_create *req;
41178 int status;
41179
41180- spin_lock(&adapter->mbox_lock);
41181+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41182+ return -1;
41183
41184 wrb = wrb_from_mbox(adapter);
41185 req = embedded_payload(wrb);
41186
41187- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41188+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41189+ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
41190
41191 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41192 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
41193
41194+ req->hdr.domain = domain;
41195 req->capability_flags = cpu_to_le32(cap_flags);
41196 req->enable_flags = cpu_to_le32(en_flags);
41197 req->pmac_invalid = pmac_invalid;
41198@@ -771,33 +1155,35 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
41199 *pmac_id = le32_to_cpu(resp->pmac_id);
41200 }
41201
41202- spin_unlock(&adapter->mbox_lock);
41203+ mutex_unlock(&adapter->mbox_lock);
41204 return status;
41205 }
41206
41207 /* Uses mbox */
41208-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41209+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
41210 {
41211 struct be_mcc_wrb *wrb;
41212 struct be_cmd_req_if_destroy *req;
41213 int status;
41214
41215- spin_lock(&adapter->mbox_lock);
41216+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41217+ return -1;
41218
41219 wrb = wrb_from_mbox(adapter);
41220 req = embedded_payload(wrb);
41221
41222- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41223+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41224+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
41225
41226 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41227 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
41228
41229+ req->hdr.domain = domain;
41230 req->interface_id = cpu_to_le32(interface_id);
41231
41232 status = be_mbox_notify_wait(adapter);
41233
41234- spin_unlock(&adapter->mbox_lock);
41235-
41236+ mutex_unlock(&adapter->mbox_lock);
41237 return status;
41238 }
41239
41240@@ -808,33 +1194,48 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
41241 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
41242 {
41243 struct be_mcc_wrb *wrb;
41244- struct be_cmd_req_get_stats *req;
41245+ struct be_cmd_req_hdr *hdr;
41246 struct be_sge *sge;
41247+ int status = 0;
41248+
41249+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
41250+ be_cmd_get_die_temperature(adapter);
41251
41252 spin_lock_bh(&adapter->mcc_lock);
41253
41254 wrb = wrb_from_mccq(adapter);
41255- req = nonemb_cmd->va;
41256+ if (!wrb) {
41257+ status = -EBUSY;
41258+ goto err;
41259+ }
41260+ hdr = nonemb_cmd->va;
41261 sge = nonembedded_sgl(wrb);
41262
41263- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
41264- wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
41265+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
41266+ OPCODE_ETH_GET_STATISTICS);
41267
41268- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41269- OPCODE_ETH_GET_STATISTICS, sizeof(*req));
41270+ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
41271+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
41272+
41273+ if (adapter->generation == BE_GEN3)
41274+ hdr->version = 1;
41275+
41276+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
41277 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
41278 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
41279 sge->len = cpu_to_le32(nonemb_cmd->size);
41280
41281 be_mcc_notify(adapter);
41282+ adapter->stats_cmd_sent = true;
41283
41284+err:
41285 spin_unlock_bh(&adapter->mcc_lock);
41286- return 0;
41287+ return status;
41288 }
41289
41290 /* Uses synchronous mcc */
41291 int be_cmd_link_status_query(struct be_adapter *adapter,
41292- bool *link_up)
41293+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom)
41294 {
41295 struct be_mcc_wrb *wrb;
41296 struct be_cmd_req_link_status *req;
41297@@ -843,50 +1244,216 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
41298 spin_lock_bh(&adapter->mcc_lock);
41299
41300 wrb = wrb_from_mccq(adapter);
41301+ if (!wrb) {
41302+ status = -EBUSY;
41303+ goto err;
41304+ }
41305 req = embedded_payload(wrb);
41306
41307- *link_up = false;
41308+ *link_status = LINK_DOWN;
41309
41310- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41311+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41312+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
41313
41314 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41315 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
41316
41317+ req->hdr.domain = dom;
41318+
41319 status = be_mcc_notify_wait(adapter);
41320 if (!status) {
41321 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
41322- if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
41323- *link_up = true;
41324+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
41325+ *link_status = LINK_UP;
41326+ *link_speed = le16_to_cpu(resp->link_speed);
41327+ *mac_speed = resp->mac_speed;
41328+ }
41329 }
41330
41331+err:
41332 spin_unlock_bh(&adapter->mcc_lock);
41333 return status;
41334 }
41335
41336-/* Uses Mbox */
41337-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
41338+/* Uses synchronous mcc */
41339+int be_cmd_get_die_temperature(struct be_adapter *adapter)
41340+{
41341+ struct be_mcc_wrb *wrb;
41342+ struct be_cmd_req_get_cntl_addnl_attribs *req;
41343+ u16 mccq_index;
41344+ int status;
41345+
41346+ spin_lock_bh(&adapter->mcc_lock);
41347+
41348+ mccq_index = adapter->mcc_obj.q.head;
41349+
41350+ wrb = wrb_from_mccq(adapter);
41351+ if (!wrb) {
41352+ status = -EBUSY;
41353+ goto err;
41354+ }
41355+ req = embedded_payload(wrb);
41356+
41357+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41358+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
41359+
41360+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41361+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
41362+
41363+ wrb->tag1 = mccq_index;
41364+
41365+ be_mcc_notify(adapter);
41366+
41367+err:
41368+ spin_unlock_bh(&adapter->mcc_lock);
41369+ return status;
41370+}
41371+
41372+
41373+/* Uses synchronous mcc */
41374+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
41375+{
41376+ struct be_mcc_wrb *wrb;
41377+ struct be_cmd_req_get_fat *req;
41378+ int status;
41379+
41380+ spin_lock_bh(&adapter->mcc_lock);
41381+
41382+ wrb = wrb_from_mccq(adapter);
41383+ if (!wrb) {
41384+ status = -EBUSY;
41385+ goto err;
41386+ }
41387+ req = embedded_payload(wrb);
41388+
41389+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41390+ OPCODE_COMMON_MANAGE_FAT);
41391+
41392+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41393+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
41394+ req->fat_operation = cpu_to_le32(QUERY_FAT);
41395+ status = be_mcc_notify_wait(adapter);
41396+ if (!status) {
41397+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
41398+ if (log_size && resp->log_size)
41399+ *log_size = le32_to_cpu(resp->log_size) -
41400+ sizeof(u32);
41401+ }
41402+err:
41403+ spin_unlock_bh(&adapter->mcc_lock);
41404+ return status;
41405+}
41406+
41407+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
41408+{
41409+ struct be_dma_mem get_fat_cmd;
41410+ struct be_mcc_wrb *wrb;
41411+ struct be_cmd_req_get_fat *req;
41412+ struct be_sge *sge;
41413+ u32 offset = 0, total_size, buf_size,
41414+ log_offset = sizeof(u32), payload_len;
41415+ int status;
41416+
41417+ if (buf_len == 0)
41418+ return;
41419+
41420+ total_size = buf_len;
41421+
41422+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
41423+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
41424+ get_fat_cmd.size,
41425+ &get_fat_cmd.dma);
41426+ if (!get_fat_cmd.va) {
41427+ status = -ENOMEM;
41428+ dev_err(&adapter->pdev->dev,
41429+ "Memory allocation failure while retrieving FAT data\n");
41430+ return;
41431+ }
41432+
41433+ spin_lock_bh(&adapter->mcc_lock);
41434+
41435+ while (total_size) {
41436+ buf_size = min(total_size, (u32)60*1024);
41437+ total_size -= buf_size;
41438+
41439+ wrb = wrb_from_mccq(adapter);
41440+ if (!wrb) {
41441+ status = -EBUSY;
41442+ goto err;
41443+ }
41444+ req = get_fat_cmd.va;
41445+ sge = nonembedded_sgl(wrb);
41446+
41447+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
41448+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
41449+ OPCODE_COMMON_MANAGE_FAT);
41450+
41451+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41452+ OPCODE_COMMON_MANAGE_FAT, payload_len);
41453+
41454+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
41455+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
41456+ sge->len = cpu_to_le32(get_fat_cmd.size);
41457+
41458+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
41459+ req->read_log_offset = cpu_to_le32(log_offset);
41460+ req->read_log_length = cpu_to_le32(buf_size);
41461+ req->data_buffer_size = cpu_to_le32(buf_size);
41462+
41463+ status = be_mcc_notify_wait(adapter);
41464+ if (!status) {
41465+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
41466+ memcpy(buf + offset,
41467+ resp->data_buffer,
41468+ le32_to_cpu(resp->read_log_length));
41469+ } else {
41470+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
41471+ goto err;
41472+ }
41473+ offset += buf_size;
41474+ log_offset += buf_size;
41475+ }
41476+err:
41477+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
41478+ get_fat_cmd.va,
41479+ get_fat_cmd.dma);
41480+ spin_unlock_bh(&adapter->mcc_lock);
41481+}
41482+
41483+/* Uses synchronous mcc */
41484+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
41485+ char *fw_on_flash)
41486 {
41487 struct be_mcc_wrb *wrb;
41488 struct be_cmd_req_get_fw_version *req;
41489 int status;
41490
41491- spin_lock(&adapter->mbox_lock);
41492+ spin_lock_bh(&adapter->mcc_lock);
41493+
41494+ wrb = wrb_from_mccq(adapter);
41495+ if (!wrb) {
41496+ status = -EBUSY;
41497+ goto err;
41498+ }
41499
41500- wrb = wrb_from_mbox(adapter);
41501 req = embedded_payload(wrb);
41502
41503- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41504+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41505+ OPCODE_COMMON_GET_FW_VERSION);
41506
41507 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41508 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
41509
41510- status = be_mbox_notify_wait(adapter);
41511+ status = be_mcc_notify_wait(adapter);
41512 if (!status) {
41513 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
41514- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
41515+ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN-1);
41516+ if (fw_on_flash)
41517+ strncpy(fw_on_flash, resp->fw_on_flash_version_string,
41518+ FW_VER_LEN-1);
41519 }
41520-
41521- spin_unlock(&adapter->mbox_lock);
41522+err:
41523+ spin_unlock_bh(&adapter->mcc_lock);
41524 return status;
41525 }
41526
41527@@ -897,13 +1464,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41528 {
41529 struct be_mcc_wrb *wrb;
41530 struct be_cmd_req_modify_eq_delay *req;
41531+ int status = 0;
41532
41533 spin_lock_bh(&adapter->mcc_lock);
41534
41535 wrb = wrb_from_mccq(adapter);
41536+ if (!wrb) {
41537+ status = -EBUSY;
41538+ goto err;
41539+ }
41540 req = embedded_payload(wrb);
41541
41542- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41543+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41544+ OPCODE_COMMON_MODIFY_EQ_DELAY);
41545
41546 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41547 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
41548@@ -915,8 +1488,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41549
41550 be_mcc_notify(adapter);
41551
41552+err:
41553 spin_unlock_bh(&adapter->mcc_lock);
41554- return 0;
41555+ return status;
41556 }
41557
41558 /* Uses sycnhronous mcc */
41559@@ -930,9 +1504,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41560 spin_lock_bh(&adapter->mcc_lock);
41561
41562 wrb = wrb_from_mccq(adapter);
41563+ if (!wrb) {
41564+ status = -EBUSY;
41565+ goto err;
41566+ }
41567 req = embedded_payload(wrb);
41568
41569- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41570+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41571+ OPCODE_COMMON_NTWK_VLAN_CONFIG);
41572
41573 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41574 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
41575@@ -948,79 +1527,63 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41576
41577 status = be_mcc_notify_wait(adapter);
41578
41579+err:
41580 spin_unlock_bh(&adapter->mcc_lock);
41581 return status;
41582 }
41583
41584-/* Uses MCC for this command as it may be called in BH context
41585- * Uses synchronous mcc
41586- */
41587-int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
41588+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
41589 {
41590 struct be_mcc_wrb *wrb;
41591- struct be_cmd_req_promiscuous_config *req;
41592+ struct be_dma_mem *mem = &adapter->rx_filter;
41593+ struct be_cmd_req_rx_filter *req = mem->va;
41594+ struct be_sge *sge;
41595 int status;
41596
41597 spin_lock_bh(&adapter->mcc_lock);
41598
41599 wrb = wrb_from_mccq(adapter);
41600- req = embedded_payload(wrb);
41601-
41602- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41603-
41604- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41605- OPCODE_ETH_PROMISCUOUS, sizeof(*req));
41606-
41607- if (port_num)
41608- req->port1_promiscuous = en;
41609- else
41610- req->port0_promiscuous = en;
41611-
41612- status = be_mcc_notify_wait(adapter);
41613-
41614- spin_unlock_bh(&adapter->mcc_lock);
41615- return status;
41616-}
41617-
41618-/*
41619- * Uses MCC for this command as it may be called in BH context
41620- * (mc == NULL) => multicast promiscous
41621- */
41622-int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
41623- struct dev_mc_list *mc_list, u32 mc_count)
41624-{
41625-#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
41626- struct be_mcc_wrb *wrb;
41627- struct be_cmd_req_mcast_mac_config *req;
41628-
41629- spin_lock_bh(&adapter->mcc_lock);
41630-
41631- wrb = wrb_from_mccq(adapter);
41632- req = embedded_payload(wrb);
41633-
41634- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41635-
41636+ if (!wrb) {
41637+ status = -EBUSY;
41638+ goto err;
41639+ }
41640+ sge = nonembedded_sgl(wrb);
41641+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
41642+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
41643+ sge->len = cpu_to_le32(mem->size);
41644+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41645+ OPCODE_COMMON_NTWK_RX_FILTER);
41646+
41647+ memset(req, 0, sizeof(*req));
41648 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41649- OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
41650+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
41651
41652- req->interface_id = if_id;
41653- if (mc_list && mc_count <= BE_MAX_MC) {
41654- int i;
41655- struct dev_mc_list *mc;
41656-
41657- req->num_mac = cpu_to_le16(mc_count);
41658-
41659- for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
41660- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
41661+ req->if_id = cpu_to_le32(adapter->if_handle);
41662+ if (flags & IFF_PROMISC) {
41663+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41664+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41665+ if (value == ON)
41666+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41667+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41668+ } else if (flags & IFF_ALLMULTI) {
41669+ req->if_flags_mask = req->if_flags =
41670+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
41671 } else {
41672- req->promiscuous = 1;
41673- }
41674+ struct netdev_hw_addr *ha;
41675+ int i = 0;
41676
41677- be_mcc_notify_wait(adapter);
41678+ req->if_flags_mask = req->if_flags =
41679+ cpu_to_le32(BE_IF_FLAGS_MULTICAST);
41680+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
41681+ netdev_for_each_mc_addr(ha, adapter->netdev)
41682+ memcpy(req->mcast_mac[i++].byte, ha->DMI_ADDR,
41683+ ETH_ALEN);
41684+ }
41685+ status = be_mcc_notify_wait(adapter);
41686
41687+err:
41688 spin_unlock_bh(&adapter->mcc_lock);
41689-
41690- return 0;
41691+ return status;
41692 }
41693
41694 /* Uses synchrounous mcc */
41695@@ -1033,9 +1596,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41696 spin_lock_bh(&adapter->mcc_lock);
41697
41698 wrb = wrb_from_mccq(adapter);
41699+ if (!wrb) {
41700+ status = -EBUSY;
41701+ goto err;
41702+ }
41703 req = embedded_payload(wrb);
41704
41705- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41706+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41707+ OPCODE_COMMON_SET_FLOW_CONTROL);
41708
41709 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41710 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
41711@@ -1045,6 +1613,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41712
41713 status = be_mcc_notify_wait(adapter);
41714
41715+err:
41716 spin_unlock_bh(&adapter->mcc_lock);
41717 return status;
41718 }
41719@@ -1059,9 +1628,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41720 spin_lock_bh(&adapter->mcc_lock);
41721
41722 wrb = wrb_from_mccq(adapter);
41723+ if (!wrb) {
41724+ status = -EBUSY;
41725+ goto err;
41726+ }
41727 req = embedded_payload(wrb);
41728
41729- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41730+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41731+ OPCODE_COMMON_GET_FLOW_CONTROL);
41732
41733 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41734 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
41735@@ -1074,23 +1648,27 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41736 *rx_fc = le16_to_cpu(resp->rx_flow_control);
41737 }
41738
41739+err:
41740 spin_unlock_bh(&adapter->mcc_lock);
41741 return status;
41742 }
41743
41744 /* Uses mbox */
41745-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41746+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
41747+ u32 *mode, u32 *function_caps)
41748 {
41749 struct be_mcc_wrb *wrb;
41750 struct be_cmd_req_query_fw_cfg *req;
41751 int status;
41752
41753- spin_lock(&adapter->mbox_lock);
41754+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41755+ return -1;
41756
41757 wrb = wrb_from_mbox(adapter);
41758 req = embedded_payload(wrb);
41759
41760- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41761+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41762+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
41763
41764 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41765 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
41766@@ -1099,10 +1677,11 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41767 if (!status) {
41768 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
41769 *port_num = le32_to_cpu(resp->phys_port);
41770- *cap = le32_to_cpu(resp->function_cap);
41771+ *mode = le32_to_cpu(resp->function_mode);
41772+ *function_caps = le32_to_cpu(resp->function_caps);
41773 }
41774
41775- spin_unlock(&adapter->mbox_lock);
41776+ mutex_unlock(&adapter->mbox_lock);
41777 return status;
41778 }
41779
41780@@ -1113,19 +1692,161 @@ int be_cmd_reset_function(struct be_adapter *adapter)
41781 struct be_cmd_req_hdr *req;
41782 int status;
41783
41784- spin_lock(&adapter->mbox_lock);
41785+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41786+ return -1;
41787
41788 wrb = wrb_from_mbox(adapter);
41789 req = embedded_payload(wrb);
41790
41791- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41792+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41793+ OPCODE_COMMON_FUNCTION_RESET);
41794
41795 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
41796 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
41797
41798 status = be_mbox_notify_wait(adapter);
41799
41800- spin_unlock(&adapter->mbox_lock);
41801+ mutex_unlock(&adapter->mbox_lock);
41802+ return status;
41803+}
41804+
41805+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
41806+{
41807+ struct be_mcc_wrb *wrb;
41808+ struct be_cmd_req_rss_config *req;
41809+ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
41810+ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
41811+ int status;
41812+
41813+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41814+ return -1;
41815+
41816+ wrb = wrb_from_mbox(adapter);
41817+ req = embedded_payload(wrb);
41818+
41819+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41820+ OPCODE_ETH_RSS_CONFIG);
41821+
41822+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41823+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
41824+
41825+ req->if_id = cpu_to_le32(adapter->if_handle);
41826+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
41827+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
41828+ memcpy(req->cpu_table, rsstable, table_size);
41829+ memcpy(req->hash, myhash, sizeof(myhash));
41830+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
41831+
41832+ status = be_mbox_notify_wait(adapter);
41833+
41834+ mutex_unlock(&adapter->mbox_lock);
41835+ return status;
41836+}
41837+
41838+/* Uses sync mcc */
41839+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
41840+ u8 bcn, u8 sts, u8 state)
41841+{
41842+ struct be_mcc_wrb *wrb;
41843+ struct be_cmd_req_enable_disable_beacon *req;
41844+ int status;
41845+
41846+ spin_lock_bh(&adapter->mcc_lock);
41847+
41848+ wrb = wrb_from_mccq(adapter);
41849+ if (!wrb) {
41850+ status = -EBUSY;
41851+ goto err;
41852+ }
41853+ req = embedded_payload(wrb);
41854+
41855+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41856+ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
41857+
41858+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41859+ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
41860+
41861+ req->port_num = port_num;
41862+ req->beacon_state = state;
41863+ req->beacon_duration = bcn;
41864+ req->status_duration = sts;
41865+
41866+ status = be_mcc_notify_wait(adapter);
41867+
41868+err:
41869+ spin_unlock_bh(&adapter->mcc_lock);
41870+ return status;
41871+}
41872+
41873+/* Uses sync mcc */
41874+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
41875+{
41876+ struct be_mcc_wrb *wrb;
41877+ struct be_cmd_req_get_beacon_state *req;
41878+ int status;
41879+
41880+ spin_lock_bh(&adapter->mcc_lock);
41881+
41882+ wrb = wrb_from_mccq(adapter);
41883+ if (!wrb) {
41884+ status = -EBUSY;
41885+ goto err;
41886+ }
41887+ req = embedded_payload(wrb);
41888+
41889+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41890+ OPCODE_COMMON_GET_BEACON_STATE);
41891+
41892+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41893+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
41894+
41895+ req->port_num = port_num;
41896+
41897+ status = be_mcc_notify_wait(adapter);
41898+ if (!status) {
41899+ struct be_cmd_resp_get_beacon_state *resp =
41900+ embedded_payload(wrb);
41901+ *state = resp->beacon_state;
41902+ }
41903+
41904+err:
41905+ spin_unlock_bh(&adapter->mcc_lock);
41906+ return status;
41907+}
41908+
41909+/* Uses sync mcc */
41910+int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
41911+ u8 *connector)
41912+{
41913+ struct be_mcc_wrb *wrb;
41914+ struct be_cmd_req_port_type *req;
41915+ int status;
41916+
41917+ spin_lock_bh(&adapter->mcc_lock);
41918+
41919+ wrb = wrb_from_mccq(adapter);
41920+ if (!wrb) {
41921+ status = -EBUSY;
41922+ goto err;
41923+ }
41924+ req = embedded_payload(wrb);
41925+
41926+ be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
41927+ OPCODE_COMMON_READ_TRANSRECV_DATA);
41928+
41929+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41930+ OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
41931+
41932+ req->port = cpu_to_le32(port);
41933+ req->page_num = cpu_to_le32(TR_PAGE_A0);
41934+ status = be_mcc_notify_wait(adapter);
41935+ if (!status) {
41936+ struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
41937+ *connector = resp->data.connector;
41938+ }
41939+
41940+err:
41941+ spin_unlock_bh(&adapter->mcc_lock);
41942 return status;
41943 }
41944
41945@@ -1133,16 +1854,24 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
41946 u32 flash_type, u32 flash_opcode, u32 buf_size)
41947 {
41948 struct be_mcc_wrb *wrb;
41949- struct be_cmd_write_flashrom *req = cmd->va;
41950+ struct be_cmd_write_flashrom *req;
41951 struct be_sge *sge;
41952 int status;
41953
41954 spin_lock_bh(&adapter->mcc_lock);
41955+ adapter->flash_status = 0;
41956
41957 wrb = wrb_from_mccq(adapter);
41958+ if (!wrb) {
41959+ status = -EBUSY;
41960+ goto err_unlock;
41961+ }
41962+ req = cmd->va;
41963 sge = nonembedded_sgl(wrb);
41964
41965- be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
41966+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
41967+ OPCODE_COMMON_WRITE_FLASHROM);
41968+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
41969
41970 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41971 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
41972@@ -1154,8 +1883,852 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
41973 req->params.op_code = cpu_to_le32(flash_opcode);
41974 req->params.data_buf_size = cpu_to_le32(buf_size);
41975
41976+ be_mcc_notify(adapter);
41977+ spin_unlock_bh(&adapter->mcc_lock);
41978+
41979+ if (!wait_for_completion_timeout(&adapter->flash_compl,
41980+ msecs_to_jiffies(40000)))
41981+ status = -1;
41982+ else
41983+ status = adapter->flash_status;
41984+
41985+ return status;
41986+
41987+err_unlock:
41988+ spin_unlock_bh(&adapter->mcc_lock);
41989+ return status;
41990+}
41991+
41992+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
41993+ int offset)
41994+{
41995+ struct be_mcc_wrb *wrb;
41996+ struct be_cmd_write_flashrom *req;
41997+ int status;
41998+
41999+ spin_lock_bh(&adapter->mcc_lock);
42000+
42001+ wrb = wrb_from_mccq(adapter);
42002+ if (!wrb) {
42003+ status = -EBUSY;
42004+ goto err;
42005+ }
42006+ req = embedded_payload(wrb);
42007+
42008+ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
42009+ OPCODE_COMMON_READ_FLASHROM);
42010+
42011+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42012+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
42013+
42014+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
42015+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
42016+ req->params.offset = cpu_to_le32(offset);
42017+ req->params.data_buf_size = cpu_to_le32(0x4);
42018+
42019+ status = be_mcc_notify_wait(adapter);
42020+ if (!status)
42021+ memcpy(flashed_crc, req->params.data_buf, 4);
42022+
42023+err:
42024+ spin_unlock_bh(&adapter->mcc_lock);
42025+ return status;
42026+}
42027+
42028+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
42029+ struct be_dma_mem *nonemb_cmd)
42030+{
42031+ struct be_mcc_wrb *wrb;
42032+ struct be_cmd_req_acpi_wol_magic_config *req;
42033+ struct be_sge *sge;
42034+ int status;
42035+
42036+ spin_lock_bh(&adapter->mcc_lock);
42037+
42038+ wrb = wrb_from_mccq(adapter);
42039+ if (!wrb) {
42040+ status = -EBUSY;
42041+ goto err;
42042+ }
42043+ req = nonemb_cmd->va;
42044+ sge = nonembedded_sgl(wrb);
42045+
42046+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42047+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
42048+
42049+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42050+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
42051+ memcpy(req->magic_mac, mac, ETH_ALEN);
42052+
42053+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42054+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42055+ sge->len = cpu_to_le32(nonemb_cmd->size);
42056+
42057+ status = be_mcc_notify_wait(adapter);
42058+
42059+err:
42060+ spin_unlock_bh(&adapter->mcc_lock);
42061+ return status;
42062+}
42063+
42064+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
42065+ u8 loopback_type, u8 enable)
42066+{
42067+ struct be_mcc_wrb *wrb;
42068+ struct be_cmd_req_set_lmode *req;
42069+ int status;
42070+
42071+ spin_lock_bh(&adapter->mcc_lock);
42072+
42073+ wrb = wrb_from_mccq(adapter);
42074+ if (!wrb) {
42075+ status = -EBUSY;
42076+ goto err;
42077+ }
42078+
42079+ req = embedded_payload(wrb);
42080+
42081+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42082+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
42083+
42084+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42085+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
42086+ sizeof(*req));
42087+
42088+ req->src_port = port_num;
42089+ req->dest_port = port_num;
42090+ req->loopback_type = loopback_type;
42091+ req->loopback_state = enable;
42092+
42093+ status = be_mcc_notify_wait(adapter);
42094+err:
42095+ spin_unlock_bh(&adapter->mcc_lock);
42096+ return status;
42097+}
42098+
42099+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
42100+ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
42101+{
42102+ struct be_mcc_wrb *wrb;
42103+ struct be_cmd_req_loopback_test *req;
42104+ int status;
42105+
42106+ spin_lock_bh(&adapter->mcc_lock);
42107+
42108+ wrb = wrb_from_mccq(adapter);
42109+ if (!wrb) {
42110+ status = -EBUSY;
42111+ goto err;
42112+ }
42113+
42114+ req = embedded_payload(wrb);
42115+
42116+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42117+ OPCODE_LOWLEVEL_LOOPBACK_TEST);
42118+
42119+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42120+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
42121+ req->hdr.timeout = cpu_to_le32(4);
42122+
42123+ req->pattern = cpu_to_le64(pattern);
42124+ req->src_port = cpu_to_le32(port_num);
42125+ req->dest_port = cpu_to_le32(port_num);
42126+ req->pkt_size = cpu_to_le32(pkt_size);
42127+ req->num_pkts = cpu_to_le32(num_pkts);
42128+ req->loopback_type = cpu_to_le32(loopback_type);
42129+
42130+ status = be_mcc_notify_wait(adapter);
42131+ if (!status) {
42132+ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
42133+ status = le32_to_cpu(resp->status);
42134+ }
42135+
42136+err:
42137+ spin_unlock_bh(&adapter->mcc_lock);
42138+ return status;
42139+}
42140+
42141+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
42142+ u32 byte_cnt, struct be_dma_mem *cmd)
42143+{
42144+ struct be_mcc_wrb *wrb;
42145+ struct be_cmd_req_ddrdma_test *req;
42146+ struct be_sge *sge;
42147+ int status;
42148+ int i, j = 0;
42149+
42150+ spin_lock_bh(&adapter->mcc_lock);
42151+
42152+ wrb = wrb_from_mccq(adapter);
42153+ if (!wrb) {
42154+ status = -EBUSY;
42155+ goto err;
42156+ }
42157+ req = cmd->va;
42158+ sge = nonembedded_sgl(wrb);
42159+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
42160+ OPCODE_LOWLEVEL_HOST_DDR_DMA);
42161+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
42162+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
42163+
42164+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
42165+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
42166+ sge->len = cpu_to_le32(cmd->size);
42167+
42168+ req->pattern = cpu_to_le64(pattern);
42169+ req->byte_count = cpu_to_le32(byte_cnt);
42170+ for (i = 0; i < byte_cnt; i++) {
42171+ req->snd_buff[i] = (u8)(pattern >> (j*8));
42172+ j++;
42173+ if (j > 7)
42174+ j = 0;
42175+ }
42176+
42177+ status = be_mcc_notify_wait(adapter);
42178+
42179+ if (!status) {
42180+ struct be_cmd_resp_ddrdma_test *resp;
42181+ resp = cmd->va;
42182+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
42183+ resp->snd_err) {
42184+ status = -1;
42185+ }
42186+ }
42187+
42188+err:
42189+ spin_unlock_bh(&adapter->mcc_lock);
42190+ return status;
42191+}
42192+
42193+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
42194+ struct be_dma_mem *nonemb_cmd)
42195+{
42196+ struct be_mcc_wrb *wrb;
42197+ struct be_cmd_req_seeprom_read *req;
42198+ struct be_sge *sge;
42199+ int status;
42200+
42201+ spin_lock_bh(&adapter->mcc_lock);
42202+
42203+ wrb = wrb_from_mccq(adapter);
42204+ req = nonemb_cmd->va;
42205+ sge = nonembedded_sgl(wrb);
42206+
42207+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42208+ OPCODE_COMMON_SEEPROM_READ);
42209+
42210+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42211+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
42212+
42213+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
42214+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
42215+ sge->len = cpu_to_le32(nonemb_cmd->size);
42216+
42217+ status = be_mcc_notify_wait(adapter);
42218+
42219+ spin_unlock_bh(&adapter->mcc_lock);
42220+ return status;
42221+}
42222+
42223+int be_cmd_get_phy_info(struct be_adapter *adapter,
42224+ struct be_phy_info *phy_info)
42225+{
42226+ struct be_mcc_wrb *wrb;
42227+ struct be_cmd_req_get_phy_info *req;
42228+ struct be_sge *sge;
42229+ struct be_dma_mem cmd;
42230+ struct be_phy_info *resp_phy_info;
42231+ int status;
42232+
42233+ spin_lock_bh(&adapter->mcc_lock);
42234+ wrb = wrb_from_mccq(adapter);
42235+ if (!wrb) {
42236+ status = -EBUSY;
42237+ goto err;
42238+ }
42239+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
42240+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
42241+ &cmd.dma);
42242+ if (!cmd.va) {
42243+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
42244+ status = -ENOMEM;
42245+ goto err;
42246+ }
42247+
42248+ req = cmd.va;
42249+ sge = nonembedded_sgl(wrb);
42250+
42251+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
42252+ OPCODE_COMMON_GET_PHY_DETAILS);
42253+
42254+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42255+ OPCODE_COMMON_GET_PHY_DETAILS,
42256+ sizeof(*req));
42257+
42258+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
42259+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
42260+ sge->len = cpu_to_le32(cmd.size);
42261+
42262+ status = be_mcc_notify_wait(adapter);
42263+ if (!status) {
42264+ resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr);
42265+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
42266+ phy_info->interface_type =
42267+ le16_to_cpu(resp_phy_info->interface_type);
42268+ phy_info->auto_speeds_supported =
42269+ le16_to_cpu(resp_phy_info->auto_speeds_supported);
42270+ phy_info->fixed_speeds_supported =
42271+ le16_to_cpu(resp_phy_info->fixed_speeds_supported);
42272+ phy_info->misc_params =
42273+ le32_to_cpu(resp_phy_info->misc_params);
42274+ }
42275+ pci_free_consistent(adapter->pdev, cmd.size,
42276+ cmd.va, cmd.dma);
42277+err:
42278+ spin_unlock_bh(&adapter->mcc_lock);
42279+ return status;
42280+}
42281+
42282+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
42283+{
42284+ struct be_mcc_wrb *wrb;
42285+ struct be_cmd_req_set_qos *req;
42286+ int status;
42287+
42288+ spin_lock_bh(&adapter->mcc_lock);
42289+
42290+ wrb = wrb_from_mccq(adapter);
42291+ if (!wrb) {
42292+ status = -EBUSY;
42293+ goto err;
42294+ }
42295+
42296+ req = embedded_payload(wrb);
42297+
42298+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42299+ OPCODE_COMMON_SET_QOS);
42300+
42301+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42302+ OPCODE_COMMON_SET_QOS, sizeof(*req));
42303+
42304+ req->hdr.domain = domain;
42305+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
42306+ req->max_bps_nic = cpu_to_le32(bps);
42307+
42308+ status = be_mcc_notify_wait(adapter);
42309+err:
42310+ spin_unlock_bh(&adapter->mcc_lock);
42311+ return status;
42312+}
42313+
42314+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
42315+{
42316+ struct be_mcc_wrb *wrb;
42317+ struct be_cmd_req_cntl_attribs *req;
42318+ struct be_cmd_resp_cntl_attribs *resp;
42319+ struct be_sge *sge;
42320+ int status;
42321+ int payload_len = max(sizeof(*req), sizeof(*resp));
42322+ struct mgmt_controller_attrib *attribs;
42323+ struct be_dma_mem attribs_cmd;
42324+
42325+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
42326+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
42327+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
42328+ &attribs_cmd.dma);
42329+ if (!attribs_cmd.va) {
42330+ dev_err(&adapter->pdev->dev,
42331+ "Memory allocation failure\n");
42332+ return -ENOMEM;
42333+ }
42334+
42335+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42336+ return -1;
42337+
42338+ wrb = wrb_from_mbox(adapter);
42339+ if (!wrb) {
42340+ status = -EBUSY;
42341+ goto err;
42342+ }
42343+ req = attribs_cmd.va;
42344+ sge = nonembedded_sgl(wrb);
42345+
42346+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
42347+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
42348+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42349+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
42350+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
42351+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
42352+ sge->len = cpu_to_le32(attribs_cmd.size);
42353+
42354+ status = be_mbox_notify_wait(adapter);
42355+ if (!status) {
42356+ attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
42357+ sizeof(struct be_cmd_resp_hdr));
42358+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
42359+ strncpy(adapter->model_number,
42360+ attribs->hba_attribs.controller_model_number, 31);
42361+ }
42362+
42363+err:
42364+ mutex_unlock(&adapter->mbox_lock);
42365+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
42366+ attribs_cmd.dma);
42367+ return status;
42368+}
42369+
42370+/* Uses mbox */
42371+int be_cmd_req_native_mode(struct be_adapter *adapter)
42372+{
42373+ struct be_mcc_wrb *wrb;
42374+ struct be_cmd_req_set_func_cap *req;
42375+ int status;
42376+
42377+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42378+ return -1;
42379+
42380+ wrb = wrb_from_mbox(adapter);
42381+ if (!wrb) {
42382+ status = -EBUSY;
42383+ goto err;
42384+ }
42385+
42386+ req = embedded_payload(wrb);
42387+
42388+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42389+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
42390+
42391+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42392+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
42393+
42394+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
42395+ CAPABILITY_BE3_NATIVE_ERX_API);
42396+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
42397+
42398+ status = be_mbox_notify_wait(adapter);
42399+ if (!status) {
42400+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
42401+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
42402+ CAPABILITY_BE3_NATIVE_ERX_API;
42403+ }
42404+err:
42405+ mutex_unlock(&adapter->mbox_lock);
42406+ return status;
42407+}
42408+
42409+static void encode_port_names(struct be_adapter *adapter)
42410+{
42411+ switch (adapter->port_name[adapter->hba_port_num]) {
42412+ case '0':
42413+ adapter->port_name[adapter->hba_port_num] = 0;
42414+ break;
42415+ case '1':
42416+ adapter->port_name[adapter->hba_port_num] = 1;
42417+ break;
42418+ case '2':
42419+ adapter->port_name[adapter->hba_port_num] = 2;
42420+ break;
42421+ case '3':
42422+ adapter->port_name[adapter->hba_port_num] = 3;
42423+ break;
42424+ case '4':
42425+ adapter->port_name[adapter->hba_port_num] = 4;
42426+ break;
42427+ case 'A':
42428+ adapter->port_name[adapter->hba_port_num] = 5;
42429+ break;
42430+ case 'B':
42431+ adapter->port_name[adapter->hba_port_num] = 6;
42432+ break;
42433+ case 'C':
42434+ adapter->port_name[adapter->hba_port_num] = 7;
42435+ break;
42436+ case 'D':
42437+ adapter->port_name[adapter->hba_port_num] = 8;
42438+ break;
42439+ }
42440+}
42441+
42442+int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name)
42443+{
42444+ struct be_mcc_wrb *wrb;
42445+ struct be_cmd_req_get_port_name *req;
42446+ int status;
42447+
42448+ spin_lock_bh(&adapter->mcc_lock);
42449+
42450+ wrb = wrb_from_mccq(adapter);
42451+ if (!wrb) {
42452+ status = -EBUSY;
42453+ goto err;
42454+ }
42455+
42456+ req = embedded_payload(wrb);
42457+
42458+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42459+ OPCODE_COMMON_GET_PORT_NAME);
42460+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42461+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42462+
42463+ status = be_mcc_notify_wait(adapter);
42464+ if (!status) {
42465+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
42466+ port_name[0] = resp->port0_name;
42467+ port_name[1] = resp->port1_name;
42468+ }
42469+
42470+err:
42471+ spin_unlock_bh(&adapter->mcc_lock);
42472+
42473+ if(!status)
42474+ encode_port_names(adapter);
42475+ return status;
42476+}
42477+
42478+int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name)
42479+{
42480+ struct be_mcc_wrb *wrb;
42481+ struct be_cmd_req_get_port_name *req;
42482+ int status;
42483+
42484+ spin_lock_bh(&adapter->mcc_lock);
42485+
42486+ wrb = wrb_from_mccq(adapter);
42487+ if (!wrb) {
42488+ status = -EBUSY;
42489+ goto err;
42490+ }
42491+ req = embedded_payload(wrb);
42492+
42493+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42494+ OPCODE_COMMON_GET_PORT_NAME);
42495+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42496+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
42497+ req->hdr.version = 1;
42498+
42499 status = be_mcc_notify_wait(adapter);
42500+ if (!status) {
42501+ struct be_cmd_resp_get_port_name_v1 *resp = embedded_payload(wrb);
42502+ port_name[0] = resp->port0_name;
42503+ port_name[1] = resp->port1_name;
42504+ port_name[2] = resp->port2_name;
42505+ port_name[3] = resp->port3_name;
42506+ }
42507+
42508+err:
42509+ spin_unlock_bh(&adapter->mcc_lock);
42510+
42511+ if (!status)
42512+ encode_port_names(adapter);
42513+ return status;
42514+}
42515+
42516+int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs)
42517+{
42518+ struct be_mcc_wrb *wrb;
42519+ struct be_cmd_req_pg *req;
42520+ int status, num = 0;
42521+ bool query = true;
42522+
42523+ *fw_num_txqs = MAX_TX_QS;
42524+
42525+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42526+ return -1;
42527+
42528+enable_pfc:
42529+ wrb = wrb_from_mbox(adapter);
42530+ req = embedded_payload(wrb);
42531+
42532+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42533+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST);
42534+
42535+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42536+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST, sizeof(*req));
42537+
42538+ if (query)
42539+ req->query |= cpu_to_le32(REQ_PG_QUERY);
42540+ req->pfc_pg |= cpu_to_le32(REQ_PG_FEAT);
42541+
42542+ status = be_mbox_notify_wait(adapter);
42543+ if (!status) {
42544+ struct be_cmd_resp_pg *resp = embedded_payload(wrb);
42545+ if (query) {
42546+ if (le32_to_cpu(resp->pfc_pg) & REQ_PG_FEAT) {
42547+ num = le32_to_cpu(resp->num_tx_rings);
42548+ query = false;
42549+ goto enable_pfc;
42550+ }
42551+ } else {
42552+ adapter->flags |= BE_FLAGS_DCBX;
42553+ *fw_num_txqs = num;
42554+ }
42555+ }
42556+
42557+ mutex_unlock(&adapter->mbox_lock);
42558+ return status;
42559+}
42560+
42561+/* Set privilege(s) for a function */
42562+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 mask, u32 *prev,
42563+ u32 domain)
42564+{
42565+ struct be_mcc_wrb *wrb;
42566+ struct be_cmd_req_set_fn_privileges *req;
42567+ int status;
42568+
42569+ spin_lock_bh(&adapter->mcc_lock);
42570+
42571+ wrb = wrb_from_mccq(adapter);
42572+ if (!wrb) {
42573+ status = -EBUSY;
42574+ goto err;
42575+ }
42576+
42577+ req = embedded_payload(wrb);
42578+
42579+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42580+ OPCODE_COMMON_SET_FN_PRIVILEGES);
42581+
42582+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42583+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req));
42584+
42585+ req->hdr.domain = domain;
42586+ req->privilege_mask = cpu_to_le32(mask);
42587+
42588+ status = be_mcc_notify_wait(adapter);
42589+
42590+err:
42591+ spin_unlock_bh(&adapter->mcc_lock);
42592+ return status;
42593+}
42594+
42595+/* Get privilege(s) for a function */
42596+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
42597+ u32 domain)
42598+{
42599+ struct be_mcc_wrb *wrb;
42600+ struct be_cmd_req_get_fn_privileges *req;
42601+ int status;
42602+
42603+ spin_lock_bh(&adapter->mcc_lock);
42604+
42605+ wrb = wrb_from_mccq(adapter);
42606+ if (!wrb) {
42607+ status = -EBUSY;
42608+ goto err;
42609+ }
42610+
42611+ req = embedded_payload(wrb);
42612+
42613+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42614+ OPCODE_COMMON_GET_FN_PRIVILEGES);
42615
42616+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42617+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req));
42618+
42619+ req->hdr.domain = domain;
42620+
42621+ status = be_mcc_notify_wait(adapter);
42622+ if (!status) {
42623+ struct be_cmd_resp_get_fn_privileges *resp =
42624+ embedded_payload(wrb);
42625+ *privilege = le32_to_cpu(resp->privilege_mask);
42626+ } else
42627+ *privilege = 0;
42628+
42629+err:
42630+ spin_unlock_bh(&adapter->mcc_lock);
42631+ return status;
42632+}
42633+
42634+/* Set Hyper switch config */
42635+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
42636+ u32 domain, u16 intf_id)
42637+{
42638+ struct be_mcc_wrb *wrb;
42639+ struct be_cmd_req_set_hsw_config *req;
42640+ void *ctxt;
42641+ int status;
42642+
42643+ spin_lock_bh(&adapter->mcc_lock);
42644+
42645+ wrb = wrb_from_mccq(adapter);
42646+ if (!wrb) {
42647+ status = -EBUSY;
42648+ goto err;
42649+ }
42650+
42651+ req = embedded_payload(wrb);
42652+ ctxt = &req->context;
42653+
42654+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42655+ OPCODE_COMMON_SET_HSW_CONFIG);
42656+
42657+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42658+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req));
42659+
42660+ req->hdr.domain = domain;
42661+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
42662+ if (pvid) {
42663+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
42664+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
42665+ }
42666+
42667+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42668+ status = be_mcc_notify_wait(adapter);
42669+
42670+err:
42671+ spin_unlock_bh(&adapter->mcc_lock);
42672+ return status;
42673+}
42674+
42675+/* Get Hyper switch config */
42676+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
42677+ u32 domain, u16 intf_id)
42678+{
42679+ struct be_mcc_wrb *wrb;
42680+ struct be_cmd_req_get_hsw_config *req;
42681+ void *ctxt;
42682+ int status;
42683+ u16 vid;
42684+
42685+ spin_lock_bh(&adapter->mcc_lock);
42686+
42687+ wrb = wrb_from_mccq(adapter);
42688+ if (!wrb) {
42689+ status = -EBUSY;
42690+ goto err;
42691+ }
42692+
42693+ req = embedded_payload(wrb);
42694+ ctxt = &req->context;
42695+
42696+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42697+ OPCODE_COMMON_GET_HSW_CONFIG);
42698+
42699+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42700+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req));
42701+
42702+ req->hdr.domain = domain;
42703+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
42704+ intf_id);
42705+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
42706+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42707+
42708+ status = be_mcc_notify_wait(adapter);
42709+ if (!status) {
42710+ struct be_cmd_resp_get_hsw_config *resp =
42711+ embedded_payload(wrb);
42712+ be_dws_le_to_cpu(&resp->context,
42713+ sizeof(resp->context));
42714+ vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
42715+ pvid, &resp->context);
42716+ *pvid = le16_to_cpu(vid);
42717+ }
42718+
42719+err:
42720+ spin_unlock_bh(&adapter->mcc_lock);
42721+ return status;
42722+}
42723+
42724+int be_cmd_get_port_speed(struct be_adapter *adapter,
42725+ u8 port_num, u16 *dac_cable_len, u16 *port_speed)
42726+{
42727+ struct be_mcc_wrb *wrb;
42728+ struct be_cmd_req_get_port_speed *req;
42729+ int status = 0;
42730+
42731+ spin_lock_bh(&adapter->mcc_lock);
42732+
42733+ wrb = wrb_from_mccq(adapter);
42734+ if (!wrb) {
42735+ status = -EBUSY;
42736+ goto err;
42737+ }
42738+
42739+ req = embedded_payload(wrb);
42740+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42741+ OPCODE_COMMON_NTWK_GET_LINK_SPEED);
42742+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42743+ OPCODE_COMMON_NTWK_GET_LINK_SPEED,
42744+ sizeof(*req));
42745+ req->port_num = port_num;
42746+ status = be_mcc_notify_wait(adapter);
42747+ if (!status) {
42748+ struct be_cmd_resp_get_port_speed *resp =
42749+ embedded_payload(wrb);
42750+ *dac_cable_len = resp->dac_cable_length;
42751+ *port_speed = resp->mac_speed;
42752+ }
42753+
42754+err:
42755+ spin_unlock_bh(&adapter->mcc_lock);
42756+ return status;
42757+}
42758+
42759+int be_cmd_set_port_speed_v1(struct be_adapter *adapter,
42760+ u8 port_num, u16 mac_speed,
42761+ u16 dac_cable_len)
42762+{
42763+ struct be_mcc_wrb *wrb;
42764+ struct be_cmd_req_set_port_speed_v1 *req;
42765+ int status = 0;
42766+
42767+ spin_lock_bh(&adapter->mcc_lock);
42768+
42769+ wrb = wrb_from_mccq(adapter);
42770+ if (!wrb) {
42771+ status = -EBUSY;
42772+ goto err;
42773+ }
42774+ req = embedded_payload(wrb);
42775+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42776+ OPCODE_COMMON_NTWK_SET_LINK_SPEED);
42777+
42778+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42779+ OPCODE_COMMON_NTWK_SET_LINK_SPEED,
42780+ sizeof(*req));
42781+ req->hdr.version=1;
42782+
42783+ req->port_num = port_num;
42784+ req->virt_port = port_num;
42785+ req->mac_speed = mac_speed;
42786+ req->dac_cable_length = dac_cable_len;
42787+ status = be_mcc_notify_wait(adapter);
42788+err:
42789+ spin_unlock_bh(&adapter->mcc_lock);
42790+ return status;
42791+}
42792+
42793+
42794+/* Uses sync mcc */
42795+#ifdef CONFIG_PALAU
42796+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
42797+ int req_size, void *va)
42798+{
42799+ struct be_mcc_wrb *wrb;
42800+ struct be_sge *sge;
42801+ int status;
42802+ struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) va;
42803+
42804+ spin_lock_bh(&adapter->mcc_lock);
42805+
42806+ wrb = wrb_from_mccq(adapter);
42807+ if (!wrb) {
42808+ status = -EBUSY;
42809+ goto err;
42810+ }
42811+ sge = nonembedded_sgl(wrb);
42812+
42813+ be_wrb_hdr_prepare(wrb, req_size, false, 1, hdr->opcode);
42814+ wrb->tag1 = MCC_WRB_PASS_THRU;
42815+ sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
42816+ sge->pa_lo = cpu_to_le32(dma & 0xFFFFFFFF);
42817+ sge->len = cpu_to_le32(req_size);
42818+
42819+ status = be_mcc_notify_wait(adapter);
42820+err:
42821 spin_unlock_bh(&adapter->mcc_lock);
42822 return status;
42823 }
42824+#endif
42825diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
42826index ad33d55..35aa5c7 100644
42827--- a/drivers/net/benet/be_cmds.h
42828+++ b/drivers/net/benet/be_cmds.h
42829@@ -1,20 +1,23 @@
42830 /*
42831- * Copyright (C) 2005 - 2009 ServerEngines
42832+ * Copyright (C) 2005 - 2011 Emulex
42833 * All rights reserved.
42834 *
42835 * This program is free software; you can redistribute it and/or
42836 * modify it under the terms of the GNU General Public License version 2
42837- * as published by the Free Software Foundation. The full GNU General
42838+ * as published by the Free Software Foundation. The full GNU General
42839 * Public License is included in this distribution in the file called COPYING.
42840 *
42841 * Contact Information:
42842- * linux-drivers@serverengines.com
42843+ * linux-drivers@emulex.com
42844 *
42845- * ServerEngines
42846- * 209 N. Fair Oaks Ave
42847- * Sunnyvale, CA 94085
42848+ * Emulex
42849+ * 3333 Susan Street
42850+ * Costa Mesa, CA 92626
42851 */
42852
42853+#ifndef BE_CMDS_H
42854+#define BE_CMDS_H
42855+
42856 /*
42857 * The driver sends configuration and managements command requests to the
42858 * firmware in the BE. These requests are communicated to the processor
42859@@ -29,9 +32,10 @@ struct be_sge {
42860 u32 len;
42861 };
42862
42863-#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42864+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42865 #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
42866 #define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
42867+#define MCC_WRB_PASS_THRU 0xFF /* this wrb is used for pass thru cmd */
42868 struct be_mcc_wrb {
42869 u32 embedded; /* dword 0 */
42870 u32 payload_length; /* dword 1 */
42871@@ -44,24 +48,19 @@ struct be_mcc_wrb {
42872 } payload;
42873 };
42874
42875-#define CQE_FLAGS_VALID_MASK (1 << 31)
42876-#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42877-#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42878-#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42879+#define CQE_FLAGS_VALID_MASK (1 << 31)
42880+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42881+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42882+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42883
42884 /* Completion Status */
42885 enum {
42886- MCC_STATUS_SUCCESS = 0x0,
42887-/* The client does not have sufficient privileges to execute the command */
42888- MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
42889-/* A parameter in the command was invalid. */
42890- MCC_STATUS_INVALID_PARAMETER = 0x2,
42891-/* There are insufficient chip resources to execute the command */
42892- MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
42893-/* The command is completing because the queue was getting flushed */
42894- MCC_STATUS_QUEUE_FLUSHING = 0x4,
42895-/* The command is completing with a DMA error */
42896- MCC_STATUS_DMA_FAILED = 0x5,
42897+ MCC_STATUS_SUCCESS = 0,
42898+ MCC_STATUS_FAILED = 1,
42899+ MCC_STATUS_ILLEGAL_REQUEST = 2,
42900+ MCC_STATUS_ILLEGAL_FIELD = 3,
42901+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
42902+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
42903 MCC_STATUS_NOT_SUPPORTED = 66
42904 };
42905
42906@@ -81,15 +80,24 @@ struct be_mcc_compl {
42907 * mcc_compl is interpreted as follows:
42908 */
42909 #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
42910+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
42911 #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
42912+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
42913 #define ASYNC_EVENT_CODE_LINK_STATE 0x1
42914+#define ASYNC_EVENT_CODE_GRP_5 0x5
42915+#define ASYNC_EVENT_QOS_SPEED 0x1
42916+#define ASYNC_EVENT_COS_PRIORITY 0x2
42917+#define ASYNC_EVENT_PVID_STATE 0x3
42918+#define GRP5_TYPE_PRIO_TC_MAP 4
42919+
42920 struct be_async_event_trailer {
42921 u32 code;
42922 };
42923
42924 enum {
42925- ASYNC_EVENT_LINK_DOWN = 0x0,
42926- ASYNC_EVENT_LINK_UP = 0x1
42927+ ASYNC_EVENT_LINK_DOWN = 0x0,
42928+ ASYNC_EVENT_LINK_UP = 0x1,
42929+ ASYNC_EVENT_LOGICAL = 0x2
42930 };
42931
42932 /* When the event code of an async trailer is link-state, the mcc_compl
42933@@ -101,7 +109,51 @@ struct be_async_event_link_state {
42934 u8 port_duplex;
42935 u8 port_speed;
42936 u8 port_fault;
42937- u8 rsvd0[7];
42938+ u8 rsvd0;
42939+ u16 qos_link_speed;
42940+ u32 event_tag;
42941+ struct be_async_event_trailer trailer;
42942+} __packed;
42943+
42944+/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
42945+ * the mcc_compl must be interpreted as follows
42946+ */
42947+struct be_async_event_grp5_qos_link_speed {
42948+ u8 physical_port;
42949+ u8 rsvd[5];
42950+ u16 qos_link_speed;
42951+ u32 event_tag;
42952+ struct be_async_event_trailer trailer;
42953+} __packed;
42954+
42955+/* When the event code of an async trailer is GRP5 and event type is
42956+ * CoS-Priority, the mcc_compl must be interpreted as follows
42957+ */
42958+struct be_async_event_grp5_cos_priority {
42959+ u8 physical_port;
42960+ u8 available_priority_bmap;
42961+ u8 reco_default_priority;
42962+ u8 valid;
42963+ u8 rsvd0;
42964+ u8 event_tag;
42965+ struct be_async_event_trailer trailer;
42966+} __packed;
42967+
42968+/* When the event code of an async trailer is GRP5 and event type is
42969+ * PVID state, the mcc_compl must be interpreted as follows
42970+ */
42971+struct be_async_event_grp5_pvid_state {
42972+ u8 enabled;
42973+ u8 rsvd0;
42974+ u16 tag;
42975+ u32 event_tag;
42976+ u32 rsvd1;
42977+ struct be_async_event_trailer trailer;
42978+} __packed;
42979+
42980+/* GRP5 prio-tc-map event */
42981+struct be_async_event_grp5_prio_tc_map {
42982+ u8 prio_tc_map[8]; /* map[prio] -> tc_id */
42983 struct be_async_event_trailer trailer;
42984 } __packed;
42985
42986@@ -111,41 +163,68 @@ struct be_mcc_mailbox {
42987 };
42988
42989 #define CMD_SUBSYSTEM_COMMON 0x1
42990-#define CMD_SUBSYSTEM_ETH 0x3
42991+#define CMD_SUBSYSTEM_ETH 0x3
42992+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
42993
42994 #define OPCODE_COMMON_NTWK_MAC_QUERY 1
42995 #define OPCODE_COMMON_NTWK_MAC_SET 2
42996 #define OPCODE_COMMON_NTWK_MULTICAST_SET 3
42997-#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
42998+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
42999 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
43000+#define OPCODE_COMMON_READ_FLASHROM 6
43001 #define OPCODE_COMMON_WRITE_FLASHROM 7
43002 #define OPCODE_COMMON_CQ_CREATE 12
43003 #define OPCODE_COMMON_EQ_CREATE 13
43004-#define OPCODE_COMMON_MCC_CREATE 21
43005-#define OPCODE_COMMON_NTWK_RX_FILTER 34
43006+#define OPCODE_COMMON_MCC_CREATE 21
43007+#define OPCODE_COMMON_SET_QOS 28
43008+#define OPCODE_COMMON_MCC_CREATE_EXT 90
43009+#define OPCODE_COMMON_SEEPROM_READ 30
43010+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
43011+#define OPCODE_COMMON_NTWK_RX_FILTER 34
43012 #define OPCODE_COMMON_GET_FW_VERSION 35
43013 #define OPCODE_COMMON_SET_FLOW_CONTROL 36
43014 #define OPCODE_COMMON_GET_FLOW_CONTROL 37
43015 #define OPCODE_COMMON_SET_FRAME_SIZE 39
43016 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
43017 #define OPCODE_COMMON_FIRMWARE_CONFIG 42
43018-#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43019-#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43020-#define OPCODE_COMMON_MCC_DESTROY 53
43021-#define OPCODE_COMMON_CQ_DESTROY 54
43022-#define OPCODE_COMMON_EQ_DESTROY 55
43023+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
43024+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
43025+#define OPCODE_COMMON_MCC_DESTROY 53
43026+#define OPCODE_COMMON_CQ_DESTROY 54
43027+#define OPCODE_COMMON_EQ_DESTROY 55
43028+#define OPCODE_COMMON_NTWK_SET_LINK_SPEED 57
43029 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
43030 #define OPCODE_COMMON_NTWK_PMAC_ADD 59
43031 #define OPCODE_COMMON_NTWK_PMAC_DEL 60
43032 #define OPCODE_COMMON_FUNCTION_RESET 61
43033+#define OPCODE_COMMON_MANAGE_FAT 68
43034+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
43035+#define OPCODE_COMMON_GET_BEACON_STATE 70
43036+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
43037+#define OPCODE_COMMON_GET_PORT_NAME 77
43038+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
43039+#define OPCODE_COMMON_GET_PHY_DETAILS 102
43040+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
43041+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
43042+#define OPCODE_COMMON_NTWK_GET_LINK_SPEED 134
43043+#define OPCODE_COMMON_GET_HSW_CONFIG 152
43044+#define OPCODE_COMMON_SET_HSW_CONFIG 153
43045+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
43046
43047+#define OPCODE_ETH_RSS_CONFIG 1
43048 #define OPCODE_ETH_ACPI_CONFIG 2
43049 #define OPCODE_ETH_PROMISCUOUS 3
43050 #define OPCODE_ETH_GET_STATISTICS 4
43051 #define OPCODE_ETH_TX_CREATE 7
43052-#define OPCODE_ETH_RX_CREATE 8
43053-#define OPCODE_ETH_TX_DESTROY 9
43054-#define OPCODE_ETH_RX_DESTROY 10
43055+#define OPCODE_ETH_RX_CREATE 8
43056+#define OPCODE_ETH_TX_DESTROY 9
43057+#define OPCODE_ETH_RX_DESTROY 10
43058+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
43059+#define OPCODE_ETH_PG_FEATURE_QUERY_REQUEST 23
43060+
43061+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
43062+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
43063+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
43064
43065 struct be_cmd_req_hdr {
43066 u8 opcode; /* dword 0 */
43067@@ -159,7 +238,7 @@ struct be_cmd_req_hdr {
43068 };
43069
43070 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
43071-#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43072+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
43073 struct be_cmd_resp_hdr {
43074 u32 info; /* dword 0 */
43075 u32 status; /* dword 1 */
43076@@ -265,7 +344,7 @@ struct be_cmd_req_pmac_del {
43077 /******************** Create CQ ***************************/
43078 /* Pseudo amap definition in which each bit of the actual structure is defined
43079 * as a byte: used to calculate offset/shift/mask of each field */
43080-struct amap_cq_context {
43081+struct amap_cq_context_be {
43082 u8 cidx[11]; /* dword 0*/
43083 u8 rsvd0; /* dword 0*/
43084 u8 coalescwm[2]; /* dword 0*/
43085@@ -288,11 +367,28 @@ struct amap_cq_context {
43086 u8 rsvd5[32]; /* dword 3*/
43087 } __packed;
43088
43089+struct amap_cq_context_lancer {
43090+ u8 rsvd0[12]; /* dword 0*/
43091+ u8 coalescwm[2]; /* dword 0*/
43092+ u8 nodelay; /* dword 0*/
43093+ u8 rsvd1[12]; /* dword 0*/
43094+ u8 count[2]; /* dword 0*/
43095+ u8 valid; /* dword 0*/
43096+ u8 rsvd2; /* dword 0*/
43097+ u8 eventable; /* dword 0*/
43098+ u8 eqid[16]; /* dword 1*/
43099+ u8 rsvd3[15]; /* dword 1*/
43100+ u8 armed; /* dword 1*/
43101+ u8 rsvd4[32]; /* dword 2*/
43102+ u8 rsvd5[32]; /* dword 3*/
43103+} __packed;
43104+
43105 struct be_cmd_req_cq_create {
43106 struct be_cmd_req_hdr hdr;
43107 u16 num_pages;
43108- u16 rsvd0;
43109- u8 context[sizeof(struct amap_cq_context) / 8];
43110+ u8 page_size;
43111+ u8 rsvd0;
43112+ u8 context[sizeof(struct amap_cq_context_be) / 8];
43113 struct phys_addr pages[8];
43114 } __packed;
43115
43116@@ -302,10 +398,28 @@ struct be_cmd_resp_cq_create {
43117 u16 rsvd0;
43118 } __packed;
43119
43120+struct be_cmd_req_get_fat {
43121+ struct be_cmd_req_hdr hdr;
43122+ u32 fat_operation;
43123+ u32 read_log_offset;
43124+ u32 read_log_length;
43125+ u32 data_buffer_size;
43126+ u32 data_buffer[1];
43127+} __packed;
43128+
43129+struct be_cmd_resp_get_fat {
43130+ struct be_cmd_resp_hdr hdr;
43131+ u32 log_size;
43132+ u32 read_log_length;
43133+ u32 rsvd[2];
43134+ u32 data_buffer[1];
43135+} __packed;
43136+
43137+
43138 /******************** Create MCCQ ***************************/
43139 /* Pseudo amap definition in which each bit of the actual structure is defined
43140 * as a byte: used to calculate offset/shift/mask of each field */
43141-struct amap_mcc_context {
43142+struct amap_mcc_context_be {
43143 u8 con_index[14];
43144 u8 rsvd0[2];
43145 u8 ring_size[4];
43146@@ -320,11 +434,31 @@ struct amap_mcc_context {
43147 u8 rsvd2[32];
43148 } __packed;
43149
43150+struct amap_mcc_context_lancer {
43151+ u8 async_cq_id[16];
43152+ u8 ring_size[4];
43153+ u8 rsvd0[12];
43154+ u8 rsvd1[31];
43155+ u8 valid;
43156+ u8 async_cq_valid[1];
43157+ u8 rsvd2[31];
43158+ u8 rsvd3[32];
43159+} __packed;
43160+
43161 struct be_cmd_req_mcc_create {
43162 struct be_cmd_req_hdr hdr;
43163 u16 num_pages;
43164- u16 rsvd0;
43165- u8 context[sizeof(struct amap_mcc_context) / 8];
43166+ u16 cq_id;
43167+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
43168+ struct phys_addr pages[8];
43169+} __packed;
43170+
43171+struct be_cmd_req_mcc_ext_create {
43172+ struct be_cmd_req_hdr hdr;
43173+ u16 num_pages;
43174+ u16 cq_id;
43175+ u32 async_event_bitmap[1];
43176+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
43177 struct phys_addr pages[8];
43178 } __packed;
43179
43180@@ -335,49 +469,32 @@ struct be_cmd_resp_mcc_create {
43181 } __packed;
43182
43183 /******************** Create TxQ ***************************/
43184-#define BE_ETH_TX_RING_TYPE_STANDARD 2
43185+#define ETX_QUEUE_TYPE_STANDARD 0x2
43186+#define ETX_QUEUE_TYPE_PRIORITY 0x10
43187 #define BE_ULP1_NUM 1
43188
43189-/* Pseudo amap definition in which each bit of the actual structure is defined
43190- * as a byte: used to calculate offset/shift/mask of each field */
43191-struct amap_tx_context {
43192- u8 rsvd0[16]; /* dword 0 */
43193- u8 tx_ring_size[4]; /* dword 0 */
43194- u8 rsvd1[26]; /* dword 0 */
43195- u8 pci_func_id[8]; /* dword 1 */
43196- u8 rsvd2[9]; /* dword 1 */
43197- u8 ctx_valid; /* dword 1 */
43198- u8 cq_id_send[16]; /* dword 2 */
43199- u8 rsvd3[16]; /* dword 2 */
43200- u8 rsvd4[32]; /* dword 3 */
43201- u8 rsvd5[32]; /* dword 4 */
43202- u8 rsvd6[32]; /* dword 5 */
43203- u8 rsvd7[32]; /* dword 6 */
43204- u8 rsvd8[32]; /* dword 7 */
43205- u8 rsvd9[32]; /* dword 8 */
43206- u8 rsvd10[32]; /* dword 9 */
43207- u8 rsvd11[32]; /* dword 10 */
43208- u8 rsvd12[32]; /* dword 11 */
43209- u8 rsvd13[32]; /* dword 12 */
43210- u8 rsvd14[32]; /* dword 13 */
43211- u8 rsvd15[32]; /* dword 14 */
43212- u8 rsvd16[32]; /* dword 15 */
43213-} __packed;
43214-
43215 struct be_cmd_req_eth_tx_create {
43216 struct be_cmd_req_hdr hdr;
43217 u8 num_pages;
43218 u8 ulp_num;
43219- u8 type;
43220- u8 bound_port;
43221- u8 context[sizeof(struct amap_tx_context) / 8];
43222+ u16 type;
43223+ u16 if_id;
43224+ u8 queue_size;
43225+ u8 rsvd1;
43226+ u32 rsvd2;
43227+ u16 cq_id;
43228+ u16 rsvd3;
43229+ u32 rsvd4[13];
43230 struct phys_addr pages[8];
43231 } __packed;
43232
43233 struct be_cmd_resp_eth_tx_create {
43234 struct be_cmd_resp_hdr hdr;
43235 u16 cid;
43236- u16 rsvd0;
43237+ u16 rid;
43238+ u32 db_offset;
43239+ u8 tc_id;
43240+ u8 rsvd0[3];
43241 } __packed;
43242
43243 /******************** Create RxQ ***************************/
43244@@ -396,7 +513,7 @@ struct be_cmd_req_eth_rx_create {
43245 struct be_cmd_resp_eth_rx_create {
43246 struct be_cmd_resp_hdr hdr;
43247 u16 id;
43248- u8 cpu_id;
43249+ u8 rss_id;
43250 u8 rsvd0;
43251 } __packed;
43252
43253@@ -429,14 +546,15 @@ enum be_if_flags {
43254 BE_IF_FLAGS_VLAN = 0x100,
43255 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
43256 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
43257- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
43258+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
43259+ BE_IF_FLAGS_MULTICAST = 0x1000
43260 };
43261
43262 /* An RX interface is an object with one or more MAC addresses and
43263 * filtering capabilities. */
43264 struct be_cmd_req_if_create {
43265 struct be_cmd_req_hdr hdr;
43266- u32 version; /* ignore currntly */
43267+ u32 version; /* ignore currently */
43268 u32 capability_flags;
43269 u32 enable_flags;
43270 u8 mac_addr[ETH_ALEN];
43271@@ -458,7 +576,7 @@ struct be_cmd_req_if_destroy {
43272 };
43273
43274 /*************** HW Stats Get **********************************/
43275-struct be_port_rxf_stats {
43276+struct be_port_rxf_stats_v0 {
43277 u32 rx_bytes_lsd; /* dword 0*/
43278 u32 rx_bytes_msd; /* dword 1*/
43279 u32 rx_total_frames; /* dword 2*/
43280@@ -527,8 +645,8 @@ struct be_port_rxf_stats {
43281 u32 rx_input_fifo_overflow; /* dword 65*/
43282 };
43283
43284-struct be_rxf_stats {
43285- struct be_port_rxf_stats port[2];
43286+struct be_rxf_stats_v0 {
43287+ struct be_port_rxf_stats_v0 port[2];
43288 u32 rx_drops_no_pbuf; /* dword 132*/
43289 u32 rx_drops_no_txpb; /* dword 133*/
43290 u32 rx_drops_no_erx_descr; /* dword 134*/
43291@@ -545,31 +663,51 @@ struct be_rxf_stats {
43292 u32 rx_drops_invalid_ring; /* dword 145*/
43293 u32 forwarded_packets; /* dword 146*/
43294 u32 rx_drops_mtu; /* dword 147*/
43295- u32 rsvd0[15];
43296+ u32 rsvd0[7];
43297+ u32 port0_jabber_events;
43298+ u32 port1_jabber_events;
43299+ u32 rsvd1[6];
43300 };
43301
43302-struct be_erx_stats {
43303+struct be_erx_stats_v0 {
43304 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
43305- u32 debug_wdma_sent_hold; /* dword 44*/
43306- u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
43307- u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
43308- u32 debug_pmem_pbuf_dealloc; /* dword 47*/
43309+ u32 rsvd[4];
43310 };
43311
43312-struct be_hw_stats {
43313- struct be_rxf_stats rxf;
43314+struct be_pmem_stats {
43315+ u32 eth_red_drops;
43316+ u32 rsvd[5];
43317+};
43318+
43319+struct be_hw_stats_v0 {
43320+ struct be_rxf_stats_v0 rxf;
43321 u32 rsvd[48];
43322- struct be_erx_stats erx;
43323+ struct be_erx_stats_v0 erx;
43324+ struct be_pmem_stats pmem;
43325 };
43326
43327-struct be_cmd_req_get_stats {
43328+struct be_cmd_req_get_stats_v0 {
43329 struct be_cmd_req_hdr hdr;
43330- u8 rsvd[sizeof(struct be_hw_stats)];
43331+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
43332 };
43333
43334-struct be_cmd_resp_get_stats {
43335+struct be_cmd_resp_get_stats_v0 {
43336 struct be_cmd_resp_hdr hdr;
43337- struct be_hw_stats hw_stats;
43338+ struct be_hw_stats_v0 hw_stats;
43339+};
43340+
43341+struct be_cmd_req_get_cntl_addnl_attribs {
43342+ struct be_cmd_req_hdr hdr;
43343+ u8 rsvd[8];
43344+};
43345+
43346+struct be_cmd_resp_get_cntl_addnl_attribs {
43347+ struct be_cmd_resp_hdr hdr;
43348+ u16 ipl_file_number;
43349+ u8 ipl_file_version;
43350+ u8 rsvd0;
43351+ u8 on_die_temperature; /* in degrees centigrade*/
43352+ u8 rsvd1[3];
43353 };
43354
43355 struct be_cmd_req_vlan_config {
43356@@ -581,30 +719,22 @@ struct be_cmd_req_vlan_config {
43357 u16 normal_vlan[64];
43358 } __packed;
43359
43360-struct be_cmd_req_promiscuous_config {
43361- struct be_cmd_req_hdr hdr;
43362- u8 port0_promiscuous;
43363- u8 port1_promiscuous;
43364- u16 rsvd0;
43365-} __packed;
43366-
43367+/******************** RX FILTER ******************************/
43368+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
43369 struct macaddr {
43370 u8 byte[ETH_ALEN];
43371 };
43372
43373-struct be_cmd_req_mcast_mac_config {
43374+struct be_cmd_req_rx_filter {
43375 struct be_cmd_req_hdr hdr;
43376- u16 num_mac;
43377- u8 promiscuous;
43378- u8 interface_id;
43379- struct macaddr mac[32];
43380-} __packed;
43381-
43382-static inline struct be_hw_stats *
43383-hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
43384-{
43385- return &cmd->hw_stats;
43386-}
43387+ u32 global_flags_mask;
43388+ u32 global_flags;
43389+ u32 if_flags_mask;
43390+ u32 if_flags;
43391+ u32 if_id;
43392+ u32 mcast_num;
43393+ struct macaddr mcast_mac[BE_MAX_MC];
43394+};
43395
43396 /******************** Link Status Query *******************/
43397 struct be_cmd_req_link_status {
43398@@ -619,13 +749,18 @@ enum {
43399 };
43400
43401 enum {
43402- PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43403+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
43404 PHY_LINK_SPEED_10MBPS = 0x1,
43405 PHY_LINK_SPEED_100MBPS = 0x2,
43406 PHY_LINK_SPEED_1GBPS = 0x3,
43407 PHY_LINK_SPEED_10GBPS = 0x4
43408 };
43409
43410+enum {
43411+ LINK_DOWN = 0x0,
43412+ LINK_UP = 0X1
43413+};
43414+
43415 struct be_cmd_resp_link_status {
43416 struct be_cmd_resp_hdr hdr;
43417 u8 physical_port;
43418@@ -634,9 +769,47 @@ struct be_cmd_resp_link_status {
43419 u8 mac_fault;
43420 u8 mgmt_mac_duplex;
43421 u8 mgmt_mac_speed;
43422- u16 rsvd0;
43423+ u16 link_speed;
43424+ u32 logical_link_status;
43425 } __packed;
43426
43427+/******************** Port Identification ***************************/
43428+/* Identifies the type of port attached to NIC */
43429+struct be_cmd_req_port_type {
43430+ struct be_cmd_req_hdr hdr;
43431+ u32 page_num;
43432+ u32 port;
43433+};
43434+
43435+enum {
43436+ TR_PAGE_A0 = 0xa0,
43437+ TR_PAGE_A2 = 0xa2
43438+};
43439+
43440+struct be_cmd_resp_port_type {
43441+ struct be_cmd_resp_hdr hdr;
43442+ u32 page_num;
43443+ u32 port;
43444+ struct data {
43445+ u8 identifier;
43446+ u8 identifier_ext;
43447+ u8 connector;
43448+ u8 transceiver[8];
43449+ u8 rsvd0[3];
43450+ u8 length_km;
43451+ u8 length_hm;
43452+ u8 length_om1;
43453+ u8 length_om2;
43454+ u8 length_cu;
43455+ u8 length_cu_m;
43456+ u8 vendor_name[16];
43457+ u8 rsvd;
43458+ u8 vendor_oui[3];
43459+ u8 vendor_pn[16];
43460+ u8 vendor_rev[4];
43461+ } data;
43462+};
43463+
43464 /******************** Get FW Version *******************/
43465 struct be_cmd_req_get_fw_version {
43466 struct be_cmd_req_hdr hdr;
43467@@ -686,9 +859,13 @@ struct be_cmd_resp_modify_eq_delay {
43468 } __packed;
43469
43470 /******************** Get FW Config *******************/
43471+#define FLEX10_MODE 0x400
43472+#define VNIC_MODE 0x20000
43473+#define UMC_ENABLED 0x1000000
43474+
43475 struct be_cmd_req_query_fw_cfg {
43476 struct be_cmd_req_hdr hdr;
43477- u32 rsvd[30];
43478+ u32 rsvd[31];
43479 };
43480
43481 struct be_cmd_resp_query_fw_cfg {
43482@@ -696,10 +873,61 @@ struct be_cmd_resp_query_fw_cfg {
43483 u32 be_config_number;
43484 u32 asic_revision;
43485 u32 phys_port;
43486- u32 function_cap;
43487+ u32 function_mode;
43488 u32 rsvd[26];
43489+ u32 function_caps;
43490 };
43491
43492+/******************** RSS Config *******************/
43493+/* RSS types */
43494+#define RSS_ENABLE_NONE 0x0
43495+#define RSS_ENABLE_IPV4 0x1
43496+#define RSS_ENABLE_TCP_IPV4 0x2
43497+#define RSS_ENABLE_IPV6 0x4
43498+#define RSS_ENABLE_TCP_IPV6 0x8
43499+
43500+struct be_cmd_req_rss_config {
43501+ struct be_cmd_req_hdr hdr;
43502+ u32 if_id;
43503+ u16 enable_rss;
43504+ u16 cpu_table_size_log2;
43505+ u32 hash[10];
43506+ u8 cpu_table[128];
43507+ u8 flush;
43508+ u8 rsvd0[3];
43509+};
43510+
43511+/******************** Port Beacon ***************************/
43512+
43513+#define BEACON_STATE_ENABLED 0x1
43514+#define BEACON_STATE_DISABLED 0x0
43515+
43516+struct be_cmd_req_enable_disable_beacon {
43517+ struct be_cmd_req_hdr hdr;
43518+ u8 port_num;
43519+ u8 beacon_state;
43520+ u8 beacon_duration;
43521+ u8 status_duration;
43522+} __packed;
43523+
43524+struct be_cmd_resp_enable_disable_beacon {
43525+ struct be_cmd_resp_hdr resp_hdr;
43526+ u32 rsvd0;
43527+} __packed;
43528+
43529+struct be_cmd_req_get_beacon_state {
43530+ struct be_cmd_req_hdr hdr;
43531+ u8 port_num;
43532+ u8 rsvd0;
43533+ u16 rsvd1;
43534+} __packed;
43535+
43536+struct be_cmd_resp_get_beacon_state {
43537+ struct be_cmd_resp_hdr resp_hdr;
43538+ u8 beacon_state;
43539+ u8 rsvd0[3];
43540+} __packed;
43541+
43542 /****************** Firmware Flash ******************/
43543 struct flashrom_params {
43544 u32 op_code;
43545@@ -714,17 +942,468 @@ struct be_cmd_write_flashrom {
43546 struct flashrom_params params;
43547 };
43548
43549+/************************ WOL *******************************/
43550+struct be_cmd_req_acpi_wol_magic_config {
43551+ struct be_cmd_req_hdr hdr;
43552+ u32 rsvd0[145];
43553+ u8 magic_mac[6];
43554+ u8 rsvd2[2];
43555+} __packed;
43556+
43557+/********************** LoopBack test *********************/
43558+struct be_cmd_req_loopback_test {
43559+ struct be_cmd_req_hdr hdr;
43560+ u32 loopback_type;
43561+ u32 num_pkts;
43562+ u64 pattern;
43563+ u32 src_port;
43564+ u32 dest_port;
43565+ u32 pkt_size;
43566+};
43567+
43568+struct be_cmd_resp_loopback_test {
43569+ struct be_cmd_resp_hdr resp_hdr;
43570+ u32 status;
43571+ u32 num_txfer;
43572+ u32 num_rx;
43573+ u32 miscomp_off;
43574+ u32 ticks_compl;
43575+};
43576+
43577+struct be_cmd_req_set_lmode {
43578+ struct be_cmd_req_hdr hdr;
43579+ u8 src_port;
43580+ u8 dest_port;
43581+ u8 loopback_type;
43582+ u8 loopback_state;
43583+};
43584+
43585+struct be_cmd_resp_set_lmode {
43586+ struct be_cmd_resp_hdr resp_hdr;
43587+ u8 rsvd0[4];
43588+};
43589+
43590+/********************** DDR DMA test *********************/
43591+struct be_cmd_req_ddrdma_test {
43592+ struct be_cmd_req_hdr hdr;
43593+ u64 pattern;
43594+ u32 byte_count;
43595+ u32 rsvd0;
43596+ u8 snd_buff[4096];
43597+ u8 rsvd1[4096];
43598+};
43599+
43600+struct be_cmd_resp_ddrdma_test {
43601+ struct be_cmd_resp_hdr hdr;
43602+ u64 pattern;
43603+ u32 byte_cnt;
43604+ u32 snd_err;
43605+ u8 rsvd0[4096];
43606+ u8 rcv_buff[4096];
43607+};
43608+
43609+/*********************** SEEPROM Read ***********************/
43610+
43611+#define BE_READ_SEEPROM_LEN 1024
43612+struct be_cmd_req_seeprom_read {
43613+ struct be_cmd_req_hdr hdr;
43614+ u8 rsvd0[BE_READ_SEEPROM_LEN];
43615+};
43616+
43617+struct be_cmd_resp_seeprom_read {
43618+ struct be_cmd_req_hdr hdr;
43619+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
43620+};
43621+
43622+enum {
43623+ PHY_TYPE_CX4_10GB = 0,
43624+ PHY_TYPE_XFP_10GB,
43625+ PHY_TYPE_SFP_1GB,
43626+ PHY_TYPE_SFP_PLUS_10GB,
43627+ PHY_TYPE_KR_10GB,
43628+ PHY_TYPE_KX4_10GB,
43629+ PHY_TYPE_BASET_10GB,
43630+ PHY_TYPE_BASET_1GB,
43631+ PHY_TYPE_BASEX_1GB,
43632+ PHY_TYPE_SGMII,
43633+ PHY_TYPE_DISABLED = 255
43634+};
43635+
43636+#define BE_AN_EN 0x2
43637+#define BE_PAUSE_SYM_EN 0x80
43638+
43639+struct be_cmd_req_get_phy_info {
43640+ struct be_cmd_req_hdr hdr;
43641+ u8 rsvd0[24];
43642+};
43643+
43644+struct be_phy_info {
43645+ u16 phy_type;
43646+ u16 interface_type;
43647+ u32 misc_params;
43648+ u16 ext_phy_details;
43649+ u16 rsvd;
43650+ u16 auto_speeds_supported;
43651+ u16 fixed_speeds_supported;
43652+ u32 future_use[2];
43653+};
43654+
43655+struct be_cmd_resp_get_phy_info {
43656+ struct be_cmd_req_hdr hdr;
43657+ struct be_phy_info phy_info;
43658+};
43659+
43660+/*********************** Set QOS ***********************/
43661+
43662+#define BE_QOS_BITS_NIC 1
43663+
43664+struct be_cmd_req_set_qos {
43665+ struct be_cmd_req_hdr hdr;
43666+ u32 valid_bits;
43667+ u32 max_bps_nic;
43668+ u32 rsvd[7];
43669+};
43670+
43671+struct be_cmd_resp_set_qos {
43672+ struct be_cmd_resp_hdr hdr;
43673+ u32 rsvd;
43674+};
43675+
43676+/*********************** Controller Attributes ***********************/
43677+struct be_cmd_req_cntl_attribs {
43678+ struct be_cmd_req_hdr hdr;
43679+};
43680+
43681+struct be_cmd_resp_cntl_attribs {
43682+ struct be_cmd_resp_hdr hdr;
43683+ struct mgmt_controller_attrib attribs;
43684+};
43685+
43686+/******************* get port names ***************/
43687+struct be_cmd_req_get_port_name {
43688+ struct be_cmd_req_hdr hdr;
43689+ u32 rsvd0;
43690+};
43691+
43692+struct be_cmd_resp_get_port_name {
43693+ struct be_cmd_req_hdr hdr;
43694+ u8 port0_name;
43695+ u8 port1_name;
43696+ u8 rsvd0[2];
43697+};
43698+
43699+struct be_cmd_resp_get_port_name_v1 {
43700+ struct be_cmd_req_hdr hdr;
43701+ u32 pt : 2;
43702+ u32 rsvd0 : 30;
43703+ u8 port0_name;
43704+ u8 port1_name;
43705+ u8 port2_name;
43706+ u8 port3_name;
43707+};
43708+
43709+/*********************** Set driver function ***********************/
43710+#define CAPABILITY_SW_TIMESTAMPS 2
43711+#define CAPABILITY_BE3_NATIVE_ERX_API 4
43712+
43713+struct be_cmd_req_set_func_cap {
43714+ struct be_cmd_req_hdr hdr;
43715+ u32 valid_cap_flags;
43716+ u32 cap_flags;
43717+ u8 rsvd[212];
43718+};
43719+
43720+struct be_cmd_resp_set_func_cap {
43721+ struct be_cmd_resp_hdr hdr;
43722+ u32 valid_cap_flags;
43723+ u32 cap_flags;
43724+ u8 rsvd[212];
43725+};
43726+
43727+/*********************** PG Query Request ****************************/
43728+#define REQ_PG_QUERY 0x1
43729+#define REQ_PG_FEAT 0x1
43730+struct be_cmd_req_pg {
43731+ struct be_cmd_req_hdr hdr;
43732+ u32 query;
43733+ u32 pfc_pg;
43734+};
43735+
43736+struct be_cmd_resp_pg {
43737+ struct be_cmd_resp_hdr hdr;
43738+ u32 pfc_pg;
43739+ u32 num_tx_rings;
43740+};
43741+
43742+/*********************** Function Privileges ***********************/
43743+enum {
43744+ BE_PRIV_DEFAULT = 0x1,
43745+ BE_PRIV_LNKQUERY = 0x2,
43746+ BE_PRIV_LNKSTATS = 0x4,
43747+ BE_PRIV_LNKMGMT = 0x8,
43748+ BE_PRIV_LNKDIAG = 0x10,
43749+ BE_PRIV_UTILQUERY = 0x20,
43750+ BE_PRIV_FILTMGMT = 0x40,
43751+ BE_PRIV_IFACEMGMT = 0x80,
43752+ BE_PRIV_VHADM = 0x100,
43753+ BE_PRIV_DEVCFG = 0x200,
43754+ BE_PRIV_DEVSEC = 0x400
43755+};
43756+
43757+struct be_cmd_req_get_fn_privileges {
43758+ struct be_cmd_req_hdr hdr;
43759+ u32 rsvd;
43760+};
43761+
43762+struct be_cmd_resp_get_fn_privileges {
43763+ struct be_cmd_resp_hdr hdr;
43764+ u32 privilege_mask;
43765+};
43766+
43767+struct be_cmd_req_set_fn_privileges {
43768+ struct be_cmd_req_hdr hdr;
43769+ u32 privilege_mask;
43770+};
43771+
43772+struct be_cmd_resp_set_fn_privileges {
43773+ struct be_cmd_resp_hdr hdr;
43774+ u32 prev_privilege_mask;
43775+};
43776+
43777+/*********************** HSW Config ***********************/
43778+struct amap_set_hsw_context {
43779+ u8 interface_id[16];
43780+ u8 rsvd0[14];
43781+ u8 pvid_valid;
43782+ u8 rsvd1;
43783+ u8 rsvd2[16];
43784+ u8 pvid[16];
43785+ u8 rsvd3[32];
43786+ u8 rsvd4[32];
43787+ u8 rsvd5[32];
43788+} __packed;
43789+
43790+struct be_cmd_req_set_hsw_config {
43791+ struct be_cmd_req_hdr hdr;
43792+ u8 context[sizeof(struct amap_set_hsw_context) / 8];
43793+} __packed;
43794+
43795+struct be_cmd_resp_set_hsw_config {
43796+ struct be_cmd_resp_hdr hdr;
43797+ u32 rsvd;
43798+};
43799+
43800+struct amap_get_hsw_req_context {
43801+ u8 interface_id[16];
43802+ u8 rsvd0[14];
43803+ u8 pvid_valid;
43804+ u8 pport;
43805+} __packed;
43806+
43807+struct amap_get_hsw_resp_context {
43808+ u8 rsvd1[16];
43809+ u8 pvid[16];
43810+ u8 rsvd2[32];
43811+ u8 rsvd3[32];
43812+ u8 rsvd4[32];
43813+} __packed;
43814+
43815+struct be_cmd_req_get_hsw_config {
43816+ struct be_cmd_req_hdr hdr;
43817+ u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
43818+} __packed;
43819+
43820+struct be_cmd_resp_get_hsw_config {
43821+ struct be_cmd_resp_hdr hdr;
43822+ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
43823+ u32 rsvd;
43824+};
43825+
43826+/*************** Set speed ********************/
43827+struct be_cmd_req_set_port_speed_v1 {
43828+ struct be_cmd_req_hdr hdr;
43829+ u8 port_num;
43830+ u8 virt_port;
43831+ u16 mac_speed;
43832+ u16 dac_cable_length;
43833+ u16 rsvd0;
43834+};
43835+
43836+struct be_cmd_resp_set_port_speed_v1 {
43837+ struct be_cmd_resp_hdr hdr;
43838+ u32 rsvd0;
43839+};
43840+
43841+/************** get port speed *******************/
43842+struct be_cmd_req_get_port_speed {
43843+ struct be_cmd_req_hdr hdr;
43844+ u8 port_num;
43845+};
43846+
43847+struct be_cmd_resp_get_port_speed {
43848+ struct be_cmd_req_hdr hdr;
43849+ u16 mac_speed;
43850+ u16 dac_cable_length;
43851+};
43852+
43853+/*************** HW Stats Get v1 **********************************/
43854+#define BE_TXP_SW_SZ 48
43855+struct be_port_rxf_stats_v1 {
43856+ u32 rsvd0[12];
43857+ u32 rx_crc_errors;
43858+ u32 rx_alignment_symbol_errors;
43859+ u32 rx_pause_frames;
43860+ u32 rx_priority_pause_frames;
43861+ u32 rx_control_frames;
43862+ u32 rx_in_range_errors;
43863+ u32 rx_out_range_errors;
43864+ u32 rx_frame_too_long;
43865+ u32 rx_address_match_errors;
43866+ u32 rx_dropped_too_small;
43867+ u32 rx_dropped_too_short;
43868+ u32 rx_dropped_header_too_small;
43869+ u32 rx_dropped_tcp_length;
43870+ u32 rx_dropped_runt;
43871+ u32 rsvd1[10];
43872+ u32 rx_ip_checksum_errs;
43873+ u32 rx_tcp_checksum_errs;
43874+ u32 rx_udp_checksum_errs;
43875+ u32 rsvd2[7];
43876+ u32 rx_switched_unicast_packets;
43877+ u32 rx_switched_multicast_packets;
43878+ u32 rx_switched_broadcast_packets;
43879+ u32 rsvd3[3];
43880+ u32 tx_pauseframes;
43881+ u32 tx_priority_pauseframes;
43882+ u32 tx_controlframes;
43883+ u32 rsvd4[10];
43884+ u32 rxpp_fifo_overflow_drop;
43885+ u32 rx_input_fifo_overflow_drop;
43886+ u32 pmem_fifo_overflow_drop;
43887+ u32 jabber_events;
43888+ u32 rsvd5[3];
43889+};
43890+
43891+
43892+struct be_rxf_stats_v1 {
43893+ struct be_port_rxf_stats_v1 port[4];
43894+ u32 rsvd0[2];
43895+ u32 rx_drops_no_pbuf;
43896+ u32 rx_drops_no_txpb;
43897+ u32 rx_drops_no_erx_descr;
43898+ u32 rx_drops_no_tpre_descr;
43899+ u32 rsvd1[6];
43900+ u32 rx_drops_too_many_frags;
43901+ u32 rx_drops_invalid_ring;
43902+ u32 forwarded_packets;
43903+ u32 rx_drops_mtu;
43904+ u32 rsvd2[14];
43905+};
43906+
43907+struct be_erx_stats_v1 {
43908+ u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
43909+ u32 rsvd[4];
43910+};
43911+
43912+struct be_hw_stats_v1 {
43913+ struct be_rxf_stats_v1 rxf;
43914+ u32 rsvd0[BE_TXP_SW_SZ];
43915+ struct be_erx_stats_v1 erx;
43916+ struct be_pmem_stats pmem;
43917+ u32 rsvd1[3];
43918+};
43919+
43920+struct be_cmd_req_get_stats_v1 {
43921+ struct be_cmd_req_hdr hdr;
43922+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
43923+};
43924+
43925+struct be_cmd_resp_get_stats_v1 {
43926+ struct be_cmd_resp_hdr hdr;
43927+ struct be_hw_stats_v1 hw_stats;
43928+};
43929+
43930+static inline void *
43931+hw_stats_from_cmd(struct be_adapter *adapter)
43932+{
43933+ if (adapter->generation == BE_GEN3) {
43934+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
43935+
43936+ return &cmd->hw_stats;
43937+ } else {
43938+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
43939+
43940+ return &cmd->hw_stats;
43941+ }
43942+}
43943+
43944+static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
43945+{
43946+ if (adapter->generation == BE_GEN3) {
43947+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43948+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
43949+
43950+ return &rxf_stats->port[adapter->port_num];
43951+ } else {
43952+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43953+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
43954+
43955+ return &rxf_stats->port[adapter->port_num];
43956+ }
43957+}
43958+
43959+static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
43960+{
43961+ if (adapter->generation == BE_GEN3) {
43962+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43963+
43964+ return &hw_stats->rxf;
43965+ } else {
43966+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43967+
43968+ return &hw_stats->rxf;
43969+ }
43970+}
43971+
43972+static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
43973+{
43974+ if (adapter->generation == BE_GEN3) {
43975+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43976+
43977+ return &hw_stats->erx;
43978+ } else {
43979+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43980+
43981+ return &hw_stats->erx;
43982+ }
43983+}
43984+
43985+static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
43986+{
43987+ if (adapter->generation == BE_GEN3) {
43988+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43989+
43990+ return &hw_stats->pmem;
43991+ } else {
43992+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43993+
43994+ return &hw_stats->pmem;
43995+ }
43996+}
43997+
43998 extern int be_pci_fnum_get(struct be_adapter *adapter);
43999 extern int be_cmd_POST(struct be_adapter *adapter);
44000 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
44001 u8 type, bool permanent, u32 if_handle);
44002 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
44003- u32 if_id, u32 *pmac_id);
44004-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
44005+ u32 if_id, u32 *pmac_id, u32 domain);
44006+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id,
44007+ u32 domain);
44008 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
44009 u32 en_flags, u8 *mac, bool pmac_invalid,
44010- u32 *if_handle, u32 *pmac_id);
44011-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
44012+ u32 *if_handle, u32 *pmac_id, u32 domain);
44013+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
44014+ u32 domain);
44015 extern int be_cmd_eq_create(struct be_adapter *adapter,
44016 struct be_queue_info *eq, int eq_delay);
44017 extern int be_cmd_cq_create(struct be_adapter *adapter,
44018@@ -736,36 +1415,92 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
44019 struct be_queue_info *cq);
44020 extern int be_cmd_txq_create(struct be_adapter *adapter,
44021 struct be_queue_info *txq,
44022- struct be_queue_info *cq);
44023+ struct be_queue_info *cq, u8 *tc_id);
44024 extern int be_cmd_rxq_create(struct be_adapter *adapter,
44025 struct be_queue_info *rxq, u16 cq_id,
44026 u16 frag_size, u16 max_frame_size, u32 if_id,
44027- u32 rss);
44028+ u32 rss, u8 *rss_id);
44029 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
44030 int type);
44031+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
44032+ struct be_queue_info *q);
44033 extern int be_cmd_link_status_query(struct be_adapter *adapter,
44034- bool *link_up);
44035+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom);
44036 extern int be_cmd_reset(struct be_adapter *adapter);
44037 extern int be_cmd_get_stats(struct be_adapter *adapter,
44038 struct be_dma_mem *nonemb_cmd);
44039-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
44040+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
44041+ char *fw_on_flash);
44042
44043 extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
44044 extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
44045 u16 *vtag_array, u32 num, bool untagged,
44046 bool promiscuous);
44047-extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
44048- u8 port_num, bool en);
44049-extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
44050- struct dev_mc_list *mc_list, u32 mc_count);
44051+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
44052 extern int be_cmd_set_flow_control(struct be_adapter *adapter,
44053 u32 tx_fc, u32 rx_fc);
44054 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
44055 u32 *tx_fc, u32 *rx_fc);
44056-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
44057- u32 *port_num, u32 *cap);
44058+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
44059+ u32 *function_mode, u32 *functions_caps);
44060 extern int be_cmd_reset_function(struct be_adapter *adapter);
44061-extern int be_process_mcc(struct be_adapter *adapter);
44062+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
44063+ u16 table_size);
44064+extern int be_process_mcc(struct be_adapter *adapter, int *status);
44065+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
44066+ u8 port_num, u8 beacon, u8 status, u8 state);
44067+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
44068+ u8 port_num, u32 *state);
44069+extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
44070+ u8 *connector);
44071 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
44072 struct be_dma_mem *cmd, u32 flash_oper,
44073 u32 flash_opcode, u32 buf_size);
44074+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
44075+ int offset);
44076+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
44077+ struct be_dma_mem *nonemb_cmd);
44078+extern int be_cmd_fw_init(struct be_adapter *adapter);
44079+extern int be_cmd_fw_clean(struct be_adapter *adapter);
44080+extern void be_async_mcc_enable(struct be_adapter *adapter);
44081+extern void be_async_mcc_disable(struct be_adapter *adapter);
44082+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
44083+ u32 loopback_type, u32 pkt_size,
44084+ u32 num_pkts, u64 pattern);
44085+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
44086+ u32 byte_cnt, struct be_dma_mem *cmd);
44087+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
44088+ struct be_dma_mem *nonemb_cmd);
44089+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
44090+ u8 loopback_type, u8 enable);
44091+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
44092+ struct be_phy_info *phy_info);
44093+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
44094+extern void be_detect_dump_ue(struct be_adapter *adapter);
44095+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
44096+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
44097+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
44098+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
44099+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
44100+extern int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name);
44101+extern int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name);
44102+extern int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs);
44103+
44104+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
44105+ u32 *privilege, u32 domain);
44106+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
44107+ u32 mask, u32 *prev, u32 domain);
44108+extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
44109+ u32 domain, u16 intf_id);
44110+extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
44111+ u32 domain, u16 intf_id);
44112+extern int be_cmd_set_port_speed_v1(struct be_adapter *adapter, u8 port_num,
44113+ u16 mac_speed, u16 dac_cable_len);
44114+extern int be_cmd_get_port_speed(struct be_adapter *adapter, u8 port_num,
44115+ u16 *dac_cable_len, u16 *port_speed);
44116+#ifdef CONFIG_PALAU
44117+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
44118+ int req_size, void *va);
44119+#endif
44120+
44121+#endif /* !BE_CMDS_H */
44122diff --git a/drivers/net/benet/be_compat.c b/drivers/net/benet/be_compat.c
44123new file mode 100644
44124index 0000000..bdd1dba
44125--- /dev/null
44126+++ b/drivers/net/benet/be_compat.c
44127@@ -0,0 +1,630 @@
44128+/*
44129+ * Copyright (C) 2005 - 2011 Emulex
44130+ * All rights reserved.
44131+ *
44132+ * This program is free software; you can redistribute it and/or
44133+ * modify it under the terms of the GNU General Public License version 2
44134+ * as published by the Free Software Foundation. The full GNU General
44135+ * Public License is included in this distribution in the file called COPYING.
44136+ *
44137+ * Contact Information:
44138+ * linux-drivers@emulex.com
44139+ *
44140+ * Emulex
44141+ * 3333 Susan Street
44142+ * Costa Mesa, CA 92626
44143+ */
44144+
44145+#include "be.h"
44146+
44147+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44148+void be_netdev_ops_init(struct net_device *netdev, struct net_device_ops *ops)
44149+{
44150+ netdev->open = ops->ndo_open;
44151+ netdev->stop = ops->ndo_stop;
44152+ netdev->hard_start_xmit = ops->ndo_start_xmit;
44153+ netdev->set_mac_address = ops->ndo_set_mac_address;
44154+ netdev->get_stats = ops->ndo_get_stats;
44155+ netdev->set_multicast_list = ops->ndo_set_rx_mode;
44156+ netdev->change_mtu = ops->ndo_change_mtu;
44157+ netdev->vlan_rx_register = ops->ndo_vlan_rx_register;
44158+ netdev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
44159+ netdev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
44160+ netdev->do_ioctl = ops->ndo_do_ioctl;
44161+#ifdef CONFIG_NET_POLL_CONTROLLER
44162+ netdev->poll_controller = ops->ndo_poll_controller;
44163+#endif
44164+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
44165+ netdev->select_queue = ops->ndo_select_queue;
44166+#endif
44167+}
44168+#endif
44169+
44170+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44171+int eth_validate_addr(struct net_device *netdev)
44172+{
44173+ return 0;
44174+}
44175+#endif
44176+
44177+/* New NAPI backport */
44178+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
44179+
44180+int be_poll_compat(struct net_device *netdev, int *budget)
44181+{
44182+ struct napi_struct *napi = netdev->priv;
44183+ u32 work_done, can_do;
44184+
44185+ can_do = min(*budget, netdev->quota);
44186+ work_done = napi->poll(napi, can_do);
44187+
44188+ *budget -= work_done;
44189+ netdev->quota -= work_done;
44190+ if (napi->rx)
44191+ return (work_done >= can_do);
44192+ return 0;
44193+}
44194+
44195+
44196+#endif /* New NAPI backport */
44197+
44198+int be_netif_napi_add(struct net_device *netdev,
44199+ struct napi_struct *napi,
44200+ int (*poll) (struct napi_struct *, int), int weight)
44201+{
44202+#ifdef HAVE_SIMULATED_MULTI_NAPI
44203+ struct be_adapter *adapter = netdev_priv(netdev);
44204+ struct net_device *nd;
44205+
44206+ nd = alloc_netdev(0, "", ether_setup);
44207+ if (!nd)
44208+ return -ENOMEM;
44209+ nd->priv = napi;
44210+ nd->weight = BE_NAPI_WEIGHT;
44211+ nd->poll = be_poll_compat;
44212+ set_bit(__LINK_STATE_START, &nd->state);
44213+
44214+ if (napi == &adapter->rx_obj[0].rx_eq.napi)
44215+ napi->rx = true;
44216+ napi->poll = poll;
44217+ napi->dev = nd;
44218+#ifdef RHEL_NEW_NAPI
44219+ napi->napi.dev = netdev;
44220+#endif
44221+ return 0;
44222+#else
44223+ netif_napi_add(netdev, napi, poll, weight);
44224+ return 0;
44225+#endif
44226+}
44227+void be_netif_napi_del(struct net_device *netdev)
44228+{
44229+#ifdef HAVE_SIMULATED_MULTI_NAPI
44230+ struct be_adapter *adapter = netdev_priv(netdev);
44231+ struct napi_struct *napi;
44232+ struct be_rx_obj *rxo;
44233+ int i;
44234+
44235+ for_all_rx_queues(adapter, rxo, i) {
44236+ napi = &rxo->rx_eq.napi;
44237+ if (napi->dev) {
44238+ free_netdev(napi->dev);
44239+ napi->dev = NULL;
44240+ }
44241+ }
44242+
44243+ napi = &adapter->tx_eq.napi;
44244+ if (napi->dev) {
44245+ free_netdev(napi->dev);
44246+ napi->dev = NULL;
44247+ }
44248+#endif
44249+}
44250+/* INET_LRO backport */
44251+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
44252+
44253+#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
44254+#define IP_HDR_LEN(iph) (iph->ihl << 2)
44255+#define TCP_PAYLOAD_LENGTH(iph, tcph) (ntohs(iph->tot_len) - IP_HDR_LEN(iph) \
44256+ - TCP_HDR_LEN(tcph))
44257+
44258+#define IPH_LEN_WO_OPTIONS 5
44259+#define TCPH_LEN_WO_OPTIONS 5
44260+#define TCPH_LEN_W_TIMESTAMP 8
44261+
44262+#define LRO_MAX_PG_HLEN 64
44263+#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
44264+/*
44265+ * Basic tcp checks whether packet is suitable for LRO
44266+ */
44267+static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
44268+ int len, struct net_lro_desc *lro_desc)
44269+{
44270+ /* check ip header: don't aggregate padded frames */
44271+ if (ntohs(iph->tot_len) != len)
44272+ return -1;
44273+
44274+ if (iph->ihl != IPH_LEN_WO_OPTIONS)
44275+ return -1;
44276+
44277+ if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
44278+ || tcph->rst || tcph->syn || tcph->fin)
44279+ return -1;
44280+
44281+ if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
44282+ return -1;
44283+
44284+ if (tcph->doff != TCPH_LEN_WO_OPTIONS
44285+ && tcph->doff != TCPH_LEN_W_TIMESTAMP)
44286+ return -1;
44287+
44288+ /* check tcp options (only timestamp allowed) */
44289+ if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
44290+ u32 *topt = (u32 *)(tcph + 1);
44291+
44292+ if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
44293+ | (TCPOPT_TIMESTAMP << 8)
44294+ | TCPOLEN_TIMESTAMP))
44295+ return -1;
44296+
44297+ /* timestamp should be in right order */
44298+ topt++;
44299+ if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
44300+ ntohl(*topt)))
44301+ return -1;
44302+
44303+ /* timestamp reply should not be zero */
44304+ topt++;
44305+ if (*topt == 0)
44306+ return -1;
44307+ }
44308+
44309+ return 0;
44310+}
44311+
44312+static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
44313+{
44314+ struct iphdr *iph = lro_desc->iph;
44315+ struct tcphdr *tcph = lro_desc->tcph;
44316+ u32 *p;
44317+ __wsum tcp_hdr_csum;
44318+
44319+ tcph->ack_seq = lro_desc->tcp_ack;
44320+ tcph->window = lro_desc->tcp_window;
44321+
44322+ if (lro_desc->tcp_saw_tstamp) {
44323+ p = (u32 *)(tcph + 1);
44324+ *(p+2) = lro_desc->tcp_rcv_tsecr;
44325+ }
44326+
44327+ iph->tot_len = htons(lro_desc->ip_tot_len);
44328+
44329+ iph->check = 0;
44330+ iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
44331+
44332+ tcph->check = 0;
44333+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
44334+ lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
44335+ tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
44336+ lro_desc->ip_tot_len -
44337+ IP_HDR_LEN(iph), IPPROTO_TCP,
44338+ lro_desc->data_csum);
44339+}
44340+
44341+static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
44342+{
44343+ __wsum tcp_csum;
44344+ __wsum tcp_hdr_csum;
44345+ __wsum tcp_ps_hdr_csum;
44346+
44347+ tcp_csum = ~csum_unfold(tcph->check);
44348+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
44349+
44350+ tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
44351+ len + TCP_HDR_LEN(tcph),
44352+ IPPROTO_TCP, 0);
44353+
44354+ return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
44355+ tcp_ps_hdr_csum);
44356+}
44357+
44358+static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
44359+ struct iphdr *iph, struct tcphdr *tcph,
44360+ u16 vlan_tag, struct vlan_group *vgrp)
44361+{
44362+ int nr_frags;
44363+ u32 *ptr;
44364+ u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44365+
44366+ nr_frags = skb_shinfo(skb)->nr_frags;
44367+ lro_desc->parent = skb;
44368+ lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
44369+ lro_desc->iph = iph;
44370+ lro_desc->tcph = tcph;
44371+ lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
44372+ lro_desc->tcp_ack = ntohl(tcph->ack_seq);
44373+ lro_desc->tcp_window = tcph->window;
44374+
44375+ lro_desc->pkt_aggr_cnt = 1;
44376+ lro_desc->ip_tot_len = ntohs(iph->tot_len);
44377+
44378+ if (tcph->doff == 8) {
44379+ ptr = (u32 *)(tcph+1);
44380+ lro_desc->tcp_saw_tstamp = 1;
44381+ lro_desc->tcp_rcv_tsval = *(ptr+1);
44382+ lro_desc->tcp_rcv_tsecr = *(ptr+2);
44383+ }
44384+
44385+ lro_desc->mss = tcp_data_len;
44386+ lro_desc->vgrp = vgrp;
44387+ lro_desc->vlan_tag = vlan_tag;
44388+ lro_desc->active = 1;
44389+
44390+ if (tcp_data_len)
44391+ lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
44392+ tcp_data_len);
44393+
44394+ if (!tcp_data_len)
44395+ lro_desc->ack_cnt++;
44396+}
44397+
44398+static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
44399+{
44400+ memset(lro_desc, 0, sizeof(struct net_lro_desc));
44401+}
44402+
44403+static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
44404+ struct tcphdr *tcph, int tcp_data_len)
44405+{
44406+ struct sk_buff *parent = lro_desc->parent;
44407+ u32 *topt;
44408+
44409+ lro_desc->pkt_aggr_cnt++;
44410+ lro_desc->ip_tot_len += tcp_data_len;
44411+ lro_desc->tcp_next_seq += tcp_data_len;
44412+ lro_desc->tcp_window = tcph->window;
44413+ lro_desc->tcp_ack = tcph->ack_seq;
44414+
44415+ /* don't update tcp_rcv_tsval, would not work with PAWS */
44416+ if (lro_desc->tcp_saw_tstamp) {
44417+ topt = (u32 *) (tcph + 1);
44418+ lro_desc->tcp_rcv_tsecr = *(topt + 2);
44419+ }
44420+
44421+ if (tcp_data_len)
44422+ lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
44423+ lro_tcp_data_csum(iph, tcph,
44424+ tcp_data_len),
44425+ parent->len);
44426+
44427+ parent->len += tcp_data_len;
44428+ parent->data_len += tcp_data_len;
44429+ if (tcp_data_len > lro_desc->mss)
44430+ lro_desc->mss = tcp_data_len;
44431+}
44432+
44433+static void lro_add_frags(struct net_lro_desc *lro_desc,
44434+ int len, int hlen, int truesize,
44435+ struct skb_frag_struct *skb_frags,
44436+ struct iphdr *iph, struct tcphdr *tcph)
44437+{
44438+ struct sk_buff *skb = lro_desc->parent;
44439+ int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
44440+
44441+ lro_add_common(lro_desc, iph, tcph, tcp_data_len);
44442+
44443+ skb->truesize += truesize;
44444+
44445+ if (!tcp_data_len) {
44446+ put_page(skb_frags[0].page);
44447+ lro_desc->ack_cnt++;
44448+ return;
44449+ }
44450+
44451+ skb_frags[0].page_offset += hlen;
44452+ skb_frags[0].size -= hlen;
44453+
44454+ while (tcp_data_len > 0) {
44455+ *(lro_desc->next_frag) = *skb_frags;
44456+ tcp_data_len -= skb_frags->size;
44457+ lro_desc->next_frag++;
44458+ skb_frags++;
44459+ skb_shinfo(skb)->nr_frags++;
44460+ }
44461+}
44462+
44463+static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
44464+ struct iphdr *iph,
44465+ struct tcphdr *tcph)
44466+{
44467+ if ((lro_desc->iph->saddr != iph->saddr)
44468+ || (lro_desc->iph->daddr != iph->daddr)
44469+ || (lro_desc->tcph->source != tcph->source)
44470+ || (lro_desc->tcph->dest != tcph->dest))
44471+ return -1;
44472+ return 0;
44473+}
44474+
44475+static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
44476+ struct net_lro_desc *lro_arr,
44477+ struct iphdr *iph,
44478+ struct tcphdr *tcph)
44479+{
44480+ struct net_lro_desc *lro_desc = NULL;
44481+ struct net_lro_desc *tmp;
44482+ int max_desc = lro_mgr->max_desc;
44483+ int i;
44484+
44485+ for (i = 0; i < max_desc; i++) {
44486+ tmp = &lro_arr[i];
44487+ if (tmp->active)
44488+ if (!lro_check_tcp_conn(tmp, iph, tcph)) {
44489+ lro_desc = tmp;
44490+ goto out;
44491+ }
44492+ }
44493+
44494+ for (i = 0; i < max_desc; i++) {
44495+ if (!lro_arr[i].active) {
44496+ lro_desc = &lro_arr[i];
44497+ goto out;
44498+ }
44499+ }
44500+
44501+ LRO_INC_STATS(lro_mgr, no_desc);
44502+out:
44503+ return lro_desc;
44504+}
44505+
44506+static void lro_flush(struct net_lro_mgr *lro_mgr,
44507+ struct net_lro_desc *lro_desc)
44508+{
44509+ struct be_adapter *adapter = netdev_priv(lro_mgr->dev);
44510+
44511+ if (lro_desc->pkt_aggr_cnt > 1)
44512+ lro_update_tcp_ip_header(lro_desc);
44513+
44514+ skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
44515+
44516+ if (lro_desc->vgrp) {
44517+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44518+ vlan_hwaccel_receive_skb(lro_desc->parent,
44519+ lro_desc->vgrp,
44520+ lro_desc->vlan_tag);
44521+ else
44522+ vlan_hwaccel_rx(lro_desc->parent,
44523+ lro_desc->vgrp,
44524+ lro_desc->vlan_tag);
44525+
44526+ } else {
44527+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44528+ netif_receive_skb(lro_desc->parent);
44529+ else
44530+ netif_rx(lro_desc->parent);
44531+ }
44532+
44533+ LRO_INC_STATS(lro_mgr, flushed);
44534+ lro_clear_desc(lro_desc);
44535+}
44536+
44537+static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
44538+ struct skb_frag_struct *frags,
44539+ int len, int true_size,
44540+ void *mac_hdr,
44541+ int hlen, __wsum sum,
44542+ u32 ip_summed)
44543+{
44544+ struct sk_buff *skb;
44545+ struct skb_frag_struct *skb_frags;
44546+ int data_len = len;
44547+ int hdr_len = min(len, hlen);
44548+
44549+ skb = netdev_alloc_skb(lro_mgr->dev, hlen);
44550+ if (!skb)
44551+ return NULL;
44552+
44553+ skb->len = len;
44554+ skb->data_len = len - hdr_len;
44555+ skb->truesize += true_size;
44556+ skb->tail += hdr_len;
44557+
44558+ memcpy(skb->data, mac_hdr, hdr_len);
44559+
44560+ if (skb->data_len) {
44561+ skb_frags = skb_shinfo(skb)->frags;
44562+ while (data_len > 0) {
44563+ *skb_frags = *frags;
44564+ data_len -= frags->size;
44565+ skb_frags++;
44566+ frags++;
44567+ skb_shinfo(skb)->nr_frags++;
44568+ }
44569+ skb_shinfo(skb)->frags[0].page_offset += hdr_len;
44570+ skb_shinfo(skb)->frags[0].size -= hdr_len;
44571+ } else {
44572+ put_page(frags[0].page);
44573+ }
44574+
44575+
44576+ skb->ip_summed = ip_summed;
44577+ skb->csum = sum;
44578+ skb->protocol = eth_type_trans(skb, lro_mgr->dev);
44579+ return skb;
44580+}
44581+
44582+static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
44583+ struct skb_frag_struct *frags,
44584+ int len, int true_size,
44585+ struct vlan_group *vgrp,
44586+ u16 vlan_tag, void *priv, __wsum sum)
44587+{
44588+ struct net_lro_desc *lro_desc;
44589+ struct iphdr *iph;
44590+ struct tcphdr *tcph;
44591+ struct sk_buff *skb;
44592+ u64 flags;
44593+ void *mac_hdr;
44594+ int mac_hdr_len;
44595+ int hdr_len = LRO_MAX_PG_HLEN;
44596+ int vlan_hdr_len = 0;
44597+ u8 pad_bytes;
44598+
44599+ if (!lro_mgr->get_frag_header
44600+ || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
44601+ (void *)&tcph, &flags, priv)) {
44602+ mac_hdr = page_address(frags->page) + frags->page_offset;
44603+ goto out1;
44604+ }
44605+
44606+ if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
44607+ goto out1;
44608+
44609+ hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
44610+ mac_hdr_len = (int)((void *)(iph) - mac_hdr);
44611+
44612+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
44613+ if (!lro_desc)
44614+ goto out1;
44615+
44616+ pad_bytes = len - (ntohs(iph->tot_len) + mac_hdr_len);
44617+ if (!TCP_PAYLOAD_LENGTH(iph, tcph) && pad_bytes) {
44618+ len -= pad_bytes; /* trim the packet */
44619+ frags[0].size -= pad_bytes;
44620+ true_size -= pad_bytes;
44621+ }
44622+
44623+ if (!lro_desc->active) { /* start new lro session */
44624+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
44625+ goto out1;
44626+
44627+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44628+ hdr_len, 0, lro_mgr->ip_summed_aggr);
44629+ if (!skb)
44630+ goto out;
44631+
44632+ if ((skb->protocol == htons(ETH_P_8021Q))
44633+ && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
44634+ vlan_hdr_len = VLAN_HLEN;
44635+
44636+ iph = (void *)(skb->data + vlan_hdr_len);
44637+ tcph = (void *)((u8 *)skb->data + vlan_hdr_len
44638+ + IP_HDR_LEN(iph));
44639+
44640+ lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
44641+ LRO_INC_STATS(lro_mgr, aggregated);
44642+ return 0;
44643+ }
44644+
44645+ if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
44646+ goto out2;
44647+
44648+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
44649+ goto out2;
44650+
44651+ lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
44652+ LRO_INC_STATS(lro_mgr, aggregated);
44653+
44654+ if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
44655+ lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
44656+ lro_flush(lro_mgr, lro_desc);
44657+
44658+ return NULL;
44659+
44660+out2: /* send aggregated packets to the stack */
44661+ lro_flush(lro_mgr, lro_desc);
44662+
44663+out1: /* Original packet has to be posted to the stack */
44664+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44665+ hdr_len, sum, lro_mgr->ip_summed);
44666+out:
44667+ return skb;
44668+}
44669+
44670+void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44671+ struct skb_frag_struct *frags,
44672+ int len, int true_size, void *priv, __wsum sum)
44673+{
44674+ struct sk_buff *skb;
44675+
44676+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
44677+ priv, sum);
44678+ if (!skb)
44679+ return;
44680+
44681+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44682+ netif_receive_skb(skb);
44683+ else
44684+ netif_rx(skb);
44685+}
44686+
44687+void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44688+ struct skb_frag_struct *frags,
44689+ int len, int true_size,
44690+ struct vlan_group *vgrp,
44691+ u16 vlan_tag, void *priv, __wsum sum)
44692+{
44693+ struct sk_buff *skb;
44694+
44695+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
44696+ vlan_tag, priv, sum);
44697+ if (!skb)
44698+ return;
44699+
44700+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44701+ vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
44702+ else
44703+ vlan_hwaccel_rx(skb, vgrp, vlan_tag);
44704+}
44705+
44706+void lro_flush_all_compat(struct net_lro_mgr *lro_mgr)
44707+{
44708+ int i;
44709+ struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
44710+
44711+ for (i = 0; i < lro_mgr->max_desc; i++) {
44712+ if (lro_desc[i].active)
44713+ lro_flush(lro_mgr, &lro_desc[i]);
44714+ }
44715+}
44716+#endif /* INET_LRO backport */
44717+
44718+#ifndef TX_MQ
44719+struct net_device *alloc_etherdev_mq_compat(int sizeof_priv,
44720+ unsigned int queue_count)
44721+{
44722+ return alloc_etherdev(sizeof_priv);
44723+}
44724+
44725+void netif_wake_subqueue_compat(struct net_device *dev, u16 queue_index)
44726+{
44727+ netif_wake_queue(dev);
44728+}
44729+
44730+void netif_stop_subqueue_compat(struct net_device *dev, u16 queue_index)
44731+{
44732+ netif_stop_queue(dev);
44733+}
44734+
44735+int __netif_subqueue_stopped_compat(const struct net_device *dev,
44736+ u16 queue_index)
44737+{
44738+ return netif_queue_stopped(dev);
44739+}
44740+
44741+u16 skb_get_queue_mapping_compat(const struct sk_buff *skb)
44742+{
44743+ return 0;
44744+}
44745+
44746+void netif_set_real_num_tx_queues_compat(struct net_device *dev,
44747+ unsigned int txq)
44748+{
44749+ return;
44750+}
44751+
44752+u16 skb_tx_hash_compat(const struct net_device *dev,
44753+ const struct sk_buff *skb)
44754+{
44755+ return 0;
44756+}
44757+#endif
44758diff --git a/drivers/net/benet/be_compat.h b/drivers/net/benet/be_compat.h
44759new file mode 100644
44760index 0000000..8ceecc8
44761--- /dev/null
44762+++ b/drivers/net/benet/be_compat.h
44763@@ -0,0 +1,621 @@
44764+/*
44765+ * Copyright (C) 2005 - 2011 Emulex
44766+ * All rights reserved.
44767+ *
44768+ * This program is free software; you can redistribute it and/or
44769+ * modify it under the terms of the GNU General Public License version 2
44770+ * as published by the Free Software Foundation. The full GNU General
44771+ * Public License is included in this distribution in the file called COPYING.
44772+ *
44773+ * Contact Information:
44774+ * linux-drivers@emulex.com
44775+ *
44776+ * Emulex
44777+ * 3333 Susan Street
44778+ * Costa Mesa, CA 92626
44779+ */
44780+
44781+#ifndef BE_COMPAT_H
44782+#define BE_COMPAT_H
44783+
44784+/****************** RHEL5 and SLES10 backport ***************************/
44785+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
44786+
44787+#ifndef upper_32_bits
44788+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
44789+#endif
44790+
44791+#ifndef CHECKSUM_PARTIAL
44792+#define CHECKSUM_PARTIAL CHECKSUM_HW
44793+#define CHECKSUM_COMPLETE CHECKSUM_HW
44794+#endif
44795+
44796+#if !defined(ip_hdr)
44797+#define ip_hdr(skb) (skb->nh.iph)
44798+#define ipv6_hdr(skb) (skb->nh.ipv6h)
44799+#endif
44800+
44801+#if !defined(__packed)
44802+#define __packed __attribute__ ((packed))
44803+#endif
44804+
44805+#if !defined(RHEL_MINOR)
44806+/* Only for RH5U1 (Maui) and SLES10 NIC driver */
44807+enum {
44808+ false = 0,
44809+ true = 1
44810+};
44811+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
44812+/* Only for RH5U1 (Maui) NIC driver */
44813+static inline __attribute__((const))
44814+int __ilog2_u32(u32 n)
44815+{
44816+ return fls(n) - 1;
44817+}
44818+#endif
44819+#endif
44820+
44821+#define ETH_FCS_LEN 4
44822+#define bool u8
44823+#ifndef PTR_ALIGN
44824+#define PTR_ALIGN(p, a) ((typeof(p)) \
44825+ ALIGN((unsigned long)(p), (a)))
44826+#endif
44827+#define list_first_entry(ptr, type, member) \
44828+ list_entry((ptr)->next, type, member)
44829+
44830+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44831+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44832+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] \
44833+ __devinitdata
44834+#endif
44835+
44836+/* Backport of request_irq */
44837+typedef irqreturn_t(*backport_irq_handler_t) (int, void *);
44838+static inline int
44839+backport_request_irq(unsigned int irq, irqreturn_t(*handler) (int, void *),
44840+ unsigned long flags, const char *dev_name, void *dev_id)
44841+{
44842+ return request_irq(irq,
44843+ (irqreturn_t(*) (int, void *, struct pt_regs *))handler,
44844+ flags, dev_name, dev_id);
44845+}
44846+#define request_irq backport_request_irq
44847+
44848+#endif /*** RHEL5 and SLES10 backport ***/
44849+
44850+#if !defined(__packed)
44851+#define __packed __attribute__ ((packed))
44852+#endif
44853+
44854+/****************** SLES10 only backport ***************************/
44855+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
44856+
44857+#include <linux/tifm.h>
44858+
44859+#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
44860+#define IRQF_SHARED SA_SHIRQ
44861+#define CHECKSUM_PARTIAL CHECKSUM_HW
44862+#define CHECKSUM_COMPLETE CHECKSUM_HW
44863+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
44864+#define NETIF_F_IPV6_CSUM NETIF_F_IP_CSUM
44865+#define NETIF_F_TSO6 NETIF_F_TSO
44866+
44867+
44868+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
44869+ unsigned int length)
44870+{
44871+ /* 16 == NET_PAD_SKB */
44872+ struct sk_buff *skb;
44873+ skb = alloc_skb(length + 16, GFP_ATOMIC);
44874+ if (likely(skb != NULL)) {
44875+ skb_reserve(skb, 16);
44876+ skb->dev = dev;
44877+ }
44878+ return skb;
44879+}
44880+
44881+#define PCI_SAVE_STATE(x)
44882+
44883+#else /* SLES10 only backport */
44884+
44885+#define PCI_SAVE_STATE(x) pci_save_state(x)
44886+
44887+#endif /* SLES10 only backport */
44888+
44889+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
44890+#define netdev_tx_t int
44891+#endif
44892+
44893+#ifndef VLAN_PRIO_MASK
44894+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
44895+#define VLAN_PRIO_SHIFT 13
44896+#endif
44897+
44898+/*
44899+ * Backport of netdev ops struct
44900+ */
44901+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44902+struct net_device_ops {
44903+ int (*ndo_init)(struct net_device *dev);
44904+ void (*ndo_uninit)(struct net_device *dev);
44905+ int (*ndo_open)(struct net_device *dev);
44906+ int (*ndo_stop)(struct net_device *dev);
44907+ int (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev);
44908+ u16 (*ndo_select_queue)(struct net_device *dev,
44909+ struct sk_buff *skb);
44910+ void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
44911+ void (*ndo_set_rx_mode)(struct net_device *dev);
44912+ void (*ndo_set_multicast_list)(struct net_device *dev);
44913+ int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
44914+ int (*ndo_validate_addr)(struct net_device *dev);
44915+ int (*ndo_do_ioctl)(struct net_device *dev,
44916+ struct ifreq *ifr, int cmd);
44917+ int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
44918+ int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
44919+ int (*ndo_neigh_setup)(struct net_device *dev,
44920+ struct neigh_parms *);
44921+ void (*ndo_tx_timeout) (struct net_device *dev);
44922+
44923+ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
44924+
44925+ void (*ndo_vlan_rx_register)(struct net_device *dev,
44926+ struct vlan_group *grp);
44927+ void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
44928+ unsigned short vid);
44929+ void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
44930+ unsigned short vid);
44931+#ifdef CONFIG_NET_POLL_CONTROLLER
44932+#define HAVE_NETDEV_POLL
44933+ void (*ndo_poll_controller)(struct net_device *dev);
44934+#endif
44935+};
44936+extern void be_netdev_ops_init(struct net_device *netdev,
44937+ struct net_device_ops *ops);
44938+extern int eth_validate_addr(struct net_device *);
44939+
44940+#endif /* Netdev ops backport */
44941+
44942+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 29)
44943+#undef NETIF_F_GRO
44944+#endif
44945+
44946+#ifdef NO_GRO
44947+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
44948+#undef NETIF_F_GRO
44949+#endif
44950+#endif
44951+
44952+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
44953+#define HAVE_ETHTOOL_FLASH
44954+#endif
44955+
44956+/*
44957+ * Backport of NAPI
44958+ */
44959+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
44960+
44961+#if defined(RHEL_MINOR) && (RHEL_MINOR > 3)
44962+#define RHEL_NEW_NAPI
44963+#endif
44964+
44965+/* We need a new struct that has some meta data beyond rhel 5.4's napi_struct
44966+ * to fix rhel5.4's half-baked new napi implementation.
44967+ * We don't want to use rhel 5.4's broken napi_complete; so
44968+ * define a new be_napi_complete that executes the logic only for Rx
44969+ */
44970+
44971+#ifdef RHEL_NEW_NAPI
44972+#define napi_complete be_napi_complete
44973+typedef struct napi_struct rhel_napi_struct;
44974+#endif
44975+#define napi_struct be_napi_struct
44976+#define napi_gro_frags(napi) napi_gro_frags((rhel_napi_struct *) napi)
44977+#define vlan_gro_frags(napi, vlan_grp, vid)\
44978+ vlan_gro_frags((rhel_napi_struct *) napi, vlan_grp, vid)
44979+#define napi_get_frags(napi) napi_get_frags((rhel_napi_struct *) napi)
44980+
44981+struct napi_struct {
44982+#ifdef RHEL_NEW_NAPI
44983+ rhel_napi_struct napi; /* must be the first member */
44984+#endif
44985+ struct net_device *dev;
44986+ int (*poll) (struct napi_struct *napi, int budget);
44987+ bool rx;
44988+};
44989+
44990+static inline void napi_complete(struct napi_struct *napi)
44991+{
44992+#ifdef NETIF_F_GRO
44993+ napi_gro_flush((rhel_napi_struct *)napi);
44994+#endif
44995+ netif_rx_complete(napi->dev);
44996+}
44997+
44998+static inline void napi_schedule(struct napi_struct *napi)
44999+{
45000+ netif_rx_schedule(napi->dev);
45001+}
45002+
45003+static inline void napi_enable(struct napi_struct *napi)
45004+{
45005+ netif_poll_enable(napi->dev);
45006+}
45007+
45008+static inline void napi_disable(struct napi_struct *napi)
45009+{
45010+ netif_poll_disable(napi->dev);
45011+}
45012+
45013+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45014+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45015+static inline void vlan_group_set_device(struct vlan_group *vg,
45016+ u16 vlan_id,
45017+ struct net_device *dev)
45018+{
45019+ struct net_device **array;
45020+ if (!vg)
45021+ return;
45022+ array = vg->vlan_devices;
45023+ array[vlan_id] = dev;
45024+}
45025+#endif
45026+
45027+#endif /* New NAPI backport */
45028+
45029+extern int be_netif_napi_add(struct net_device *netdev,
45030+ struct napi_struct *napi,
45031+ int (*poll) (struct napi_struct *, int), int weight);
45032+extern void be_netif_napi_del(struct net_device *netdev);
45033+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45034+#define HAVE_SIMULATED_MULTI_NAPI
45035+#endif
45036+
45037+/************** Backport of Delayed work queues interface ****************/
45038+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
45039+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
45040+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
45041+struct delayed_work {
45042+ struct work_struct work;
45043+};
45044+#endif
45045+
45046+#define INIT_DELAYED_WORK(_work, _func) \
45047+ INIT_WORK(&(_work)->work, _func, &(_work)->work)
45048+
45049+static inline int backport_cancel_delayed_work_sync(struct delayed_work *work)
45050+{
45051+ cancel_rearming_delayed_work(&work->work);
45052+ return 0;
45053+}
45054+#define cancel_delayed_work_sync backport_cancel_delayed_work_sync
45055+
45056+static inline int backport_schedule_delayed_work(struct delayed_work *work,
45057+ unsigned long delay)
45058+{
45059+ if (unlikely(!delay))
45060+ return schedule_work(&work->work);
45061+ else
45062+ return schedule_delayed_work(&work->work, delay);
45063+}
45064+#define schedule_delayed_work backport_schedule_delayed_work
45065+#endif /* backport delayed workqueue */
45066+
45067+
45068+/************** Backport of INET_LRO **********************************/
45069+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45070+
45071+#include <linux/inet_lro.h>
45072+
45073+#else
45074+
45075+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
45076+
45077+#if defined(RHEL_MINOR) && RHEL_MINOR < 6
45078+typedef __u16 __bitwise __sum16;
45079+typedef __u32 __bitwise __wsum;
45080+#endif
45081+
45082+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR <= 3)) || \
45083+ (!defined(RHEL_MINOR)))
45084+static inline __wsum csum_unfold(__sum16 n)
45085+{
45086+ return (__force __wsum)n;
45087+}
45088+#endif
45089+
45090+#endif
45091+
45092+#define lro_flush_all lro_flush_all_compat
45093+#define lro_vlan_hwaccel_receive_frags lro_vlan_hwaccel_receive_frags_compat
45094+#define lro_receive_frags lro_receive_frags_compat
45095+
45096+struct net_lro_stats {
45097+ unsigned long aggregated;
45098+ unsigned long flushed;
45099+ unsigned long no_desc;
45100+};
45101+
45102+struct net_lro_desc {
45103+ struct sk_buff *parent;
45104+ struct sk_buff *last_skb;
45105+ struct skb_frag_struct *next_frag;
45106+ struct iphdr *iph;
45107+ struct tcphdr *tcph;
45108+ struct vlan_group *vgrp;
45109+ __wsum data_csum;
45110+ u32 tcp_rcv_tsecr;
45111+ u32 tcp_rcv_tsval;
45112+ u32 tcp_ack;
45113+ u32 tcp_next_seq;
45114+ u32 skb_tot_frags_len;
45115+ u32 ack_cnt;
45116+ u16 ip_tot_len;
45117+ u16 tcp_saw_tstamp; /* timestamps enabled */
45118+ u16 tcp_window;
45119+ u16 vlan_tag;
45120+ int pkt_aggr_cnt; /* counts aggregated packets */
45121+ int vlan_packet;
45122+ int mss;
45123+ int active;
45124+};
45125+
45126+struct net_lro_mgr {
45127+ struct net_device *dev;
45128+ struct net_lro_stats stats;
45129+
45130+ /* LRO features */
45131+ unsigned long features;
45132+#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
45133+#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
45134+ from received packets and eth protocol
45135+ is still ETH_P_8021Q */
45136+
45137+ u32 ip_summed; /* Set in non generated SKBs in page mode */
45138+ u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
45139+ * or CHECKSUM_NONE */
45140+
45141+ int max_desc; /* Max number of LRO descriptors */
45142+ int max_aggr; /* Max number of LRO packets to be aggregated */
45143+
45144+ struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
45145+
45146+ /* Optimized driver functions
45147+ * get_skb_header: returns tcp and ip header for packet in SKB
45148+ */
45149+ int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
45150+ void **tcpudp_hdr, u64 *hdr_flags, void *priv);
45151+
45152+ /* hdr_flags: */
45153+#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
45154+#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
45155+
45156+ /*
45157+ * get_frag_header: returns mac, tcp and ip header for packet in SKB
45158+ *
45159+ * @hdr_flags: Indicate what kind of LRO has to be done
45160+ * (IPv4/IPv6/TCP/UDP)
45161+ */
45162+ int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
45163+ void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
45164+ void *priv);
45165+};
45166+
45167+extern void lro_receive_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
45168+ void *priv);
45169+
45170+extern void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
45171+ struct sk_buff *skb, struct vlan_group *vgrp,
45172+ u16 vlan_tag, void *priv);
45173+
45174+/* This functions aggregate fragments and generate SKBs do pass
45175+ * the packets to the stack.
45176+ *
45177+ * @lro_mgr: LRO manager to use
45178+ * @frags: Fragment to be processed. Must contain entire header in first
45179+ * element.
45180+ * @len: Length of received data
45181+ * @true_size: Actual size of memory the fragment is consuming
45182+ * @priv: Private data that may be used by driver functions
45183+ * (for example get_tcp_ip_hdr)
45184+ */
45185+extern void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45186+ struct skb_frag_struct *frags, int len, int true_size,
45187+ void *priv, __wsum sum);
45188+
45189+extern void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
45190+ struct skb_frag_struct *frags, int len, int true_size,
45191+ struct vlan_group *vgrp, u16 vlan_tag, void *priv,
45192+ __wsum sum);
45193+
45194+/* Forward all aggregated SKBs held by lro_mgr to network stack */
45195+extern void lro_flush_all_compat(struct net_lro_mgr *lro_mgr);
45196+
45197+extern void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
45198+ struct tcphdr *tcph);
45199+#endif /* backport of inet_lro */
45200+
45201+#ifndef ETHTOOL_FLASH_MAX_FILENAME
45202+#define ETHTOOL_FLASH_MAX_FILENAME 128
45203+#endif
45204+
45205+#if defined(CONFIG_XEN) && !defined(NETIF_F_GRO)
45206+#define BE_INIT_FRAGS_PER_FRAME (u32) 1
45207+#else
45208+#define BE_INIT_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
45209+#endif
45210+
45211+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45212+#ifdef CONFIG_PCI_IOV
45213+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR == 6)))
45214+#undef CONFIG_PCI_IOV
45215+#endif
45216+#endif
45217+#endif
45218+
45219+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
45220+#define dev_to_node(dev) -1
45221+#endif
45222+
45223+
45224+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
45225+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR > 6)))
45226+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
45227+ unsigned int length)
45228+{
45229+ struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
45230+
45231+ if (NET_IP_ALIGN && skb)
45232+ skb_reserve(skb, NET_IP_ALIGN);
45233+ return skb;
45234+}
45235+#endif
45236+#endif
45237+
45238+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
45239+#ifndef netif_set_gso_max_size
45240+#define netif_set_gso_max_size(netdev, size) do {} while (0)
45241+#endif
45242+#endif
45243+
45244+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
45245+#if defined(RHEL_MINOR) && (RHEL_MINOR <= 4)
45246+static inline int skb_is_gso_v6(const struct sk_buff *skb)
45247+{
45248+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
45249+}
45250+#endif
45251+#endif
45252+
45253+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45254+static inline int skb_is_gso_v6(const struct sk_buff *skb)
45255+{
45256+ return (ip_hdr(skb)->version == 6);
45257+}
45258+#endif
45259+
45260+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45261+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
45262+#endif
45263+
45264+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45265+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)))
45266+#define HAVE_SRIOV_CONFIG
45267+#endif
45268+#endif
45269+
45270+#ifndef NETIF_F_VLAN_SG
45271+#define NETIF_F_VLAN_SG NETIF_F_SG
45272+#endif
45273+
45274+#ifndef NETIF_F_VLAN_CSUM
45275+#define NETIF_F_VLAN_CSUM NETIF_F_HW_CSUM
45276+#endif
45277+
45278+#ifndef NETIF_F_VLAN_TSO
45279+#define NETIF_F_VLAN_TSO NETIF_F_TSO
45280+#endif
45281+
45282+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45283+#define vlan_features features
45284+#endif
45285+
45286+#ifndef DEFINE_DMA_UNMAP_ADDR
45287+#define DEFINE_DMA_UNMAP_ADDR(bus) dma_addr_t bus
45288+#endif
45289+
45290+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45291+
45292+#ifndef netdev_mc_count
45293+#define netdev_mc_count(nd) (nd->mc_count)
45294+#endif
45295+
45296+#ifndef netdev_hw_addr
45297+#define netdev_hw_addr dev_mc_list
45298+#endif
45299+
45300+#ifndef netdev_for_each_mc_addr
45301+#define netdev_for_each_mc_addr(ha, nd) \
45302+ for (ha = (nd)->mc_list; ha; ha = ha->next)
45303+#endif
45304+
45305+#define DMI_ADDR dmi_addr
45306+#else
45307+#define DMI_ADDR addr
45308+#endif
45309+
45310+#ifndef VLAN_GROUP_ARRAY_LEN
45311+#define VLAN_GROUP_ARRAY_LEN VLAN_N_VID
45312+#endif
45313+/**************************** Multi TXQ Support ******************************/
45314+
45315+/* Supported only in RHEL6 and SL11.1 (barring one execption) */
45316+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45317+#define MQ_TX
45318+#endif
45319+
45320+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
45321+#define alloc_etherdev_mq(sz, cnt) alloc_etherdev(sz)
45322+#define skb_get_queue_mapping(skb) 0
45323+#define skb_tx_hash(dev, skb) 0
45324+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45325+#define netif_wake_subqueue(dev, idx) netif_wake_queue(dev)
45326+#define netif_stop_subqueue(dev, idx) netif_stop_queue(dev)
45327+#define __netif_subqueue_stopped(dev, idx) netif_queue_stopped(dev)
45328+#endif /* < 2.6.27 */
45329+
45330+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
45331+ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)))
45332+#define skb_tx_hash(dev, skb) 0
45333+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
45334+#endif
45335+
45336+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
45337+#define netif_set_real_num_tx_queues be_set_real_num_tx_queues
45338+static inline void be_set_real_num_tx_queues(struct net_device *dev,
45339+ unsigned int txq)
45340+{
45341+ dev->real_num_tx_queues = txq;
45342+}
45343+#endif
45344+
45345+#include <linux/if_vlan.h>
45346+static inline void be_reset_skb_tx_vlan(struct sk_buff *skb)
45347+{
45348+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
45349+ skb->vlan_tci = 0;
45350+#else
45351+ struct vlan_skb_tx_cookie *cookie;
45352+
45353+ cookie = VLAN_TX_SKB_CB(skb);
45354+ cookie->magic = 0;
45355+#endif
45356+}
45357+
45358+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
45359+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
45360+{
45361+ skb->nh.raw = skb->data + offset;
45362+}
45363+#endif
45364+
45365+static inline struct sk_buff *be_vlan_put_tag(struct sk_buff *skb,
45366+ unsigned short vlan_tag)
45367+{
45368+ struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag);
45369+ /* On kernel versions < 2.6.27 the __vlan_put_tag() function
45370+ * distorts the network layer hdr pointer in the skb which
45371+ * affects the detection of UDP/TCP packets down the line in
45372+ * wrb_fill_hdr().This work-around sets it right.
45373+ */
45374+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
45375+ skb_set_network_header(new_skb, VLAN_ETH_HLEN);
45376+#endif
45377+ return new_skb;
45378+}
45379+
45380+#ifndef ACCESS_ONCE
45381+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
45382+#endif
45383+
45384+#endif /* BE_COMPAT_H */
45385diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
45386index f0fd95b..37bad99 100644
45387--- a/drivers/net/benet/be_ethtool.c
45388+++ b/drivers/net/benet/be_ethtool.c
45389@@ -1,18 +1,18 @@
45390 /*
45391- * Copyright (C) 2005 - 2009 ServerEngines
45392+ * Copyright (C) 2005 - 2011 Emulex
45393 * All rights reserved.
45394 *
45395 * This program is free software; you can redistribute it and/or
45396 * modify it under the terms of the GNU General Public License version 2
45397- * as published by the Free Software Foundation. The full GNU General
45398+ * as published by the Free Software Foundation. The full GNU General
45399 * Public License is included in this distribution in the file called COPYING.
45400 *
45401 * Contact Information:
45402- * linux-drivers@serverengines.com
45403+ * linux-drivers@emulex.com
45404 *
45405- * ServerEngines
45406- * 209 N. Fair Oaks Ave
45407- * Sunnyvale, CA 94085
45408+ * Emulex
45409+ * 3333 Susan Street
45410+ * Costa Mesa, CA 92626
45411 */
45412
45413 #include "be.h"
45414@@ -26,21 +26,19 @@ struct be_ethtool_stat {
45415 int offset;
45416 };
45417
45418-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
45419+enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
45420 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
45421 offsetof(_struct, field)
45422-#define NETSTAT_INFO(field) #field, NETSTAT,\
45423+#define NETSTAT_INFO(field) #field, NETSTAT,\
45424 FIELDINFO(struct net_device_stats,\
45425 field)
45426-#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45427- FIELDINFO(struct be_drvr_stats, field)
45428-#define MISCSTAT_INFO(field) #field, MISCSTAT,\
45429- FIELDINFO(struct be_rxf_stats, field)
45430-#define PORTSTAT_INFO(field) #field, PORTSTAT,\
45431- FIELDINFO(struct be_port_rxf_stats, \
45432+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
45433+ FIELDINFO(struct be_tx_stats, field)
45434+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
45435+ FIELDINFO(struct be_rx_stats, field)
45436+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
45437+ FIELDINFO(struct be_drv_stats, \
45438 field)
45439-#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45440- FIELDINFO(struct be_erx_stats, field)
45441
45442 static const struct be_ethtool_stat et_stats[] = {
45443 {NETSTAT_INFO(rx_packets)},
45444@@ -51,70 +49,131 @@ static const struct be_ethtool_stat et_stats[] = {
45445 {NETSTAT_INFO(tx_errors)},
45446 {NETSTAT_INFO(rx_dropped)},
45447 {NETSTAT_INFO(tx_dropped)},
45448- {DRVSTAT_INFO(be_tx_reqs)},
45449- {DRVSTAT_INFO(be_tx_stops)},
45450- {DRVSTAT_INFO(be_fwd_reqs)},
45451- {DRVSTAT_INFO(be_tx_wrbs)},
45452- {DRVSTAT_INFO(be_polls)},
45453 {DRVSTAT_INFO(be_tx_events)},
45454- {DRVSTAT_INFO(be_rx_events)},
45455- {DRVSTAT_INFO(be_tx_compl)},
45456- {DRVSTAT_INFO(be_rx_compl)},
45457- {DRVSTAT_INFO(be_ethrx_post_fail)},
45458- {DRVSTAT_INFO(be_802_3_dropped_frames)},
45459- {DRVSTAT_INFO(be_802_3_malformed_frames)},
45460- {DRVSTAT_INFO(be_tx_rate)},
45461- {DRVSTAT_INFO(be_rx_rate)},
45462- {PORTSTAT_INFO(rx_unicast_frames)},
45463- {PORTSTAT_INFO(rx_multicast_frames)},
45464- {PORTSTAT_INFO(rx_broadcast_frames)},
45465- {PORTSTAT_INFO(rx_crc_errors)},
45466- {PORTSTAT_INFO(rx_alignment_symbol_errors)},
45467- {PORTSTAT_INFO(rx_pause_frames)},
45468- {PORTSTAT_INFO(rx_control_frames)},
45469- {PORTSTAT_INFO(rx_in_range_errors)},
45470- {PORTSTAT_INFO(rx_out_range_errors)},
45471- {PORTSTAT_INFO(rx_frame_too_long)},
45472- {PORTSTAT_INFO(rx_address_match_errors)},
45473- {PORTSTAT_INFO(rx_vlan_mismatch)},
45474- {PORTSTAT_INFO(rx_dropped_too_small)},
45475- {PORTSTAT_INFO(rx_dropped_too_short)},
45476- {PORTSTAT_INFO(rx_dropped_header_too_small)},
45477- {PORTSTAT_INFO(rx_dropped_tcp_length)},
45478- {PORTSTAT_INFO(rx_dropped_runt)},
45479- {PORTSTAT_INFO(rx_fifo_overflow)},
45480- {PORTSTAT_INFO(rx_input_fifo_overflow)},
45481- {PORTSTAT_INFO(rx_ip_checksum_errs)},
45482- {PORTSTAT_INFO(rx_tcp_checksum_errs)},
45483- {PORTSTAT_INFO(rx_udp_checksum_errs)},
45484- {PORTSTAT_INFO(rx_non_rss_packets)},
45485- {PORTSTAT_INFO(rx_ipv4_packets)},
45486- {PORTSTAT_INFO(rx_ipv6_packets)},
45487- {PORTSTAT_INFO(tx_unicastframes)},
45488- {PORTSTAT_INFO(tx_multicastframes)},
45489- {PORTSTAT_INFO(tx_broadcastframes)},
45490- {PORTSTAT_INFO(tx_pauseframes)},
45491- {PORTSTAT_INFO(tx_controlframes)},
45492- {MISCSTAT_INFO(rx_drops_no_pbuf)},
45493- {MISCSTAT_INFO(rx_drops_no_txpb)},
45494- {MISCSTAT_INFO(rx_drops_no_erx_descr)},
45495- {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
45496- {MISCSTAT_INFO(rx_drops_too_many_frags)},
45497- {MISCSTAT_INFO(rx_drops_invalid_ring)},
45498- {MISCSTAT_INFO(forwarded_packets)},
45499- {MISCSTAT_INFO(rx_drops_mtu)},
45500- {ERXSTAT_INFO(rx_drops_no_fragments)},
45501+ {DRVSTAT_INFO(rx_crc_errors)},
45502+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
45503+ {DRVSTAT_INFO(rx_pause_frames)},
45504+ {DRVSTAT_INFO(rx_control_frames)},
45505+ {DRVSTAT_INFO(rx_in_range_errors)},
45506+ {DRVSTAT_INFO(rx_out_range_errors)},
45507+ {DRVSTAT_INFO(rx_frame_too_long)},
45508+ {DRVSTAT_INFO(rx_address_match_errors)},
45509+ {DRVSTAT_INFO(rx_dropped_too_small)},
45510+ {DRVSTAT_INFO(rx_dropped_too_short)},
45511+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
45512+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
45513+ {DRVSTAT_INFO(rx_dropped_runt)},
45514+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
45515+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
45516+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
45517+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
45518+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
45519+ {DRVSTAT_INFO(rx_switched_unicast_packets)},
45520+ {DRVSTAT_INFO(rx_switched_multicast_packets)},
45521+ {DRVSTAT_INFO(rx_switched_broadcast_packets)},
45522+ {DRVSTAT_INFO(tx_pauseframes)},
45523+ {DRVSTAT_INFO(tx_controlframes)},
45524+ {DRVSTAT_INFO(rx_priority_pause_frames)},
45525+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
45526+ {DRVSTAT_INFO(jabber_events)},
45527+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
45528+ {DRVSTAT_INFO(rx_drops_no_txpb)},
45529+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
45530+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
45531+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
45532+ {DRVSTAT_INFO(rx_drops_invalid_ring)},
45533+ {DRVSTAT_INFO(forwarded_packets)},
45534+ {DRVSTAT_INFO(rx_drops_mtu)},
45535+ {DRVSTAT_INFO(eth_red_drops)},
45536+ {DRVSTAT_INFO(be_on_die_temperature)}
45537 };
45538 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
45539
45540+/* Stats related to multi RX queues */
45541+static const struct be_ethtool_stat et_rx_stats[] = {
45542+ {DRVSTAT_RX_INFO(rx_bytes)},
45543+ {DRVSTAT_RX_INFO(rx_pkts)},
45544+ {DRVSTAT_RX_INFO(rx_rate)},
45545+ {DRVSTAT_RX_INFO(rx_polls)},
45546+ {DRVSTAT_RX_INFO(rx_events)},
45547+ {DRVSTAT_RX_INFO(rx_compl)},
45548+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
45549+ {DRVSTAT_RX_INFO(rx_post_fail)},
45550+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
45551+};
45552+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
45553+
45554+/* Stats related to multi TX queues */
45555+static const struct be_ethtool_stat et_tx_stats[] = {
45556+ {DRVSTAT_TX_INFO(be_tx_rate)},
45557+ {DRVSTAT_TX_INFO(be_tx_reqs)},
45558+ {DRVSTAT_TX_INFO(be_tx_wrbs)},
45559+ {DRVSTAT_TX_INFO(be_tx_stops)},
45560+ {DRVSTAT_TX_INFO(be_tx_compl)},
45561+ {DRVSTAT_TX_INFO(be_ipv6_ext_hdr_tx_drop)}
45562+};
45563+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
45564+
45565+static const char et_self_tests[][ETH_GSTRING_LEN] = {
45566+ "MAC Loopback test",
45567+ "PHY Loopback test",
45568+ "External Loopback test",
45569+ "DDR DMA test",
45570+ "Link test"
45571+};
45572+
45573+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
45574+#define BE_MAC_LOOPBACK 0x0
45575+#define BE_PHY_LOOPBACK 0x1
45576+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
45577+#define BE_NO_LOOPBACK 0xff
45578+
45579+/* MAC speed valid values */
45580+#define SPEED_DEFAULT 0x0
45581+#define SPEED_FORCED_10GB 0x1
45582+#define SPEED_FORCED_1GB 0x2
45583+#define SPEED_AUTONEG_10GB 0x3
45584+#define SPEED_AUTONEG_1GB 0x4
45585+#define SPEED_AUTONEG_100MB 0x5
45586+#define SPEED_AUTONEG_10GB_1GB 0x6
45587+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
45588+#define SPEED_AUTONEG_1GB_100MB 0x8
45589+#define SPEED_AUTONEG_10MB 0x9
45590+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
45591+#define SPEED_AUTONEG_100MB_10MB 0xb
45592+#define SPEED_FORCED_100MB 0xc
45593+#define SPEED_FORCED_10MB 0xd
45594+
45595+
45596+
45597 static void
45598 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45599 {
45600 struct be_adapter *adapter = netdev_priv(netdev);
45601+ int len;
45602+ char fw_on_flash[FW_VER_LEN];
45603+
45604+ memset(fw_on_flash, 0 , sizeof(fw_on_flash));
45605+
45606+ be_cmd_get_fw_ver(adapter, adapter->fw_ver,
45607+ fw_on_flash);
45608
45609 strcpy(drvinfo->driver, DRV_NAME);
45610 strcpy(drvinfo->version, DRV_VER);
45611+
45612 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
45613+ if (memcmp(adapter->fw_ver, fw_on_flash,
45614+ FW_VER_LEN) != 0) {
45615+ len = strlen(drvinfo->fw_version);
45616+ strncpy(drvinfo->fw_version+len, " [",
45617+ FW_VER_LEN-len-1);
45618+ len = strlen(drvinfo->fw_version);
45619+ strncpy(drvinfo->fw_version+len, fw_on_flash,
45620+ FW_VER_LEN-len-1);
45621+ len = strlen(drvinfo->fw_version);
45622+ strncpy(drvinfo->fw_version+len, "]", FW_VER_LEN-len-1);
45623+ }
45624+
45625 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
45626 drvinfo->testinfo_len = 0;
45627 drvinfo->regdump_len = 0;
45628@@ -122,12 +181,37 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45629 }
45630
45631 static int
45632+be_get_reg_len(struct net_device *netdev)
45633+{
45634+ struct be_adapter *adapter = netdev_priv(netdev);
45635+ u32 log_size = 0;
45636+
45637+ if (be_physfn(adapter))
45638+ be_cmd_get_reg_len(adapter, &log_size);
45639+
45640+ return log_size;
45641+}
45642+
45643+static void
45644+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
45645+{
45646+ struct be_adapter *adapter = netdev_priv(netdev);
45647+
45648+ if (be_physfn(adapter)) {
45649+ memset(buf, 0, regs->len);
45650+ be_cmd_get_regs(adapter, regs->len, buf);
45651+ }
45652+}
45653+
45654+static int
45655 be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45656 {
45657 struct be_adapter *adapter = netdev_priv(netdev);
45658- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45659+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
45660 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45661
45662+ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
45663+
45664 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
45665 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
45666 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
45667@@ -149,25 +233,52 @@ static int
45668 be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45669 {
45670 struct be_adapter *adapter = netdev_priv(netdev);
45671- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45672+ struct be_rx_obj *rxo;
45673+ struct be_eq_obj *rx_eq;
45674 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45675 u32 tx_max, tx_min, tx_cur;
45676 u32 rx_max, rx_min, rx_cur;
45677- int status = 0;
45678+ int status = 0, i;
45679
45680 if (coalesce->use_adaptive_tx_coalesce == 1)
45681 return -EINVAL;
45682+ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
45683+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
45684+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
45685
45686- /* if AIC is being turned on now, start with an EQD of 0 */
45687- if (rx_eq->enable_aic == 0 &&
45688- coalesce->use_adaptive_rx_coalesce == 1) {
45689- rx_eq->cur_eqd = 0;
45690+ for_all_rx_queues(adapter, rxo, i) {
45691+ rx_eq = &rxo->rx_eq;
45692+
45693+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
45694+ rx_eq->cur_eqd = 0;
45695+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45696+
45697+ rx_max = coalesce->rx_coalesce_usecs_high;
45698+ rx_min = coalesce->rx_coalesce_usecs_low;
45699+ rx_cur = coalesce->rx_coalesce_usecs;
45700+
45701+ if (rx_eq->enable_aic) {
45702+ if (rx_max > BE_MAX_EQD)
45703+ rx_max = BE_MAX_EQD;
45704+ if (rx_min > rx_max)
45705+ rx_min = rx_max;
45706+ rx_eq->max_eqd = rx_max;
45707+ rx_eq->min_eqd = rx_min;
45708+ if (rx_eq->cur_eqd > rx_max)
45709+ rx_eq->cur_eqd = rx_max;
45710+ if (rx_eq->cur_eqd < rx_min)
45711+ rx_eq->cur_eqd = rx_min;
45712+ } else {
45713+ if (rx_cur > BE_MAX_EQD)
45714+ rx_cur = BE_MAX_EQD;
45715+ if (rx_eq->cur_eqd != rx_cur) {
45716+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45717+ rx_cur);
45718+ if (!status)
45719+ rx_eq->cur_eqd = rx_cur;
45720+ }
45721+ }
45722 }
45723- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45724-
45725- rx_max = coalesce->rx_coalesce_usecs_high;
45726- rx_min = coalesce->rx_coalesce_usecs_low;
45727- rx_cur = coalesce->rx_coalesce_usecs;
45728
45729 tx_max = coalesce->tx_coalesce_usecs_high;
45730 tx_min = coalesce->tx_coalesce_usecs_low;
45731@@ -181,27 +292,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45732 tx_eq->cur_eqd = tx_cur;
45733 }
45734
45735- if (rx_eq->enable_aic) {
45736- if (rx_max > BE_MAX_EQD)
45737- rx_max = BE_MAX_EQD;
45738- if (rx_min > rx_max)
45739- rx_min = rx_max;
45740- rx_eq->max_eqd = rx_max;
45741- rx_eq->min_eqd = rx_min;
45742- if (rx_eq->cur_eqd > rx_max)
45743- rx_eq->cur_eqd = rx_max;
45744- if (rx_eq->cur_eqd < rx_min)
45745- rx_eq->cur_eqd = rx_min;
45746- } else {
45747- if (rx_cur > BE_MAX_EQD)
45748- rx_cur = BE_MAX_EQD;
45749- if (rx_eq->cur_eqd != rx_cur) {
45750- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45751- rx_cur);
45752- if (!status)
45753- rx_eq->cur_eqd = rx_cur;
45754- }
45755- }
45756 return 0;
45757 }
45758
45759@@ -229,81 +319,294 @@ be_get_ethtool_stats(struct net_device *netdev,
45760 struct ethtool_stats *stats, uint64_t *data)
45761 {
45762 struct be_adapter *adapter = netdev_priv(netdev);
45763- struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
45764- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
45765- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
45766- struct be_port_rxf_stats *port_stats =
45767- &rxf_stats->port[adapter->port_num];
45768- struct net_device_stats *net_stats = &adapter->stats.net_stats;
45769- struct be_erx_stats *erx_stats = &hw_stats->erx;
45770+ struct be_rx_obj *rxo;
45771+ struct be_tx_obj *txo;
45772 void *p = NULL;
45773- int i;
45774+ int i, j, base;
45775
45776 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45777 switch (et_stats[i].type) {
45778 case NETSTAT:
45779- p = net_stats;
45780+ p = &adapter->net_stats;
45781 break;
45782 case DRVSTAT:
45783- p = drvr_stats;
45784- break;
45785- case PORTSTAT:
45786- p = port_stats;
45787- break;
45788- case MISCSTAT:
45789- p = rxf_stats;
45790- break;
45791- case ERXSTAT: /* Currently only one ERX stat is provided */
45792- p = (u32 *)erx_stats + adapter->rx_obj.q.id;
45793+ p = &adapter->drv_stats;
45794 break;
45795 }
45796
45797 p = (u8 *)p + et_stats[i].offset;
45798 data[i] = (et_stats[i].size == sizeof(u64)) ?
45799- *(u64 *)p: *(u32 *)p;
45800+ *(u64 *)p:(*(u32 *)p);
45801 }
45802
45803- return;
45804+ base = ETHTOOL_STATS_NUM;
45805+ for_all_rx_queues(adapter, rxo, j) {
45806+ for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
45807+ p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
45808+ data[base + j * ETHTOOL_RXSTATS_NUM + i] =
45809+ (et_rx_stats[i].size == sizeof(u64)) ?
45810+ *(u64 *)p: *(u32 *)p;
45811+ }
45812+ }
45813+
45814+ base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
45815+ for_all_tx_queues(adapter, txo, j) {
45816+ for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
45817+ p = (u8 *)&txo->stats + et_tx_stats[i].offset;
45818+ data[base + j * ETHTOOL_TXSTATS_NUM + i] =
45819+ (et_tx_stats[i].size == sizeof(u64)) ?
45820+ *(u64 *)p: *(u32 *)p;
45821+ }
45822+ }
45823 }
45824
45825 static void
45826 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
45827 uint8_t *data)
45828 {
45829- int i;
45830+ struct be_adapter *adapter = netdev_priv(netdev);
45831+ int i, j;
45832+
45833 switch (stringset) {
45834 case ETH_SS_STATS:
45835 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45836 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
45837 data += ETH_GSTRING_LEN;
45838 }
45839+ for (i = 0; i < adapter->num_rx_qs; i++) {
45840+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
45841+ sprintf(data, "rxq%d: %s", i,
45842+ et_rx_stats[j].desc);
45843+ data += ETH_GSTRING_LEN;
45844+ }
45845+ }
45846+ for (i = 0; i < adapter->num_tx_qs; i++) {
45847+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
45848+ sprintf(data, "txq%d: %s", i,
45849+ et_tx_stats[j].desc);
45850+ data += ETH_GSTRING_LEN;
45851+ }
45852+ }
45853+ break;
45854+ case ETH_SS_TEST:
45855+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
45856+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
45857+ data += ETH_GSTRING_LEN;
45858+ }
45859 break;
45860 }
45861 }
45862
45863+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45864 static int be_get_stats_count(struct net_device *netdev)
45865 {
45866- return ETHTOOL_STATS_NUM;
45867+ struct be_adapter *adapter = netdev_priv(netdev);
45868+
45869+ return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM
45870+ + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45871 }
45872+static int
45873+be_self_test_count(struct net_device *dev)
45874+{
45875+ return ETHTOOL_TESTS_NUM;
45876+}
45877+#else
45878+
45879+static int be_get_sset_count(struct net_device *netdev, int stringset)
45880+{
45881+ struct be_adapter *adapter = netdev_priv(netdev);
45882+
45883+ switch (stringset) {
45884+ case ETH_SS_TEST:
45885+ return ETHTOOL_TESTS_NUM;
45886+ case ETH_SS_STATS:
45887+ return ETHTOOL_STATS_NUM +
45888+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
45889+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45890+ default:
45891+ return -EINVAL;
45892+ }
45893+}
45894+#endif
45895
45896 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
45897 {
45898- ecmd->speed = SPEED_10000;
45899+ struct be_adapter *adapter = netdev_priv(netdev);
45900+ struct be_phy_info phy_info;
45901+ u8 mac_speed = 0;
45902+ u16 link_speed = 0;
45903+ int link_status = LINK_DOWN;
45904+ int status;
45905+
45906+ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
45907+ status = be_cmd_link_status_query(adapter, &link_status,
45908+ &mac_speed, &link_speed, 0);
45909+
45910+ be_link_status_update(adapter, link_status);
45911+ /* link_speed is in units of 10 Mbps */
45912+ if (link_speed) {
45913+ ecmd->speed = link_speed*10;
45914+ } else {
45915+ switch (mac_speed) {
45916+ case PHY_LINK_SPEED_10MBPS:
45917+ ecmd->speed = SPEED_10;
45918+ break;
45919+ case PHY_LINK_SPEED_100MBPS:
45920+ ecmd->speed = SPEED_100;
45921+ break;
45922+ case PHY_LINK_SPEED_1GBPS:
45923+ ecmd->speed = SPEED_1000;
45924+ break;
45925+ case PHY_LINK_SPEED_10GBPS:
45926+ ecmd->speed = SPEED_10000;
45927+ break;
45928+ case PHY_LINK_SPEED_ZERO:
45929+ ecmd->speed = 0;
45930+ break;
45931+ }
45932+ }
45933+
45934+ status = be_cmd_get_phy_info(adapter, &phy_info);
45935+ if (!status) {
45936+ switch (phy_info.interface_type) {
45937+ case PHY_TYPE_XFP_10GB:
45938+ case PHY_TYPE_SFP_1GB:
45939+ case PHY_TYPE_SFP_PLUS_10GB:
45940+ ecmd->port = PORT_FIBRE;
45941+ break;
45942+ default:
45943+ ecmd->port = PORT_TP;
45944+ break;
45945+ }
45946+
45947+ switch (phy_info.interface_type) {
45948+ case PHY_TYPE_KR_10GB:
45949+ case PHY_TYPE_KX4_10GB:
45950+ ecmd->transceiver = XCVR_INTERNAL;
45951+ break;
45952+ default:
45953+ ecmd->transceiver = XCVR_EXTERNAL;
45954+ break;
45955+ }
45956+
45957+ if (phy_info.auto_speeds_supported) {
45958+ ecmd->supported |= SUPPORTED_Autoneg;
45959+ ecmd->autoneg = AUTONEG_ENABLE;
45960+ ecmd->advertising |= ADVERTISED_Autoneg;
45961+ }
45962+
45963+ if (phy_info.misc_params & BE_PAUSE_SYM_EN) {
45964+ ecmd->supported |= SUPPORTED_Pause;
45965+ ecmd->advertising |= ADVERTISED_Pause;
45966+ }
45967+
45968+ }
45969+
45970+ /* Save for future use */
45971+ adapter->link_speed = ecmd->speed;
45972+ adapter->port_type = ecmd->port;
45973+ adapter->transceiver = ecmd->transceiver;
45974+ adapter->autoneg = ecmd->autoneg;
45975+ } else {
45976+ ecmd->speed = adapter->link_speed;
45977+ ecmd->port = adapter->port_type;
45978+ ecmd->transceiver = adapter->transceiver;
45979+ ecmd->autoneg = adapter->autoneg;
45980+ }
45981+
45982 ecmd->duplex = DUPLEX_FULL;
45983- ecmd->autoneg = AUTONEG_DISABLE;
45984+ ecmd->phy_address = (adapter->hba_port_num << 4) |
45985+ (adapter->port_name[adapter->hba_port_num]);
45986+ switch (ecmd->port) {
45987+ case PORT_FIBRE:
45988+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
45989+ break;
45990+ case PORT_TP:
45991+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
45992+ break;
45993+ }
45994+
45995+ if (ecmd->autoneg) {
45996+ ecmd->supported |= SUPPORTED_1000baseT_Full;
45997+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
45998+ ADVERTISED_1000baseT_Full);
45999+ }
46000+
46001 return 0;
46002 }
46003
46004+static int be_set_settings(struct net_device *netdev,
46005+ struct ethtool_cmd *ecmd)
46006+{
46007+ struct be_adapter *adapter = netdev_priv(netdev);
46008+ struct be_phy_info phy_info;
46009+ u16 mac_speed=0;
46010+ u16 dac_cable_len=0;
46011+ u16 port_speed = 0;
46012+ int status;
46013+
46014+ status = be_cmd_get_phy_info(adapter, &phy_info);
46015+ if (status) {
46016+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46017+ return status;
46018+ }
46019+
46020+ if (ecmd->autoneg == AUTONEG_ENABLE) {
46021+ switch(phy_info.interface_type) {
46022+ case PHY_TYPE_SFP_1GB:
46023+ case PHY_TYPE_BASET_1GB:
46024+ case PHY_TYPE_BASEX_1GB:
46025+ case PHY_TYPE_SGMII:
46026+ mac_speed = SPEED_AUTONEG_1GB_100MB_10MB;
46027+ break;
46028+ case PHY_TYPE_SFP_PLUS_10GB:
46029+ dev_warn(&adapter->pdev->dev,
46030+ "Autoneg not supported on this module. \n");
46031+ return -EINVAL;
46032+ case PHY_TYPE_KR_10GB:
46033+ case PHY_TYPE_KX4_10GB:
46034+ mac_speed = SPEED_AUTONEG_10GB_1GB;
46035+ break;
46036+ case PHY_TYPE_BASET_10GB:
46037+ mac_speed = SPEED_AUTONEG_10GB_1GB_100MB;
46038+ break;
46039+ }
46040+ } else if(ecmd->autoneg == AUTONEG_DISABLE) {
46041+ if(ecmd->speed == SPEED_10) {
46042+ mac_speed = SPEED_FORCED_10MB;
46043+ } else if(ecmd->speed == SPEED_100) {
46044+ mac_speed = SPEED_FORCED_100MB;
46045+ } else if(ecmd->speed == SPEED_1000) {
46046+ mac_speed = SPEED_FORCED_1GB;
46047+ } else if(ecmd->speed == SPEED_10000) {
46048+ mac_speed = SPEED_FORCED_10GB;
46049+ }
46050+ }
46051+
46052+ status = be_cmd_get_port_speed(adapter, adapter->hba_port_num,
46053+ &dac_cable_len, &port_speed);
46054+
46055+ if (!status && port_speed != mac_speed)
46056+ status = be_cmd_set_port_speed_v1(adapter,
46057+ adapter->hba_port_num, mac_speed,
46058+ dac_cable_len);
46059+ if (status)
46060+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
46061+
46062+ return status;
46063+
46064+}
46065+
46066 static void
46067 be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
46068 {
46069 struct be_adapter *adapter = netdev_priv(netdev);
46070
46071- ring->rx_max_pending = adapter->rx_obj.q.len;
46072- ring->tx_max_pending = adapter->tx_obj.q.len;
46073+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
46074+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
46075
46076- ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
46077- ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
46078+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
46079+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
46080 }
46081
46082 static void
46083@@ -312,7 +615,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46084 struct be_adapter *adapter = netdev_priv(netdev);
46085
46086 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
46087- ecmd->autoneg = 0;
46088+ ecmd->autoneg = adapter->autoneg;
46089 }
46090
46091 static int
46092@@ -334,6 +637,203 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
46093 return status;
46094 }
46095
46096+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46097+static int
46098+be_phys_id(struct net_device *netdev, u32 data)
46099+{
46100+ struct be_adapter *adapter = netdev_priv(netdev);
46101+ int status;
46102+ u32 cur;
46103+
46104+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
46105+
46106+ if (cur == BEACON_STATE_ENABLED)
46107+ return 0;
46108+
46109+ if (data < 2)
46110+ data = 2;
46111+
46112+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46113+ BEACON_STATE_ENABLED);
46114+ set_current_state(TASK_INTERRUPTIBLE);
46115+ schedule_timeout(data*HZ);
46116+
46117+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46118+ BEACON_STATE_DISABLED);
46119+
46120+ return status;
46121+}
46122+#else
46123+static int
46124+be_set_phys_id(struct net_device *netdev,
46125+ enum ethtool_phys_id_state state)
46126+{
46127+ struct be_adapter *adapter = netdev_priv(netdev);
46128+
46129+ switch (state) {
46130+ case ETHTOOL_ID_ACTIVE:
46131+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
46132+ &adapter->beacon_state);
46133+ return 1; /* cycle on/off once per second */
46134+
46135+ case ETHTOOL_ID_ON:
46136+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46137+ BEACON_STATE_ENABLED);
46138+ break;
46139+
46140+ case ETHTOOL_ID_OFF:
46141+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46142+ BEACON_STATE_DISABLED);
46143+ break;
46144+
46145+ case ETHTOOL_ID_INACTIVE:
46146+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
46147+ adapter->beacon_state);
46148+ }
46149+
46150+ return 0;
46151+}
46152+#endif
46153+
46154+static bool
46155+be_is_wol_supported(struct be_adapter *adapter)
46156+{
46157+ struct pci_dev *pdev = adapter->pdev;
46158+
46159+ if (!be_physfn(adapter))
46160+ return false;
46161+
46162+ switch (pdev->subsystem_device) {
46163+ case OC_SUBSYS_DEVICE_ID1:
46164+ case OC_SUBSYS_DEVICE_ID2:
46165+ case OC_SUBSYS_DEVICE_ID3:
46166+ case OC_SUBSYS_DEVICE_ID4:
46167+ return false;
46168+ default:
46169+ return true;
46170+ }
46171+}
46172+
46173+static void
46174+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46175+{
46176+ struct be_adapter *adapter = netdev_priv(netdev);
46177+
46178+ if (be_is_wol_supported(adapter))
46179+ wol->supported = WAKE_MAGIC;
46180+ if (adapter->wol)
46181+ wol->wolopts = WAKE_MAGIC;
46182+ else
46183+ wol->wolopts = 0;
46184+ memset(&wol->sopass, 0, sizeof(wol->sopass));
46185+}
46186+
46187+static int
46188+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
46189+{
46190+ struct be_adapter *adapter = netdev_priv(netdev);
46191+
46192+ if (wol->wolopts & ~WAKE_MAGIC)
46193+ return -EOPNOTSUPP;
46194+
46195+ if (!be_is_wol_supported(adapter)) {
46196+ dev_warn(&adapter->pdev->dev,
46197+ "WOL not supported for this subsystemid: %x\n",
46198+ adapter->pdev->subsystem_device);
46199+ return -EOPNOTSUPP;
46200+ }
46201+
46202+ if (wol->wolopts & WAKE_MAGIC)
46203+ adapter->wol = true;
46204+ else
46205+ adapter->wol = false;
46206+
46207+ return 0;
46208+}
46209+
46210+static int
46211+be_test_ddr_dma(struct be_adapter *adapter)
46212+{
46213+ int ret, i;
46214+ struct be_dma_mem ddrdma_cmd;
46215+ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
46216+
46217+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
46218+ ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
46219+ &ddrdma_cmd.dma);
46220+ if (!ddrdma_cmd.va) {
46221+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
46222+ return -ENOMEM;
46223+ }
46224+
46225+ for (i = 0; i < 2; i++) {
46226+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
46227+ 4096, &ddrdma_cmd);
46228+ if (ret != 0)
46229+ goto err;
46230+ }
46231+
46232+err:
46233+ pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
46234+ ddrdma_cmd.va, ddrdma_cmd.dma);
46235+ return ret;
46236+}
46237+
46238+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
46239+ u64 *status)
46240+{
46241+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
46242+ loopback_type, 1);
46243+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
46244+ loopback_type, 1500,
46245+ 2, 0xabc);
46246+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
46247+ BE_NO_LOOPBACK, 1);
46248+ return *status;
46249+}
46250+
46251+static void
46252+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
46253+{
46254+ struct be_adapter *adapter = netdev_priv(netdev);
46255+ int link_status;
46256+ u8 mac_speed = 0;
46257+ u16 qos_link_speed = 0;
46258+
46259+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
46260+
46261+ if (test->flags & ETH_TEST_FL_OFFLINE) {
46262+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
46263+ &data[0]) != 0) {
46264+ test->flags |= ETH_TEST_FL_FAILED;
46265+ }
46266+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
46267+ &data[1]) != 0) {
46268+ test->flags |= ETH_TEST_FL_FAILED;
46269+ }
46270+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
46271+ &data[2]) != 0) {
46272+ test->flags |= ETH_TEST_FL_FAILED;
46273+ }
46274+ }
46275+
46276+ if (be_test_ddr_dma(adapter) != 0) {
46277+ data[3] = 1;
46278+ test->flags |= ETH_TEST_FL_FAILED;
46279+ }
46280+
46281+ if (be_cmd_link_status_query(adapter, &link_status, &mac_speed,
46282+ &qos_link_speed, 0) != 0) {
46283+ test->flags |= ETH_TEST_FL_FAILED;
46284+ data[4] = -1;
46285+ } else if (!mac_speed) {
46286+ test->flags |= ETH_TEST_FL_FAILED;
46287+ data[4] = 1;
46288+ }
46289+
46290+}
46291+
46292+#ifdef HAVE_ETHTOOL_FLASH
46293 static int
46294 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46295 {
46296@@ -347,11 +847,73 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
46297
46298 return be_load_fw(adapter, file_name);
46299 }
46300+#endif
46301
46302-const struct ethtool_ops be_ethtool_ops = {
46303+static int
46304+be_get_eeprom_len(struct net_device *netdev)
46305+{
46306+ return BE_READ_SEEPROM_LEN;
46307+}
46308+
46309+static int
46310+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
46311+ uint8_t *data)
46312+{
46313+ struct be_adapter *adapter = netdev_priv(netdev);
46314+ struct be_dma_mem eeprom_cmd;
46315+ struct be_cmd_resp_seeprom_read *resp;
46316+ int status;
46317+
46318+ if (!eeprom->len)
46319+ return -EINVAL;
46320+
46321+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
46322+
46323+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
46324+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
46325+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
46326+ &eeprom_cmd.dma);
46327+
46328+ if (!eeprom_cmd.va) {
46329+ dev_err(&adapter->pdev->dev,
46330+ "Memory allocation failure. Could not read eeprom\n");
46331+ return -ENOMEM;
46332+ }
46333+
46334+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
46335+
46336+ if (!status) {
46337+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
46338+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
46339+ }
46340+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
46341+ eeprom_cmd.dma);
46342+
46343+ return status;
46344+}
46345+
46346+static int be_set_tso(struct net_device *netdev, uint32_t data)
46347+{
46348+ if (data) {
46349+ netdev->features |= NETIF_F_TSO;
46350+ netdev->features |= NETIF_F_TSO6;
46351+ } else {
46352+ netdev->features &= ~NETIF_F_TSO;
46353+ netdev->features &= ~NETIF_F_TSO6;
46354+ }
46355+ return 0;
46356+}
46357+
46358+
46359+struct ethtool_ops be_ethtool_ops = {
46360 .get_settings = be_get_settings,
46361+ .set_settings = be_set_settings,
46362 .get_drvinfo = be_get_drvinfo,
46363+ .get_wol = be_get_wol,
46364+ .set_wol = be_set_wol,
46365 .get_link = ethtool_op_get_link,
46366+ .get_eeprom_len = be_get_eeprom_len,
46367+ .get_eeprom = be_read_eeprom,
46368 .get_coalesce = be_get_coalesce,
46369 .set_coalesce = be_set_coalesce,
46370 .get_ringparam = be_get_ringparam,
46371@@ -364,9 +926,21 @@ const struct ethtool_ops be_ethtool_ops = {
46372 .get_sg = ethtool_op_get_sg,
46373 .set_sg = ethtool_op_set_sg,
46374 .get_tso = ethtool_op_get_tso,
46375- .set_tso = ethtool_op_set_tso,
46376+ .set_tso = be_set_tso,
46377 .get_strings = be_get_stat_strings,
46378+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
46379+ .phys_id = be_phys_id,
46380 .get_stats_count = be_get_stats_count,
46381+ .self_test_count = be_self_test_count,
46382+#else
46383+ .set_phys_id = be_set_phys_id,
46384+ .get_sset_count = be_get_sset_count,
46385+#endif
46386 .get_ethtool_stats = be_get_ethtool_stats,
46387+ .get_regs_len = be_get_reg_len,
46388+ .get_regs = be_get_regs,
46389+#ifdef HAVE_ETHTOOL_FLASH
46390 .flash_device = be_do_flash,
46391+#endif
46392+ .self_test = be_self_test
46393 };
46394diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
46395index a3394b4..f871d8c 100644
46396--- a/drivers/net/benet/be_hw.h
46397+++ b/drivers/net/benet/be_hw.h
46398@@ -1,18 +1,18 @@
46399 /*
46400- * Copyright (C) 2005 - 2009 ServerEngines
46401+ * Copyright (C) 2005 - 2011 Emulex
46402 * All rights reserved.
46403 *
46404 * This program is free software; you can redistribute it and/or
46405 * modify it under the terms of the GNU General Public License version 2
46406- * as published by the Free Software Foundation. The full GNU General
46407+ * as published by the Free Software Foundation. The full GNU General
46408 * Public License is included in this distribution in the file called COPYING.
46409 *
46410 * Contact Information:
46411- * linux-drivers@serverengines.com
46412+ * linux-drivers@emulex.com
46413 *
46414- * ServerEngines
46415- * 209 N. Fair Oaks Ave
46416- * Sunnyvale, CA 94085
46417+ * Emulex
46418+ * 3333 Susan Street
46419+ * Costa Mesa, CA 92626
46420 */
46421
46422 /********* Mailbox door bell *************/
46423@@ -26,24 +26,34 @@
46424 * queue entry.
46425 */
46426 #define MPU_MAILBOX_DB_OFFSET 0x160
46427-#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46428+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
46429 #define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
46430
46431-#define MPU_EP_CONTROL 0
46432+#define MPU_EP_CONTROL 0
46433
46434 /********** MPU semphore ******************/
46435-#define MPU_EP_SEMAPHORE_OFFSET 0xac
46436+#define MPU_EP_SEMAPHORE_OFFSET 0xac
46437+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
46438 #define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
46439 #define EP_SEMAPHORE_POST_ERR_MASK 0x1
46440 #define EP_SEMAPHORE_POST_ERR_SHIFT 31
46441 /* MPU semphore POST stage values */
46442-#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46443-#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46444+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
46445+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
46446 #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
46447 #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
46448
46449+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
46450+#define SLIPORT_STATUS_OFFSET 0x404
46451+#define SLIPORT_CONTROL_OFFSET 0x408
46452+
46453+#define SLIPORT_STATUS_ERR_MASK 0x80000000
46454+#define SLIPORT_STATUS_RN_MASK 0x01000000
46455+#define SLIPORT_STATUS_RDY_MASK 0x00800000
46456+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
46457+
46458 /********* Memory BAR register ************/
46459-#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46460+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
46461 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
46462 * Disable" may still globally block interrupts in addition to individual
46463 * interrupt masks; a mechanism for the device driver to block all interrupts
46464@@ -52,13 +62,70 @@
46465 */
46466 #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
46467
46468+/********* Link Status CSR ****************/
46469+#define PCICFG_PCIE_LINK_STATUS_OFFSET 0xd0
46470+#define PCIE_LINK_STATUS_SPEED_MASK 0xFF /* bits 16 - 19 */
46471+#define PCIE_LINK_STATUS_SPEED_SHIFT 16
46472+#define PCIE_LINK_STATUS_NEG_WIDTH_MASK 0x3F /* bits 20 - 25 */
46473+#define PCIE_LINK_STATUS_NEG_WIDTH_SHIFT 20
46474+
46475+/********* Link Capability CSR ************/
46476+#define PCICFG_PCIE_LINK_CAP_OFFSET 0xcc
46477+#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xFF /* bits 0 - 3 */
46478+#define PCIE_LINK_CAP_MAX_SPEED_SHIFT 0
46479+#define PCIE_LINK_CAP_MAX_WIDTH_MASK 0x3F /* bits 4 - 9 */
46480+#define PCIE_LINK_CAP_MAX_WIDTH_SHIFT 4
46481+
46482+/********* PCI Function Capability ************/
46483+#define BE_FUNCTION_CAPS_UNCLASSIFIED_STATS 0x1
46484+#define BE_FUNCTION_CAPS_RSS 0x2
46485+#define BE_FUNCTION_CAPS_PROMISCUOUS 0x4
46486+#define BE_FUNCTION_CAPS_LEGACY_MODE 0x8
46487+
46488+/********* Power managment (WOL) **********/
46489+#define PCICFG_PM_CONTROL_OFFSET 0x44
46490+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
46491+
46492+/********* Online Control Registers *******/
46493+#define PCICFG_ONLINE0 0xB0
46494+#define PCICFG_ONLINE1 0xB4
46495+
46496+/********* UE Status and Mask Registers ***/
46497+#define PCICFG_UE_STATUS_LOW 0xA0
46498+#define PCICFG_UE_STATUS_HIGH 0xA4
46499+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
46500+#define PCICFG_UE_STATUS_HI_MASK 0xAC
46501+
46502+/******** SLI_INTF ***********************/
46503+#define SLI_INTF_REG_OFFSET 0x58
46504+#define SLI_INTF_VALID_MASK 0xE0000000
46505+#define SLI_INTF_VALID 0xC0000000
46506+#define SLI_INTF_HINT2_MASK 0x1F000000
46507+#define SLI_INTF_HINT2_SHIFT 24
46508+#define SLI_INTF_HINT1_MASK 0x00FF0000
46509+#define SLI_INTF_HINT1_SHIFT 16
46510+#define SLI_INTF_FAMILY_MASK 0x00000F00
46511+#define SLI_INTF_FAMILY_SHIFT 8
46512+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
46513+#define SLI_INTF_IF_TYPE_SHIFT 12
46514+#define SLI_INTF_REV_MASK 0x000000F0
46515+#define SLI_INTF_REV_SHIFT 4
46516+#define SLI_INTF_FT_MASK 0x00000001
46517+
46518+/* SLI family */
46519+#define BE_SLI_FAMILY 0x0
46520+#define LANCER_A0_SLI_FAMILY 0xA
46521+
46522 /********* ISR0 Register offset **********/
46523-#define CEV_ISR0_OFFSET 0xC18
46524+#define CEV_ISR0_OFFSET 0xC18
46525 #define CEV_ISR_SIZE 4
46526
46527 /********* Event Q door bell *************/
46528 #define DB_EQ_OFFSET DB_CQ_OFFSET
46529 #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
46530+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
46531+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
46532+
46533 /* Clear the interrupt for this eq */
46534 #define DB_EQ_CLR_SHIFT (9) /* bit 9 */
46535 /* Must be 1 */
46536@@ -69,12 +136,16 @@
46537 #define DB_EQ_REARM_SHIFT (29) /* bit 29 */
46538
46539 /********* Compl Q door bell *************/
46540-#define DB_CQ_OFFSET 0x120
46541+#define DB_CQ_OFFSET 0x120
46542 #define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46543+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
46544+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
46545+ placing at 11-15 */
46546+
46547 /* Number of event entries processed */
46548-#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46549+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46550 /* Rearm bit */
46551-#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46552+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46553
46554 /********** TX ULP door bell *************/
46555 #define DB_TXULP1_OFFSET 0x60
46556@@ -84,25 +155,103 @@
46557 #define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
46558
46559 /********** RQ(erx) door bell ************/
46560-#define DB_RQ_OFFSET 0x100
46561+#define DB_RQ_OFFSET 0x100
46562 #define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46563 /* Number of rx frags posted */
46564 #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
46565
46566 /********** MCC door bell ************/
46567-#define DB_MCCQ_OFFSET 0x140
46568+#define DB_MCCQ_OFFSET 0x140
46569 #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
46570 /* Number of entries posted */
46571 #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
46572
46573+/********** SRIOV VF PCICFG OFFSET ********/
46574+#define SRIOV_VF_PCICFG_OFFSET (4096)
46575+
46576+/********** FAT TABLE ********/
46577+#define RETRIEVE_FAT 0
46578+#define QUERY_FAT 1
46579+
46580+/* Flashrom related descriptors */
46581+#define IMAGE_TYPE_FIRMWARE 160
46582+#define IMAGE_TYPE_BOOTCODE 224
46583+#define IMAGE_TYPE_OPTIONROM 32
46584+
46585+#define NUM_FLASHDIR_ENTRIES 32
46586+
46587+#define IMG_TYPE_ISCSI_ACTIVE 0
46588+#define IMG_TYPE_REDBOOT 1
46589+#define IMG_TYPE_BIOS 2
46590+#define IMG_TYPE_PXE_BIOS 3
46591+#define IMG_TYPE_FCOE_BIOS 8
46592+#define IMG_TYPE_ISCSI_BACKUP 9
46593+#define IMG_TYPE_FCOE_FW_ACTIVE 10
46594+#define IMG_TYPE_FCOE_FW_BACKUP 11
46595+#define IMG_TYPE_NCSI_FW 13
46596+#define IMG_TYPE_PHY_FW 99
46597+#define TN_8022 13
46598+
46599+#define ILLEGAL_IOCTL_REQ 2
46600+#define FLASHROM_OPER_PHY_FLASH 9
46601+#define FLASHROM_OPER_PHY_SAVE 10
46602+#define FLASHROM_OPER_FLASH 1
46603+#define FLASHROM_OPER_SAVE 2
46604+#define FLASHROM_OPER_REPORT 4
46605+
46606+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
46607+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
46608+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
46609+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
46610+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
46611+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
46612+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
46613+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 (262144)
46614+
46615+#define FLASH_NCSI_MAGIC (0x16032009)
46616+#define FLASH_NCSI_DISABLED (0)
46617+#define FLASH_NCSI_ENABLED (1)
46618+
46619+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
46620+
46621+/* Offsets for components on Flash. */
46622+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
46623+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
46624+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
46625+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
46626+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
46627+#define FLASH_PXE_BIOS_START_g2 (7864320)
46628+#define FLASH_FCoE_BIOS_START_g2 (524288)
46629+#define FLASH_REDBOOT_START_g2 (0)
46630+
46631+#define FLASH_NCSI_START_g3 (15990784)
46632+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
46633+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
46634+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
46635+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
46636+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
46637+#define FLASH_PXE_BIOS_START_g3 (13107200)
46638+#define FLASH_FCoE_BIOS_START_g3 (13631488)
46639+#define FLASH_REDBOOT_START_g3 (262144)
46640+#define FLASH_PHY_FW_START_g3 (1310720)
46641+
46642+/************* Rx Packet Type Encoding **************/
46643+#define BE_UNICAST_PACKET 0
46644+#define BE_MULTICAST_PACKET 1
46645+#define BE_BROADCAST_PACKET 2
46646+#define BE_RSVD_PACKET 3
46647+
46648 /*
46649 * BE descriptors: host memory data structures whose formats
46650 * are hardwired in BE silicon.
46651 */
46652 /* Event Queue Descriptor */
46653-#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46654-#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46655-#define EQ_ENTRY_RES_ID_SHIFT 16
46656+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46657+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46658+#define EQ_ENTRY_RES_ID_SHIFT 16
46659+
46660+#define BE_MAC_PROMISCUOUS 62 /* Promiscuous mode */
46661+
46662 struct be_eq_entry {
46663 u32 evt;
46664 };
46665@@ -126,7 +275,7 @@ struct amap_eth_hdr_wrb {
46666 u8 event;
46667 u8 crc;
46668 u8 forward;
46669- u8 ipsec;
46670+ u8 lso6;
46671 u8 mgmt;
46672 u8 ipcs;
46673 u8 udpcs;
46674@@ -151,7 +300,7 @@ struct be_eth_hdr_wrb {
46675 * offset/shift/mask of each field */
46676 struct amap_eth_tx_compl {
46677 u8 wrb_index[16]; /* dword 0 */
46678- u8 ct[2]; /* dword 0 */
46679+ u8 ct[2]; /* dword 0 */
46680 u8 port[2]; /* dword 0 */
46681 u8 rsvd0[8]; /* dword 0 */
46682 u8 status[4]; /* dword 0 */
46683@@ -179,10 +328,10 @@ struct be_eth_rx_d {
46684
46685 /* RX Compl Queue Descriptor */
46686
46687-/* Pseudo amap definition for eth_rx_compl in which each bit of the
46688- * actual structure is defined as a byte: used to calculate
46689+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
46690+ * each bit of the actual structure is defined as a byte: used to calculate
46691 * offset/shift/mask of each field */
46692-struct amap_eth_rx_compl {
46693+struct amap_eth_rx_compl_v0 {
46694 u8 vlan_tag[16]; /* dword 0 */
46695 u8 pktsize[14]; /* dword 0 */
46696 u8 port; /* dword 0 */
46697@@ -213,39 +362,91 @@ struct amap_eth_rx_compl {
46698 u8 rsshash[32]; /* dword 3 */
46699 } __packed;
46700
46701+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
46702+ * each bit of the actual structure is defined as a byte: used to calculate
46703+ * offset/shift/mask of each field */
46704+struct amap_eth_rx_compl_v1 {
46705+ u8 vlan_tag[16]; /* dword 0 */
46706+ u8 pktsize[14]; /* dword 0 */
46707+ u8 vtp; /* dword 0 */
46708+ u8 ip_opt; /* dword 0 */
46709+ u8 err; /* dword 1 */
46710+ u8 rsshp; /* dword 1 */
46711+ u8 ipf; /* dword 1 */
46712+ u8 tcpf; /* dword 1 */
46713+ u8 udpf; /* dword 1 */
46714+ u8 ipcksm; /* dword 1 */
46715+ u8 l4_cksm; /* dword 1 */
46716+ u8 ip_version; /* dword 1 */
46717+ u8 macdst[7]; /* dword 1 */
46718+ u8 rsvd0; /* dword 1 */
46719+ u8 fragndx[10]; /* dword 1 */
46720+ u8 ct[2]; /* dword 1 */
46721+ u8 sw; /* dword 1 */
46722+ u8 numfrags[3]; /* dword 1 */
46723+ u8 rss_flush; /* dword 2 */
46724+ u8 cast_enc[2]; /* dword 2 */
46725+ u8 vtm; /* dword 2 */
46726+ u8 rss_bank; /* dword 2 */
46727+ u8 port[2]; /* dword 2 */
46728+ u8 vntagp; /* dword 2 */
46729+ u8 header_len[8]; /* dword 2 */
46730+ u8 header_split[2]; /* dword 2 */
46731+ u8 rsvd1[13]; /* dword 2 */
46732+ u8 valid; /* dword 2 */
46733+ u8 rsshash[32]; /* dword 3 */
46734+} __packed;
46735+
46736 struct be_eth_rx_compl {
46737 u32 dw[4];
46738 };
46739
46740-/* Flashrom related descriptors */
46741-#define IMAGE_TYPE_FIRMWARE 160
46742-#define IMAGE_TYPE_BOOTCODE 224
46743-#define IMAGE_TYPE_OPTIONROM 32
46744+struct mgmt_hba_attribs {
46745+ u8 flashrom_version_string[32];
46746+ u8 manufacturer_name[32];
46747+ u32 supported_modes;
46748+ u32 rsvd0[3];
46749+ u8 ncsi_ver_string[12];
46750+ u32 default_extended_timeout;
46751+ u8 controller_model_number[32];
46752+ u8 controller_description[64];
46753+ u8 controller_serial_number[32];
46754+ u8 ip_version_string[32];
46755+ u8 firmware_version_string[32];
46756+ u8 bios_version_string[32];
46757+ u8 redboot_version_string[32];
46758+ u8 driver_version_string[32];
46759+ u8 fw_on_flash_version_string[32];
46760+ u32 functionalities_supported;
46761+ u16 max_cdblength;
46762+ u8 asic_revision;
46763+ u8 generational_guid[16];
46764+ u8 hba_port_count;
46765+ u16 default_link_down_timeout;
46766+ u8 iscsi_ver_min_max;
46767+ u8 multifunction_device;
46768+ u8 cache_valid;
46769+ u8 hba_status;
46770+ u8 max_domains_supported;
46771+ u8 phy_port;
46772+ u32 firmware_post_status;
46773+ u32 hba_mtu[8];
46774+ u32 rsvd1[4];
46775+};
46776
46777-#define NUM_FLASHDIR_ENTRIES 32
46778-
46779-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
46780-#define FLASHROM_TYPE_BIOS 2
46781-#define FLASHROM_TYPE_PXE_BIOS 3
46782-#define FLASHROM_TYPE_FCOE_BIOS 8
46783-#define FLASHROM_TYPE_ISCSI_BACKUP 9
46784-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
46785-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
46786-
46787-#define FLASHROM_OPER_FLASH 1
46788-#define FLASHROM_OPER_SAVE 2
46789-
46790-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
46791-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
46792-
46793-/* Offsets for components on Flash. */
46794-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
46795-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
46796-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
46797-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
46798-#define FLASH_iSCSI_BIOS_START (7340032)
46799-#define FLASH_PXE_BIOS_START (7864320)
46800-#define FLASH_FCoE_BIOS_START (524288)
46801+struct mgmt_controller_attrib {
46802+ struct mgmt_hba_attribs hba_attribs;
46803+ u16 pci_vendor_id;
46804+ u16 pci_device_id;
46805+ u16 pci_sub_vendor_id;
46806+ u16 pci_sub_system_id;
46807+ u8 pci_bus_number;
46808+ u8 pci_device_number;
46809+ u8 pci_function_number;
46810+ u8 interface_type;
46811+ u64 unique_identifier;
46812+ u32 rsvd0[5];
46813+};
46814
46815 struct controller_id {
46816 u32 vendor;
46817@@ -254,7 +455,20 @@ struct controller_id {
46818 u32 subdevice;
46819 };
46820
46821-struct flash_file_hdr {
46822+struct flash_comp {
46823+ unsigned long offset;
46824+ int optype;
46825+ int size;
46826+};
46827+
46828+struct image_hdr {
46829+ u32 imageid;
46830+ u32 imageoffset;
46831+ u32 imagelength;
46832+ u32 image_checksum;
46833+ u8 image_version[32];
46834+};
46835+struct flash_file_hdr_g2 {
46836 u8 sign[32];
46837 u32 cksum;
46838 u32 antidote;
46839@@ -266,6 +480,17 @@ struct flash_file_hdr {
46840 u8 build[24];
46841 };
46842
46843+struct flash_file_hdr_g3 {
46844+ u8 sign[52];
46845+ u8 ufi_version[4];
46846+ u32 file_len;
46847+ u32 cksum;
46848+ u32 antidote;
46849+ u32 num_imgs;
46850+ u8 build[24];
46851+ u8 rsvd[32];
46852+};
46853+
46854 struct flash_section_hdr {
46855 u32 format_rev;
46856 u32 cksum;
46857@@ -299,3 +524,19 @@ struct flash_section_info {
46858 struct flash_section_hdr fsec_hdr;
46859 struct flash_section_entry fsec_entry[32];
46860 };
46861+
46862+struct flash_ncsi_image_hdr {
46863+ u32 magic;
46864+ u8 hdr_len;
46865+ u8 type;
46866+ u16 hdr_ver;
46867+ u8 rsvd0[2];
46868+ u16 load_offset;
46869+ u32 len;
46870+ u32 flash_offset;
46871+ u8 ver[16];
46872+ u8 name[24];
46873+ u32 img_cksum;
46874+ u8 rsvd1[4];
46875+ u32 hdr_cksum;
46876+};
46877diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
46878index 000e377..f501aa3 100644
46879--- a/drivers/net/benet/be_main.c
46880+++ b/drivers/net/benet/be_main.c
46881@@ -1,18 +1,18 @@
46882 /*
46883- * Copyright (C) 2005 - 2009 ServerEngines
46884+ * Copyright (C) 2005 - 2011 Emulex
46885 * All rights reserved.
46886 *
46887 * This program is free software; you can redistribute it and/or
46888 * modify it under the terms of the GNU General Public License version 2
46889- * as published by the Free Software Foundation. The full GNU General
46890+ * as published by the Free Software Foundation. The full GNU General
46891 * Public License is included in this distribution in the file called COPYING.
46892 *
46893 * Contact Information:
46894- * linux-drivers@serverengines.com
46895+ * linux-drivers@emulex.com
46896 *
46897- * ServerEngines
46898- * 209 N. Fair Oaks Ave
46899- * Sunnyvale, CA 94085
46900+ * Emulex
46901+ * 3333 Susan Street
46902+ * Costa Mesa, CA 92626
46903 */
46904
46905 #include "be.h"
46906@@ -22,23 +22,119 @@
46907 MODULE_VERSION(DRV_VER);
46908 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46909 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
46910-MODULE_AUTHOR("ServerEngines Corporation");
46911+MODULE_AUTHOR("Emulex Corporation");
46912 MODULE_LICENSE("GPL");
46913+MODULE_INFO(supported, "external");
46914
46915-static unsigned int rx_frag_size = 2048;
46916-module_param(rx_frag_size, uint, S_IRUGO);
46917-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
46918+static ushort rx_frag_size = 2048;
46919+static unsigned int num_vfs;
46920+static unsigned int msix = 1;
46921+module_param(rx_frag_size, ushort, S_IRUGO);
46922+module_param(num_vfs, uint, S_IRUGO);
46923+module_param(msix, uint, S_IRUGO);
46924+MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer"
46925+ " - 2048 (default), 4096 or 8192");
46926+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
46927+MODULE_PARM_DESC(msix, "Enable and disable the MSI"
46928+ "x (By default MSIx is enabled)");
46929+static unsigned int gro = 1;
46930+module_param(gro, uint, S_IRUGO);
46931+MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default");
46932+
46933+static unsigned int multi_rxq = true;
46934+module_param(multi_rxq, uint, S_IRUGO);
46935+MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
46936
46937 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
46938 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
46939 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
46940 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46941 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46942- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
46943+ /*
46944+ * Lancer is not part of Palau 4.0
46945+ * { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
46946+ */
46947 { 0 }
46948 };
46949 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46950
46951+/* UE Status Low CSR */
46952+static char *ue_status_low_desc[] = {
46953+ "CEV",
46954+ "CTX",
46955+ "DBUF",
46956+ "ERX",
46957+ "Host",
46958+ "MPU",
46959+ "NDMA",
46960+ "PTC ",
46961+ "RDMA ",
46962+ "RXF ",
46963+ "RXIPS ",
46964+ "RXULP0 ",
46965+ "RXULP1 ",
46966+ "RXULP2 ",
46967+ "TIM ",
46968+ "TPOST ",
46969+ "TPRE ",
46970+ "TXIPS ",
46971+ "TXULP0 ",
46972+ "TXULP1 ",
46973+ "UC ",
46974+ "WDMA ",
46975+ "TXULP2 ",
46976+ "HOST1 ",
46977+ "P0_OB_LINK ",
46978+ "P1_OB_LINK ",
46979+ "HOST_GPIO ",
46980+ "MBOX ",
46981+ "AXGMAC0",
46982+ "AXGMAC1",
46983+ "JTAG",
46984+ "MPU_INTPEND"
46985+};
46986+
46987+/* UE Status High CSR */
46988+static char *ue_status_hi_desc[] = {
46989+ "LPCMEMHOST",
46990+ "MGMT_MAC",
46991+ "PCS0ONLINE",
46992+ "MPU_IRAM",
46993+ "PCS1ONLINE",
46994+ "PCTL0",
46995+ "PCTL1",
46996+ "PMEM",
46997+ "RR",
46998+ "TXPB",
46999+ "RXPP",
47000+ "XAUI",
47001+ "TXP",
47002+ "ARM",
47003+ "IPC",
47004+ "HOST2",
47005+ "HOST3",
47006+ "HOST4",
47007+ "HOST5",
47008+ "HOST6",
47009+ "HOST7",
47010+ "HOST8",
47011+ "HOST9",
47012+ "NETC",
47013+ "Unknown",
47014+ "Unknown",
47015+ "Unknown",
47016+ "Unknown",
47017+ "Unknown",
47018+ "Unknown",
47019+ "Unknown",
47020+ "Unknown"
47021+};
47022+
47023+static inline bool be_multi_rxq(struct be_adapter *adapter)
47024+{
47025+ return (adapter->num_rx_qs > 1);
47026+}
47027+
47028 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
47029 {
47030 struct be_dma_mem *mem = &q->dma_mem;
47031@@ -69,6 +165,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
47032 u32 reg = ioread32(addr);
47033 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47034
47035+ if (adapter->eeh_err)
47036+ return;
47037+
47038 if (!enabled && enable)
47039 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
47040 else if (enabled && !enable)
47041@@ -84,6 +183,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47042 u32 val = 0;
47043 val |= qid & DB_RQ_RING_ID_MASK;
47044 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
47045+
47046+ wmb();
47047 iowrite32(val, adapter->db + DB_RQ_OFFSET);
47048 }
47049
47050@@ -92,6 +193,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
47051 u32 val = 0;
47052 val |= qid & DB_TXULP_RING_ID_MASK;
47053 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
47054+
47055+ wmb();
47056 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
47057 }
47058
47059@@ -100,6 +203,12 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
47060 {
47061 u32 val = 0;
47062 val |= qid & DB_EQ_RING_ID_MASK;
47063+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
47064+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
47065+
47066+ if (adapter->eeh_err)
47067+ return;
47068+
47069 if (arm)
47070 val |= 1 << DB_EQ_REARM_SHIFT;
47071 if (clear_int)
47072@@ -113,6 +222,12 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
47073 {
47074 u32 val = 0;
47075 val |= qid & DB_CQ_RING_ID_MASK;
47076+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
47077+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
47078+
47079+ if (adapter->eeh_err)
47080+ return;
47081+
47082 if (arm)
47083 val |= 1 << DB_CQ_REARM_SHIFT;
47084 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
47085@@ -124,96 +239,250 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
47086 struct be_adapter *adapter = netdev_priv(netdev);
47087 struct sockaddr *addr = p;
47088 int status = 0;
47089+ u8 current_mac[ETH_ALEN];
47090+ u32 pmac_id = adapter->pmac_id;
47091
47092- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
47093+ if (!is_valid_ether_addr(addr->sa_data))
47094+ return -EADDRNOTAVAIL;
47095+
47096+ status = be_cmd_mac_addr_query(adapter, current_mac,
47097+ MAC_ADDRESS_TYPE_NETWORK, false,
47098+ adapter->if_handle);
47099 if (status)
47100- return status;
47101+ goto err;
47102+
47103+ if (!memcmp(addr->sa_data, current_mac, ETH_ALEN))
47104+ goto done;
47105
47106 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
47107- adapter->if_handle, &adapter->pmac_id);
47108- if (!status)
47109+ adapter->if_handle, &adapter->pmac_id, 0);
47110+
47111+ if (!status) {
47112+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
47113+ pmac_id, 0);
47114 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
47115+ goto done;
47116+ }
47117
47118- return status;
47119+err:
47120+ if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
47121+ return -EPERM;
47122+ else
47123+ dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n",
47124+ addr->sa_data);
47125+done:
47126+ return status;
47127+}
47128+
47129+static void populate_be2_stats(struct be_adapter *adapter)
47130+{
47131+
47132+ struct be_drv_stats *drvs = &adapter->drv_stats;
47133+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47134+ struct be_port_rxf_stats_v0 *port_stats =
47135+ be_port_rxf_stats_from_cmd(adapter);
47136+ struct be_rxf_stats_v0 *rxf_stats =
47137+ be_rxf_stats_from_cmd(adapter);
47138+
47139+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
47140+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
47141+ drvs->rx_control_frames = port_stats->rx_control_frames;
47142+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47143+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47144+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47145+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47146+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47147+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47148+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
47149+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47150+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47151+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47152+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47153+ drvs->rx_input_fifo_overflow_drop =
47154+ port_stats->rx_input_fifo_overflow;
47155+ drvs->rx_dropped_header_too_small =
47156+ port_stats->rx_dropped_header_too_small;
47157+ drvs->rx_address_match_errors =
47158+ port_stats->rx_address_match_errors;
47159+ drvs->rx_alignment_symbol_errors =
47160+ port_stats->rx_alignment_symbol_errors;
47161+
47162+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
47163+ drvs->tx_controlframes = port_stats->tx_controlframes;
47164+
47165+ if (adapter->port_num)
47166+ drvs->jabber_events =
47167+ rxf_stats->port1_jabber_events;
47168+ else
47169+ drvs->jabber_events =
47170+ rxf_stats->port0_jabber_events;
47171+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47172+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47173+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47174+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47175+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
47176+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47177+ drvs->rx_drops_no_tpre_descr =
47178+ rxf_stats->rx_drops_no_tpre_descr;
47179+ drvs->rx_drops_too_many_frags =
47180+ rxf_stats->rx_drops_too_many_frags;
47181+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47182+}
47183+
47184+static void populate_be3_stats(struct be_adapter *adapter)
47185+{
47186+ struct be_drv_stats *drvs = &adapter->drv_stats;
47187+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
47188+
47189+ struct be_rxf_stats_v1 *rxf_stats =
47190+ be_rxf_stats_from_cmd(adapter);
47191+ struct be_port_rxf_stats_v1 *port_stats =
47192+ be_port_rxf_stats_from_cmd(adapter);
47193+
47194+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
47195+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
47196+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
47197+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
47198+ drvs->rx_control_frames = port_stats->rx_control_frames;
47199+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
47200+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
47201+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
47202+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
47203+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
47204+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
47205+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
47206+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
47207+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
47208+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
47209+ drvs->rx_dropped_header_too_small =
47210+ port_stats->rx_dropped_header_too_small;
47211+ drvs->rx_input_fifo_overflow_drop =
47212+ port_stats->rx_input_fifo_overflow_drop;
47213+ drvs->rx_address_match_errors =
47214+ port_stats->rx_address_match_errors;
47215+ drvs->rx_alignment_symbol_errors =
47216+ port_stats->rx_alignment_symbol_errors;
47217+ drvs->rxpp_fifo_overflow_drop =
47218+ port_stats->rxpp_fifo_overflow_drop;
47219+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
47220+ drvs->tx_controlframes = port_stats->tx_controlframes;
47221+ drvs->jabber_events = port_stats->jabber_events;
47222+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
47223+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
47224+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
47225+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
47226+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
47227+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
47228+ drvs->rx_drops_no_tpre_descr =
47229+ rxf_stats->rx_drops_no_tpre_descr;
47230+ drvs->rx_drops_too_many_frags =
47231+ rxf_stats->rx_drops_too_many_frags;
47232+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
47233+}
47234+
47235+
47236+static void accumulate_16bit_val(u32 *acc, u16 val)
47237+{
47238+#define lo(x) (x & 0xFFFF)
47239+#define hi(x) (x & 0xFFFF0000)
47240+ bool wrapped = val < lo(*acc);
47241+ u32 newacc = hi(*acc) + val;
47242+
47243+ if (wrapped)
47244+ newacc += 65536;
47245+ ACCESS_ONCE_RW(*acc) = newacc;
47246+}
47247+
47248+void be_parse_stats(struct be_adapter *adapter)
47249+{
47250+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
47251+ struct be_rx_obj *rxo;
47252+ int i;
47253+
47254+ if (adapter->generation == BE_GEN3) {
47255+ populate_be3_stats(adapter);
47256+ } else {
47257+ populate_be2_stats(adapter);
47258+ }
47259+
47260+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
47261+ for_all_rx_queues(adapter, rxo, i) {
47262+ /* below erx HW counter can actually wrap around after
47263+ * 65535. Driver accumulates a 32-bit value
47264+ */
47265+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
47266+ (u16)erx->rx_drops_no_fragments[rxo->q.id]);
47267+ }
47268 }
47269
47270 void netdev_stats_update(struct be_adapter *adapter)
47271 {
47272- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
47273- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
47274- struct be_port_rxf_stats *port_stats =
47275- &rxf_stats->port[adapter->port_num];
47276- struct net_device_stats *dev_stats = &adapter->stats.net_stats;
47277- struct be_erx_stats *erx_stats = &hw_stats->erx;
47278+ struct be_drv_stats *drvs = &adapter->drv_stats;
47279+ struct net_device_stats *dev_stats = &adapter->net_stats;
47280+ struct be_rx_obj *rxo;
47281+ struct be_tx_obj *txo;
47282+ unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
47283+ int i;
47284
47285- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
47286- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
47287- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
47288- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
47289+ for_all_rx_queues(adapter, rxo, i) {
47290+ pkts += rx_stats(rxo)->rx_pkts;
47291+ bytes += rx_stats(rxo)->rx_bytes;
47292+ mcast += rx_stats(rxo)->rx_mcast_pkts;
47293+ drops += rx_stats(rxo)->rx_drops_no_frags;
47294+ }
47295+ dev_stats->rx_packets = pkts;
47296+ dev_stats->rx_bytes = bytes;
47297+ dev_stats->multicast = mcast;
47298+ dev_stats->rx_dropped = drops;
47299+
47300+ pkts = bytes = 0;
47301+ for_all_tx_queues(adapter, txo, i) {
47302+ pkts += tx_stats(txo)->be_tx_pkts;
47303+ bytes += tx_stats(txo)->be_tx_bytes;
47304+ }
47305+ dev_stats->tx_packets = pkts;
47306+ dev_stats->tx_bytes = bytes;
47307
47308 /* bad pkts received */
47309- dev_stats->rx_errors = port_stats->rx_crc_errors +
47310- port_stats->rx_alignment_symbol_errors +
47311- port_stats->rx_in_range_errors +
47312- port_stats->rx_out_range_errors +
47313- port_stats->rx_frame_too_long +
47314- port_stats->rx_dropped_too_small +
47315- port_stats->rx_dropped_too_short +
47316- port_stats->rx_dropped_header_too_small +
47317- port_stats->rx_dropped_tcp_length +
47318- port_stats->rx_dropped_runt +
47319- port_stats->rx_tcp_checksum_errs +
47320- port_stats->rx_ip_checksum_errs +
47321- port_stats->rx_udp_checksum_errs;
47322-
47323- /* no space in linux buffers: best possible approximation */
47324- dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
47325+ dev_stats->rx_errors = drvs->rx_crc_errors +
47326+ drvs->rx_alignment_symbol_errors +
47327+ drvs->rx_in_range_errors +
47328+ drvs->rx_out_range_errors +
47329+ drvs->rx_frame_too_long +
47330+ drvs->rx_dropped_too_small +
47331+ drvs->rx_dropped_too_short +
47332+ drvs->rx_dropped_header_too_small +
47333+ drvs->rx_dropped_tcp_length +
47334+ drvs->rx_dropped_runt +
47335+ drvs->rx_tcp_checksum_errs +
47336+ drvs->rx_ip_checksum_errs +
47337+ drvs->rx_udp_checksum_errs;
47338
47339 /* detailed rx errors */
47340- dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
47341- port_stats->rx_out_range_errors +
47342- port_stats->rx_frame_too_long;
47343+ dev_stats->rx_length_errors = drvs->rx_in_range_errors +
47344+ drvs->rx_out_range_errors +
47345+ drvs->rx_frame_too_long;
47346
47347- /* receive ring buffer overflow */
47348- dev_stats->rx_over_errors = 0;
47349-
47350- dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
47351+ dev_stats->rx_crc_errors = drvs->rx_crc_errors;
47352
47353 /* frame alignment errors */
47354- dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
47355+ dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
47356
47357 /* receiver fifo overrun */
47358 /* drops_no_pbuf is no per i/f, it's per BE card */
47359- dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
47360- port_stats->rx_input_fifo_overflow +
47361- rxf_stats->rx_drops_no_pbuf;
47362- /* receiver missed packetd */
47363- dev_stats->rx_missed_errors = 0;
47364-
47365- /* packet transmit problems */
47366- dev_stats->tx_errors = 0;
47367-
47368- /* no space available in linux */
47369- dev_stats->tx_dropped = 0;
47370-
47371- dev_stats->multicast = port_stats->rx_multicast_frames;
47372- dev_stats->collisions = 0;
47373-
47374- /* detailed tx_errors */
47375- dev_stats->tx_aborted_errors = 0;
47376- dev_stats->tx_carrier_errors = 0;
47377- dev_stats->tx_fifo_errors = 0;
47378- dev_stats->tx_heartbeat_errors = 0;
47379- dev_stats->tx_window_errors = 0;
47380+ dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
47381+ drvs->rx_input_fifo_overflow_drop +
47382+ drvs->rx_drops_no_pbuf;
47383 }
47384
47385-void be_link_status_update(struct be_adapter *adapter, bool link_up)
47386+void be_link_status_update(struct be_adapter *adapter, int link_status)
47387 {
47388 struct net_device *netdev = adapter->netdev;
47389
47390 /* If link came up or went down */
47391- if (adapter->link_up != link_up) {
47392- if (link_up) {
47393+ if (adapter->link_status != link_status) {
47394+ adapter->link_speed = -1;
47395+ if (link_status == LINK_UP) {
47396 netif_start_queue(netdev);
47397 netif_carrier_on(netdev);
47398 printk(KERN_INFO "%s: Link up\n", netdev->name);
47399@@ -222,15 +491,15 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
47400 netif_carrier_off(netdev);
47401 printk(KERN_INFO "%s: Link down\n", netdev->name);
47402 }
47403- adapter->link_up = link_up;
47404+ adapter->link_status = link_status;
47405 }
47406 }
47407
47408 /* Update the EQ delay n BE based on the RX frags consumed / sec */
47409-static void be_rx_eqd_update(struct be_adapter *adapter)
47410+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
47411 {
47412- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47413- struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
47414+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
47415+ struct be_rx_stats *stats = &rxo->stats;
47416 ulong now = jiffies;
47417 u32 eqd;
47418
47419@@ -247,19 +516,17 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
47420 if ((now - stats->rx_fps_jiffies) < HZ)
47421 return;
47422
47423- stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
47424+ stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
47425 ((now - stats->rx_fps_jiffies) / HZ);
47426
47427 stats->rx_fps_jiffies = now;
47428- stats->be_prev_rx_frags = stats->be_rx_frags;
47429- eqd = stats->be_rx_fps / 110000;
47430+ stats->prev_rx_frags = stats->rx_frags;
47431+ eqd = stats->rx_fps / 110000;
47432 eqd = eqd << 3;
47433 if (eqd > rx_eq->max_eqd)
47434 eqd = rx_eq->max_eqd;
47435 if (eqd < rx_eq->min_eqd)
47436 eqd = rx_eq->min_eqd;
47437- if (eqd < 10)
47438- eqd = 0;
47439 if (eqd != rx_eq->cur_eqd)
47440 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
47441
47442@@ -270,7 +537,7 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
47443 {
47444 struct be_adapter *adapter = netdev_priv(dev);
47445
47446- return &adapter->stats.net_stats;
47447+ return &adapter->net_stats;
47448 }
47449
47450 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47451@@ -284,9 +551,9 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
47452 return rate;
47453 }
47454
47455-static void be_tx_rate_update(struct be_adapter *adapter)
47456+static void be_tx_rate_update(struct be_tx_obj *txo)
47457 {
47458- struct be_drvr_stats *stats = drvr_stats(adapter);
47459+ struct be_tx_stats *stats = tx_stats(txo);
47460 ulong now = jiffies;
47461
47462 /* Wrapped around? */
47463@@ -305,10 +572,11 @@ static void be_tx_rate_update(struct be_adapter *adapter)
47464 }
47465 }
47466
47467-static void be_tx_stats_update(struct be_adapter *adapter,
47468+static void be_tx_stats_update(struct be_tx_obj *txo,
47469 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
47470 {
47471- struct be_drvr_stats *stats = drvr_stats(adapter);
47472+ struct be_tx_stats *stats = tx_stats(txo);
47473+
47474 stats->be_tx_reqs++;
47475 stats->be_tx_wrbs += wrb_cnt;
47476 stats->be_tx_bytes += copied;
47477@@ -318,7 +586,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
47478 }
47479
47480 /* Determine number of WRB entries needed to xmit data in an skb */
47481-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47482+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
47483+ bool *dummy)
47484 {
47485 int cnt = (skb->len > skb->data_len);
47486
47487@@ -326,12 +595,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
47488
47489 /* to account for hdr wrb */
47490 cnt++;
47491- if (cnt & 1) {
47492+ if (lancer_chip(adapter) || !(cnt & 1)) {
47493+ *dummy = false;
47494+ } else {
47495 /* add a dummy to make it an even num */
47496 cnt++;
47497 *dummy = true;
47498- } else
47499- *dummy = false;
47500+ }
47501 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
47502 return cnt;
47503 }
47504@@ -343,17 +613,31 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
47505 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
47506 }
47507
47508-static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47509- bool vlan, u32 wrb_cnt, u32 len)
47510+static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
47511+ struct sk_buff *skb, u32 wrb_cnt, u32 len)
47512 {
47513+ u16 vlan_tag = 0;
47514+
47515 memset(hdr, 0, sizeof(*hdr));
47516
47517 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
47518
47519- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
47520+ if (skb_is_gso(skb)) {
47521 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
47522 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
47523 hdr, skb_shinfo(skb)->gso_size);
47524+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
47525+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
47526+
47527+ if (lancer_A0_chip(adapter)) {
47528+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
47529+ if (is_tcp_pkt(skb))
47530+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47531+ tcpcs, hdr, 1);
47532+ else if (is_udp_pkt(skb))
47533+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47534+ udpcs, hdr, 1);
47535+ }
47536 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
47537 if (is_tcp_pkt(skb))
47538 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
47539@@ -361,10 +645,10 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47540 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
47541 }
47542
47543- if (vlan && vlan_tx_tag_present(skb)) {
47544+ if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
47545 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
47546- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
47547- hdr, vlan_tx_tag_get(skb));
47548+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
47549+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
47550 }
47551
47552 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
47553@@ -374,14 +658,13 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47554 }
47555
47556
47557-static int make_tx_wrbs(struct be_adapter *adapter,
47558+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
47559 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
47560 {
47561- u64 busaddr;
47562- u32 i, copied = 0;
47563+ dma_addr_t busaddr;
47564+ int i, copied = 0;
47565 struct pci_dev *pdev = adapter->pdev;
47566 struct sk_buff *first_skb = skb;
47567- struct be_queue_info *txq = &adapter->tx_obj.q;
47568 struct be_eth_wrb *wrb;
47569 struct be_eth_hdr_wrb *hdr;
47570
47571@@ -389,15 +672,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47572 atomic_add(wrb_cnt, &txq->used);
47573 queue_head_inc(txq);
47574
47575- if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
47576- dev_err(&pdev->dev, "TX DMA mapping failed\n");
47577- return 0;
47578- }
47579-
47580 if (skb->len > skb->data_len) {
47581- int len = skb->len - skb->data_len;
47582+ int len = skb_headlen(skb);
47583+ busaddr = pci_map_single(pdev, skb->data, len,
47584+ PCI_DMA_TODEVICE);
47585 wrb = queue_head_node(txq);
47586- busaddr = skb_shinfo(skb)->dma_head;
47587 wrb_fill(wrb, busaddr, len);
47588 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47589 queue_head_inc(txq);
47590@@ -407,8 +686,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47591 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
47592 struct skb_frag_struct *frag =
47593 &skb_shinfo(skb)->frags[i];
47594-
47595- busaddr = skb_shinfo(skb)->dma_maps[i];
47596+ busaddr = pci_map_page(pdev, frag->page,
47597+ frag->page_offset,
47598+ frag->size, PCI_DMA_TODEVICE);
47599 wrb = queue_head_node(txq);
47600 wrb_fill(wrb, busaddr, frag->size);
47601 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47602@@ -423,8 +703,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47603 queue_head_inc(txq);
47604 }
47605
47606- wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
47607- wrb_cnt, copied);
47608+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
47609 be_dws_cpu_to_le(hdr, sizeof(*hdr));
47610
47611 return copied;
47612@@ -434,19 +713,70 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47613 struct net_device *netdev)
47614 {
47615 struct be_adapter *adapter = netdev_priv(netdev);
47616- struct be_tx_obj *tx_obj = &adapter->tx_obj;
47617- struct be_queue_info *txq = &tx_obj->q;
47618+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
47619+ struct be_queue_info *txq = &txo->q;
47620 u32 wrb_cnt = 0, copied = 0;
47621 u32 start = txq->head;
47622 bool dummy_wrb, stopped = false;
47623
47624- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
47625+ if (unlikely((skb_shinfo(skb)->gso_segs > 1) &&
47626+ skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb))) {
47627+ tx_stats(txo)->be_ipv6_ext_hdr_tx_drop++;
47628+ goto tx_drop;
47629+ }
47630
47631- copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
47632+ /* If the skb is a large pkt forwarded to this interface
47633+ * after being LRO'd on another interface, drop the pkt.
47634+ * HW cannot handle such pkts. LRO must be disabled when
47635+ * using the server as a router.
47636+ */
47637+ if (!skb_is_gso(skb)) {
47638+ int eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
47639+ VLAN_ETH_HLEN : ETH_HLEN;
47640+
47641+ if ((skb->len - eth_hdr_len) > adapter->netdev->mtu)
47642+ goto tx_drop;
47643+ }
47644+
47645+ /* The ASIC is calculating checksum for Vlan tagged pkts
47646+ * though CSO is disabled.
47647+ * To work around this, insert the Vlan tag in the driver
47648+ * and donot set the vlan bit, cso bit in the Tx WRB.
47649+ */
47650+ if (unlikely(vlan_tx_tag_present(skb) &&
47651+ ((skb->ip_summed != CHECKSUM_PARTIAL) || (skb->len <= 60)))) {
47652+ /* Bug 28694: Don't embed the host VLAN tag in SKB
47653+ * when UMC mode enabled on that interface
47654+ */
47655+ if (!(adapter->function_mode & UMC_ENABLED)) {
47656+ skb = skb_share_check(skb, GFP_ATOMIC);
47657+ if (unlikely(!skb))
47658+ goto tx_drop;
47659+
47660+ skb = be_vlan_put_tag(skb,
47661+ be_get_tx_vlan_tag(adapter, skb));
47662+ if (unlikely(!skb))
47663+ goto tx_drop;
47664+
47665+ be_reset_skb_tx_vlan(skb);
47666+ }
47667+ }
47668+
47669+ /* Bug 12422: the stack can send us skbs with length more than 65535
47670+ * BE cannot handle such requests. Hack the extra data out and drop it.
47671+ */
47672+ if (skb->len > 65535) {
47673+ int err = __pskb_trim(skb, 65535);
47674+ BUG_ON(err);
47675+ }
47676+
47677+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
47678+
47679+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
47680 if (copied) {
47681 /* record the sent skb in the sent_skb table */
47682- BUG_ON(tx_obj->sent_skb_list[start]);
47683- tx_obj->sent_skb_list[start] = skb;
47684+ BUG_ON(txo->sent_skb_list[start]);
47685+ txo->sent_skb_list[start] = skb;
47686
47687 /* Ensure txq has space for the next skb; Else stop the queue
47688 * *BEFORE* ringing the tx doorbell, so that we serialze the
47689@@ -454,16 +784,21 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47690 */
47691 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
47692 txq->len) {
47693- netif_stop_queue(netdev);
47694+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
47695 stopped = true;
47696 }
47697
47698 be_txq_notify(adapter, txq->id, wrb_cnt);
47699
47700- be_tx_stats_update(adapter, wrb_cnt, copied,
47701+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
47702+ netdev->trans_start = jiffies;
47703+#endif
47704+
47705+ be_tx_stats_update(txo, wrb_cnt, copied,
47706 skb_shinfo(skb)->gso_segs, stopped);
47707 } else {
47708 txq->head = start;
47709+tx_drop:
47710 dev_kfree_skb_any(skb);
47711 }
47712 return NETDEV_TX_OK;
47713@@ -473,10 +808,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47714 {
47715 struct be_adapter *adapter = netdev_priv(netdev);
47716 if (new_mtu < BE_MIN_MTU ||
47717- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
47718+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
47719+ (ETH_HLEN + ETH_FCS_LEN))) {
47720 dev_info(&adapter->pdev->dev,
47721 "MTU must be between %d and %d bytes\n",
47722- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
47723+ BE_MIN_MTU,
47724+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
47725 return -EINVAL;
47726 }
47727 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
47728@@ -486,17 +823,19 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47729 }
47730
47731 /*
47732- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
47733- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
47734- * set the BE in promiscuous VLAN mode.
47735+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
47736+ * If the user configures more, place BE in vlan promiscuous mode.
47737 */
47738-static int be_vid_config(struct be_adapter *adapter)
47739+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
47740 {
47741 u16 vtag[BE_NUM_VLANS_SUPPORTED];
47742 u16 ntags = 0, i;
47743- int status;
47744+ int status = 0;
47745
47746- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
47747+ /* No need to change the VLAN state if the I/F is in promiscous */
47748+ if (adapter->promiscuous)
47749+ return 0;
47750+ if (adapter->vlans_added <= adapter->max_vlans) {
47751 /* Construct VLAN Table to give to HW */
47752 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
47753 if (adapter->vlan_tag[i]) {
47754@@ -504,47 +843,46 @@ static int be_vid_config(struct be_adapter *adapter)
47755 ntags++;
47756 }
47757 }
47758- status = be_cmd_vlan_config(adapter, adapter->if_handle,
47759- vtag, ntags, 1, 0);
47760+ /* Send command only if there is something to be programmed */
47761+ if (ntags)
47762+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
47763+ vtag, ntags, 1, 0);
47764 } else {
47765 status = be_cmd_vlan_config(adapter, adapter->if_handle,
47766- NULL, 0, 1, 1);
47767+ NULL, 0, 1, 1);
47768 }
47769+
47770 return status;
47771 }
47772
47773 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
47774 {
47775 struct be_adapter *adapter = netdev_priv(netdev);
47776- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47777- struct be_eq_obj *tx_eq = &adapter->tx_eq;
47778
47779- be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
47780- be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
47781 adapter->vlan_grp = grp;
47782- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
47783- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
47784 }
47785
47786 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
47787 {
47788 struct be_adapter *adapter = netdev_priv(netdev);
47789
47790- adapter->num_vlans++;
47791+ adapter->vlans_added++;
47792+
47793 adapter->vlan_tag[vid] = 1;
47794-
47795- be_vid_config(adapter);
47796+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
47797+ be_vid_config(adapter, false, 0);
47798 }
47799
47800 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
47801 {
47802 struct be_adapter *adapter = netdev_priv(netdev);
47803
47804- adapter->num_vlans--;
47805- adapter->vlan_tag[vid] = 0;
47806-
47807+ adapter->vlans_added--;
47808 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
47809- be_vid_config(adapter);
47810+
47811+ adapter->vlan_tag[vid] = 0;
47812+ if (adapter->vlans_added <= adapter->max_vlans)
47813+ be_vid_config(adapter, false, 0);
47814 }
47815
47816 static void be_set_multicast_list(struct net_device *netdev)
47817@@ -552,7 +890,7 @@ static void be_set_multicast_list(struct net_device *netdev)
47818 struct be_adapter *adapter = netdev_priv(netdev);
47819
47820 if (netdev->flags & IFF_PROMISC) {
47821- be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
47822+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
47823 adapter->promiscuous = true;
47824 goto done;
47825 }
47826@@ -560,81 +898,244 @@ static void be_set_multicast_list(struct net_device *netdev)
47827 /* BE was previously in promiscous mode; disable it */
47828 if (adapter->promiscuous) {
47829 adapter->promiscuous = false;
47830- be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
47831+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
47832+
47833+ if (adapter->vlans_added)
47834+ be_vid_config(adapter, false, 0);
47835 }
47836
47837- if (netdev->flags & IFF_ALLMULTI) {
47838- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
47839+ /* Enable multicast promisc if num configured exceeds what we support */
47840+ if (netdev->flags & IFF_ALLMULTI ||
47841+ netdev_mc_count(netdev) > BE_MAX_MC) {
47842+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
47843 goto done;
47844 }
47845
47846- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
47847- netdev->mc_count);
47848+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
47849 done:
47850 return;
47851 }
47852
47853-static void be_rx_rate_update(struct be_adapter *adapter)
47854+#ifdef HAVE_SRIOV_CONFIG
47855+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
47856 {
47857- struct be_drvr_stats *stats = drvr_stats(adapter);
47858+ struct be_adapter *adapter = netdev_priv(netdev);
47859+ int status;
47860+
47861+ if (adapter->num_vfs == 0)
47862+ return -EPERM;
47863+
47864+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
47865+ return -EINVAL;
47866+
47867+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
47868+ status = be_cmd_pmac_del(adapter,
47869+ adapter->vf_cfg[vf].vf_if_handle,
47870+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47871+
47872+ status = be_cmd_pmac_add(adapter, mac,
47873+ adapter->vf_cfg[vf].vf_if_handle,
47874+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47875+
47876+ if (status)
47877+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
47878+ mac, vf);
47879+ else
47880+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
47881+
47882+ return status;
47883+}
47884+
47885+static int be_get_vf_config(struct net_device *netdev, int vf,
47886+ struct ifla_vf_info *vi)
47887+{
47888+ struct be_adapter *adapter = netdev_priv(netdev);
47889+
47890+ if (adapter->num_vfs == 0)
47891+ return -EPERM;
47892+
47893+ if (vf >= adapter->num_vfs)
47894+ return -EINVAL;
47895+
47896+ vi->vf = vf;
47897+ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
47898+ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag & VLAN_VID_MASK;
47899+ vi->qos = adapter->vf_cfg[vf].vf_vlan_tag >> VLAN_PRIO_SHIFT;
47900+ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
47901+
47902+ return 0;
47903+}
47904+
47905+/*
47906+ * Entry point to configure vlan behavior for a VF.
47907+ * 1. By default a VF is vlan Challenged.
47908+ * 2. It may or may not have Transparent Tagging enabled.
47909+ * 3. Vlan privilege for a VF can be toggled using special VID 4095.
47910+ * 4. When removing the Vlan privilege for a VF there is no need set default vid
47911+ * 5. Transparent Tagging configured for a VF resets its Vlan privilege
47912+ * 6. To disable the current Transparet Tagging for a VF:
47913+ * 6a. run the last iproute command with vlan set to 0.
47914+ * 6b. programing the default vid will disable Transparent Tagging in ARM/ASIC
47915+ */
47916+static int be_set_vf_vlan(struct net_device *netdev,
47917+ int vf, u16 vlan, u8 qos)
47918+{
47919+ struct be_adapter *adapter = netdev_priv(netdev);
47920+ int status = 0;
47921+ u32 en = 0;
47922+
47923+ if (adapter->num_vfs == 0)
47924+ return -EPERM;
47925+
47926+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
47927+ return -EINVAL;
47928+
47929+ status = be_cmd_get_fn_privileges(adapter, &en, vf + 1);
47930+ if (status)
47931+ goto sts;
47932+
47933+ if (vlan == 4095) {
47934+ if (en & BE_PRIV_FILTMGMT) {
47935+ /* Knock off filtering privileges */
47936+ en &= ~BE_PRIV_FILTMGMT;
47937+ } else {
47938+ en |= BE_PRIV_FILTMGMT;
47939+ /* Transparent Tagging is currently enabled, Reset it */
47940+ if (adapter->vf_cfg[vf].vf_vlan_tag) {
47941+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
47942+ vlan = adapter->vf_cfg[vf].vf_def_vid;
47943+ be_cmd_set_hsw_config(adapter, vlan, vf + 1,
47944+ adapter->vf_cfg[vf].vf_if_handle);
47945+ }
47946+ }
47947+
47948+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
47949+ status = be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
47950+
47951+ goto sts;
47952+ }
47953+
47954+ if (vlan || qos) {
47955+ if (en & BE_PRIV_FILTMGMT) {
47956+ /* Check privilege and reset it to default */
47957+ en &= ~BE_PRIV_FILTMGMT;
47958+ be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
47959+ }
47960+
47961+ vlan |= qos << VLAN_PRIO_SHIFT;
47962+ if (adapter->vf_cfg[vf].vf_vlan_tag != vlan) {
47963+ /* If this is new value, program it. Else skip. */
47964+ adapter->vf_cfg[vf].vf_vlan_tag = vlan;
47965+
47966+ status = be_cmd_set_hsw_config(adapter, vlan,
47967+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
47968+ }
47969+
47970+ } else {
47971+ /* Reset Transparent Vlan Tagging. */
47972+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
47973+ vlan = adapter->vf_cfg[vf].vf_def_vid;
47974+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
47975+ adapter->vf_cfg[vf].vf_if_handle);
47976+ }
47977+
47978+sts:
47979+ if (status)
47980+ dev_info(&adapter->pdev->dev,
47981+ "VLAN %d config on VF %d failed\n", vlan, vf);
47982+ return status;
47983+}
47984+
47985+static int be_set_vf_tx_rate(struct net_device *netdev,
47986+ int vf, int rate)
47987+{
47988+ struct be_adapter *adapter = netdev_priv(netdev);
47989+ int status = 0;
47990+
47991+ if (adapter->num_vfs == 0)
47992+ return -EPERM;
47993+
47994+ if ((vf >= adapter->num_vfs) || (rate < 0))
47995+ return -EINVAL;
47996+
47997+ if (rate > 10000)
47998+ rate = 10000;
47999+
48000+ adapter->vf_cfg[vf].vf_tx_rate = rate;
48001+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
48002+
48003+ if (status)
48004+ dev_info(&adapter->pdev->dev,
48005+ "tx rate %d on VF %d failed\n", rate, vf);
48006+ return status;
48007+}
48008+#endif /* HAVE_SRIOV_CONFIG */
48009+
48010+static void be_rx_rate_update(struct be_rx_obj *rxo)
48011+{
48012+ struct be_rx_stats *stats = &rxo->stats;
48013 ulong now = jiffies;
48014
48015 /* Wrapped around */
48016- if (time_before(now, stats->be_rx_jiffies)) {
48017- stats->be_rx_jiffies = now;
48018+ if (time_before(now, stats->rx_jiffies)) {
48019+ stats->rx_jiffies = now;
48020 return;
48021 }
48022
48023 /* Update the rate once in two seconds */
48024- if ((now - stats->be_rx_jiffies) < 2 * HZ)
48025+ if ((now - stats->rx_jiffies) < 2 * HZ)
48026 return;
48027
48028- stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
48029- - stats->be_rx_bytes_prev,
48030- now - stats->be_rx_jiffies);
48031- stats->be_rx_jiffies = now;
48032- stats->be_rx_bytes_prev = stats->be_rx_bytes;
48033+ stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
48034+ now - stats->rx_jiffies);
48035+ stats->rx_jiffies = now;
48036+ stats->rx_bytes_prev = stats->rx_bytes;
48037 }
48038
48039-static void be_rx_stats_update(struct be_adapter *adapter,
48040- u32 pktsize, u16 numfrags)
48041+static void be_rx_stats_update(struct be_rx_obj *rxo,
48042+ struct be_rx_compl_info *rxcp)
48043 {
48044- struct be_drvr_stats *stats = drvr_stats(adapter);
48045+ struct be_rx_stats *stats = &rxo->stats;
48046
48047- stats->be_rx_compl++;
48048- stats->be_rx_frags += numfrags;
48049- stats->be_rx_bytes += pktsize;
48050- stats->be_rx_pkts++;
48051+ stats->rx_compl++;
48052+ stats->rx_frags += rxcp->num_rcvd;
48053+ stats->rx_bytes += rxcp->pkt_size;
48054+ stats->rx_pkts++;
48055+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
48056+ stats->rx_mcast_pkts++;
48057+ if (rxcp->err)
48058+ stats->rxcp_err++;
48059 }
48060
48061-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
48062+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
48063 {
48064- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
48065-
48066- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
48067- ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
48068- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
48069- if (ip_version) {
48070- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
48071- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
48072- }
48073- ipv6_chk = (ip_version && (tcpf || udpf));
48074-
48075- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
48076+ /* L4 checksum is not reliable for non TCP/UDP packets.
48077+ * Also ignore ipcksm for ipv6 pkts */
48078+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
48079+ (rxcp->ip_csum || rxcp->ipv6);
48080 }
48081
48082 static struct be_rx_page_info *
48083-get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48084+get_rx_page_info(struct be_adapter *adapter, struct be_rx_obj *rxo,
48085+ u16 frag_idx)
48086 {
48087 struct be_rx_page_info *rx_page_info;
48088- struct be_queue_info *rxq = &adapter->rx_obj.q;
48089+ struct be_queue_info *rxq = &rxo->q;
48090
48091- rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
48092- BUG_ON(!rx_page_info->page);
48093+ rx_page_info = &rxo->page_info_tbl[frag_idx];
48094+ if (!rx_page_info->page) {
48095+ printk(KERN_EMERG "curr_idx=%d prev_dix=%d rxq->head=%d\n",
48096+ frag_idx, rxo->prev_frag_idx, rxq->head);
48097+ BUG_ON(!rx_page_info->page);
48098+ }
48099
48100- if (rx_page_info->last_page_user)
48101+ if (rx_page_info->last_page_user) {
48102 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
48103 adapter->big_page_size, PCI_DMA_FROMDEVICE);
48104+ rx_page_info->last_page_user = false;
48105+ }
48106+
48107+ rxo->prev_frag_idx = frag_idx;
48108
48109 atomic_dec(&rxq->used);
48110 return rx_page_info;
48111@@ -642,20 +1143,26 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
48112
48113 /* Throwaway the data in the Rx completion */
48114 static void be_rx_compl_discard(struct be_adapter *adapter,
48115- struct be_eth_rx_compl *rxcp)
48116+ struct be_rx_obj *rxo,
48117+ struct be_rx_compl_info *rxcp)
48118 {
48119- struct be_queue_info *rxq = &adapter->rx_obj.q;
48120+ struct be_queue_info *rxq = &rxo->q;
48121 struct be_rx_page_info *page_info;
48122- u16 rxq_idx, i, num_rcvd;
48123+ u16 i;
48124+ bool oob_error;
48125+ u16 num_rcvd = rxcp->num_rcvd;
48126
48127- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48128- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48129+ oob_error = lancer_A0_chip(adapter) && rxcp->err;
48130+
48131+ /* In case of OOB error num_rcvd will be 1 more than actual */
48132+ if (oob_error && num_rcvd)
48133+ num_rcvd -= 1;
48134
48135 for (i = 0; i < num_rcvd; i++) {
48136- page_info = get_rx_page_info(adapter, rxq_idx);
48137+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48138 put_page(page_info->page);
48139 memset(page_info, 0, sizeof(*page_info));
48140- index_inc(&rxq_idx, rxq->len);
48141+ index_inc(&rxcp->rxq_idx, rxq->len);
48142 }
48143 }
48144
48145@@ -663,29 +1170,24 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
48146 * skb_fill_rx_data forms a complete skb for an ether frame
48147 * indicated by rxcp.
48148 */
48149-static void skb_fill_rx_data(struct be_adapter *adapter,
48150- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
48151+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
48152+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
48153 {
48154- struct be_queue_info *rxq = &adapter->rx_obj.q;
48155+ struct be_queue_info *rxq = &rxo->q;
48156 struct be_rx_page_info *page_info;
48157- u16 rxq_idx, i, num_rcvd, j;
48158- u32 pktsize, hdr_len, curr_frag_len, size;
48159+ u16 i, j;
48160+ u16 hdr_len, curr_frag_len, remaining;
48161 u8 *start;
48162
48163- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48164- pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48165- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48166-
48167- page_info = get_rx_page_info(adapter, rxq_idx);
48168-
48169+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48170 start = page_address(page_info->page) + page_info->page_offset;
48171 prefetch(start);
48172
48173 /* Copy data in the first descriptor of this completion */
48174- curr_frag_len = min(pktsize, rx_frag_size);
48175+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
48176
48177 /* Copy the header portion into skb_data */
48178- hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
48179+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
48180 memcpy(skb->data, start, hdr_len);
48181 skb->len = curr_frag_len;
48182 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
48183@@ -702,21 +1204,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48184 skb->data_len = curr_frag_len - hdr_len;
48185 skb->tail += hdr_len;
48186 }
48187- memset(page_info, 0, sizeof(*page_info));
48188+ page_info->page = NULL;
48189
48190- if (pktsize <= rx_frag_size) {
48191- BUG_ON(num_rcvd != 1);
48192- goto done;
48193+ if (rxcp->pkt_size <= rx_frag_size) {
48194+ BUG_ON(rxcp->num_rcvd != 1);
48195+ return;
48196 }
48197
48198 /* More frags present for this completion */
48199- size = pktsize;
48200- for (i = 1, j = 0; i < num_rcvd; i++) {
48201- size -= curr_frag_len;
48202- index_inc(&rxq_idx, rxq->len);
48203- page_info = get_rx_page_info(adapter, rxq_idx);
48204-
48205- curr_frag_len = min(size, rx_frag_size);
48206+ index_inc(&rxcp->rxq_idx, rxq->len);
48207+ remaining = rxcp->pkt_size - curr_frag_len;
48208+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
48209+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48210+ curr_frag_len = min(remaining, rx_frag_size);
48211
48212 /* Coalesce all frags from the same physical page in one slot */
48213 if (page_info->page_offset == 0) {
48214@@ -735,99 +1235,122 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
48215 skb->len += curr_frag_len;
48216 skb->data_len += curr_frag_len;
48217
48218- memset(page_info, 0, sizeof(*page_info));
48219+ remaining -= curr_frag_len;
48220+ index_inc(&rxcp->rxq_idx, rxq->len);
48221+ page_info->page = NULL;
48222 }
48223 BUG_ON(j > MAX_SKB_FRAGS);
48224-
48225-done:
48226- be_rx_stats_update(adapter, pktsize, num_rcvd);
48227- return;
48228 }
48229
48230-/* Process the RX completion indicated by rxcp when GRO is disabled */
48231+/* Process the RX completion indicated by rxcp when LRO is disabled */
48232 static void be_rx_compl_process(struct be_adapter *adapter,
48233- struct be_eth_rx_compl *rxcp)
48234+ struct be_rx_obj *rxo,
48235+ struct be_rx_compl_info *rxcp)
48236 {
48237 struct sk_buff *skb;
48238- u32 vlanf, vid;
48239- u8 vtm;
48240
48241- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48242- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48243-
48244- /* vlanf could be wrongly set in some cards.
48245- * ignore if vtm is not set */
48246- if ((adapter->cap == 0x400) && !vtm)
48247- vlanf = 0;
48248-
48249- skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
48250- if (!skb) {
48251+ skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
48252+ if (unlikely(!skb)) {
48253 if (net_ratelimit())
48254 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
48255- be_rx_compl_discard(adapter, rxcp);
48256+ be_rx_compl_discard(adapter, rxo, rxcp);
48257 return;
48258 }
48259
48260- skb_reserve(skb, NET_IP_ALIGN);
48261+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
48262
48263- skb_fill_rx_data(adapter, skb, rxcp);
48264-
48265- if (do_pkt_csum(rxcp, adapter->rx_csum))
48266- skb->ip_summed = CHECKSUM_NONE;
48267- else
48268+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
48269 skb->ip_summed = CHECKSUM_UNNECESSARY;
48270+ else
48271+ skb->ip_summed = CHECKSUM_NONE;
48272
48273 skb->truesize = skb->len + sizeof(struct sk_buff);
48274+ if (unlikely(rxcp->vlanf) &&
48275+ unlikely(!vlan_configured(adapter))) {
48276+ __vlan_put_tag(skb, rxcp->vlan_tag);
48277+ }
48278 skb->protocol = eth_type_trans(skb, adapter->netdev);
48279 skb->dev = adapter->netdev;
48280
48281- if (vlanf) {
48282- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
48283- kfree_skb(skb);
48284- return;
48285- }
48286- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48287- vid = be16_to_cpu(vid);
48288- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
48289- } else {
48290+ if (unlikely(rxcp->vlanf) &&
48291+ vlan_configured(adapter))
48292+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
48293+ rxcp->vlan_tag);
48294+ else
48295 netif_receive_skb(skb);
48296+
48297+ return;
48298+}
48299+
48300+/* Process the RX completion indicated by rxcp when LRO is enabled */
48301+static void be_rx_compl_process_lro(struct be_adapter *adapter,
48302+ struct be_rx_obj *rxo,
48303+ struct be_rx_compl_info *rxcp)
48304+{
48305+ struct be_rx_page_info *page_info;
48306+ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
48307+ struct be_queue_info *rxq = &rxo->q;
48308+ u16 remaining, curr_frag_len;
48309+ u16 i, j;
48310+
48311+ remaining = rxcp->pkt_size;
48312+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48313+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48314+
48315+ curr_frag_len = min(remaining, rx_frag_size);
48316+
48317+ /* Coalesce all frags from the same physical page in one slot */
48318+ if (i == 0 || page_info->page_offset == 0) {
48319+ /* First frag or Fresh page */
48320+ j++;
48321+ rx_frags[j].page = page_info->page;
48322+ rx_frags[j].page_offset = page_info->page_offset;
48323+ rx_frags[j].size = 0;
48324+ } else {
48325+ put_page(page_info->page);
48326+ }
48327+ rx_frags[j].size += curr_frag_len;
48328+
48329+ remaining -= curr_frag_len;
48330+ index_inc(&rxcp->rxq_idx, rxq->len);
48331+ memset(page_info, 0, sizeof(*page_info));
48332+ }
48333+ BUG_ON(j > MAX_SKB_FRAGS);
48334+
48335+ if (likely(!rxcp->vlanf)) {
48336+ lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size,
48337+ rxcp->pkt_size, NULL, 0);
48338+ } else {
48339+ lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags,
48340+ rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp,
48341+ rxcp->vlan_tag, NULL, 0);
48342 }
48343
48344 return;
48345 }
48346
48347 /* Process the RX completion indicated by rxcp when GRO is enabled */
48348-static void be_rx_compl_process_gro(struct be_adapter *adapter,
48349- struct be_eth_rx_compl *rxcp)
48350+void be_rx_compl_process_gro(struct be_adapter *adapter,
48351+ struct be_rx_obj *rxo,
48352+ struct be_rx_compl_info *rxcp)
48353 {
48354+#ifdef NETIF_F_GRO
48355 struct be_rx_page_info *page_info;
48356 struct sk_buff *skb = NULL;
48357- struct be_queue_info *rxq = &adapter->rx_obj.q;
48358- struct be_eq_obj *eq_obj = &adapter->rx_eq;
48359- u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
48360- u16 i, rxq_idx = 0, vid, j;
48361- u8 vtm;
48362-
48363- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
48364- pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
48365- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
48366- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
48367- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
48368-
48369- /* vlanf could be wrongly set in some cards.
48370- * ignore if vtm is not set */
48371- if ((adapter->cap == 0x400) && !vtm)
48372- vlanf = 0;
48373+ struct be_queue_info *rxq = &rxo->q;
48374+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
48375+ u16 remaining, curr_frag_len;
48376+ u16 i, j;
48377
48378 skb = napi_get_frags(&eq_obj->napi);
48379 if (!skb) {
48380- be_rx_compl_discard(adapter, rxcp);
48381+ be_rx_compl_discard(adapter, rxo, rxcp);
48382 return;
48383 }
48384
48385- remaining = pkt_size;
48386- for (i = 0, j = -1; i < num_rcvd; i++) {
48387- page_info = get_rx_page_info(adapter, rxq_idx);
48388+ remaining = rxcp->pkt_size;
48389+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
48390+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
48391
48392 curr_frag_len = min(remaining, rx_frag_size);
48393
48394@@ -845,55 +1368,129 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
48395 skb_shinfo(skb)->frags[j].size += curr_frag_len;
48396
48397 remaining -= curr_frag_len;
48398- index_inc(&rxq_idx, rxq->len);
48399+ index_inc(&rxcp->rxq_idx, rxq->len);
48400 memset(page_info, 0, sizeof(*page_info));
48401 }
48402 BUG_ON(j > MAX_SKB_FRAGS);
48403
48404 skb_shinfo(skb)->nr_frags = j + 1;
48405- skb->len = pkt_size;
48406- skb->data_len = pkt_size;
48407- skb->truesize += pkt_size;
48408+ skb->len = rxcp->pkt_size;
48409+ skb->data_len = rxcp->pkt_size;
48410+ skb->truesize += rxcp->pkt_size;
48411 skb->ip_summed = CHECKSUM_UNNECESSARY;
48412
48413- if (likely(!vlanf)) {
48414+ if (likely(!rxcp->vlanf))
48415 napi_gro_frags(&eq_obj->napi);
48416- } else {
48417- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
48418- vid = be16_to_cpu(vid);
48419+ else
48420+ vlan_gro_frags(&eq_obj->napi,
48421+ adapter->vlan_grp, rxcp->vlan_tag);
48422+#endif
48423
48424- if (!adapter->vlan_grp || adapter->num_vlans == 0)
48425- return;
48426-
48427- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
48428- }
48429-
48430- be_rx_stats_update(adapter, pkt_size, num_rcvd);
48431 return;
48432 }
48433
48434-static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
48435+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
48436+ struct be_eth_rx_compl *compl,
48437+ struct be_rx_compl_info *rxcp)
48438 {
48439- struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
48440+ rxcp->pkt_size =
48441+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
48442+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
48443+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
48444+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
48445+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
48446+ rxcp->ip_csum =
48447+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
48448+ rxcp->l4_csum =
48449+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
48450+ rxcp->ipv6 =
48451+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
48452+ rxcp->rxq_idx =
48453+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
48454+ rxcp->num_rcvd =
48455+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
48456+ rxcp->pkt_type =
48457+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
48458+ if (rxcp->vlanf) {
48459+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
48460+ compl);
48461+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
48462+ vlan_tag, compl);
48463+ }
48464+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
48465+}
48466
48467- if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
48468+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
48469+ struct be_eth_rx_compl *compl,
48470+ struct be_rx_compl_info *rxcp)
48471+{
48472+ rxcp->pkt_size =
48473+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
48474+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
48475+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
48476+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
48477+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
48478+ rxcp->ip_csum =
48479+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
48480+ rxcp->l4_csum =
48481+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
48482+ rxcp->ipv6 =
48483+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
48484+ rxcp->rxq_idx =
48485+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
48486+ rxcp->num_rcvd =
48487+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
48488+ rxcp->pkt_type =
48489+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
48490+ if (rxcp->vlanf) {
48491+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
48492+ compl);
48493+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
48494+ vlan_tag, compl);
48495+ }
48496+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
48497+}
48498+
48499+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
48500+{
48501+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
48502+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
48503+ struct be_adapter *adapter = rxo->adapter;
48504+
48505+ /* For checking the valid bit it is Ok to use either definition as the
48506+ * valid bit is at the same position in both v0 and v1 Rx compl */
48507+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
48508 return NULL;
48509
48510- be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
48511+ rmb();
48512+ be_dws_le_to_cpu(compl, sizeof(*compl));
48513
48514- queue_tail_inc(&adapter->rx_obj.cq);
48515+ if (adapter->be3_native)
48516+ be_parse_rx_compl_v1(adapter, compl, rxcp);
48517+ else
48518+ be_parse_rx_compl_v0(adapter, compl, rxcp);
48519+
48520+ if (rxcp->vlanf) {
48521+ /* vlanf could be wrongly set in some cards.
48522+ * ignore if vtm is not set */
48523+ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
48524+ rxcp->vlanf = 0;
48525+
48526+ if (!lancer_chip(adapter))
48527+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
48528+
48529+ if ((adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK)) &&
48530+ !adapter->vlan_tag[rxcp->vlan_tag])
48531+ rxcp->vlanf = 0;
48532+ }
48533+
48534+ /* As the compl has been parsed, reset it; we wont touch it again */
48535+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
48536+
48537+ queue_tail_inc(&rxo->cq);
48538 return rxcp;
48539 }
48540
48541-/* To reset the valid bit, we need to reset the whole word as
48542- * when walking the queue the valid entries are little-endian
48543- * and invalid entries are host endian
48544- */
48545-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
48546-{
48547- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
48548-}
48549-
48550 static inline struct page *be_alloc_pages(u32 size)
48551 {
48552 gfp_t alloc_flags = GFP_ATOMIC;
48553@@ -907,11 +1504,12 @@ static inline struct page *be_alloc_pages(u32 size)
48554 * Allocate a page, split it to fragments of size rx_frag_size and post as
48555 * receive buffers to BE
48556 */
48557-static void be_post_rx_frags(struct be_adapter *adapter)
48558+static void be_post_rx_frags(struct be_rx_obj *rxo)
48559 {
48560- struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
48561- struct be_rx_page_info *page_info = NULL;
48562- struct be_queue_info *rxq = &adapter->rx_obj.q;
48563+ struct be_adapter *adapter = rxo->adapter;
48564+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
48565+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
48566+ struct be_queue_info *rxq = &rxo->q;
48567 struct page *pagep = NULL;
48568 struct be_eth_rx_d *rxd;
48569 u64 page_dmaaddr = 0, frag_dmaaddr;
48570@@ -922,7 +1520,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48571 if (!pagep) {
48572 pagep = be_alloc_pages(adapter->big_page_size);
48573 if (unlikely(!pagep)) {
48574- drvr_stats(adapter)->be_ethrx_post_fail++;
48575+ rxo->stats.rx_post_fail++;
48576 break;
48577 }
48578 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
48579@@ -941,7 +1539,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48580 rxd = queue_head_node(rxq);
48581 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
48582 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
48583- queue_head_inc(rxq);
48584
48585 /* Any space left in the current big page for another frag? */
48586 if ((page_offset + rx_frag_size + rx_frag_size) >
48587@@ -949,17 +1546,24 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48588 pagep = NULL;
48589 page_info->last_page_user = true;
48590 }
48591+
48592+ prev_page_info = page_info;
48593+ queue_head_inc(rxq);
48594 page_info = &page_info_tbl[rxq->head];
48595 }
48596 if (pagep)
48597- page_info->last_page_user = true;
48598+ prev_page_info->last_page_user = true;
48599
48600+ /* Ensure that posting buffers is the last thing done by this
48601+ * routine to avoid racing between rx bottom-half and
48602+ * be_worker (process) contexts.
48603+ */
48604 if (posted) {
48605 atomic_add(posted, &rxq->used);
48606 be_rxq_notify(adapter, rxq->id, posted);
48607 } else if (atomic_read(&rxq->used) == 0) {
48608 /* Let be_worker replenish when memory is available */
48609- adapter->rx_post_starved = true;
48610+ rxo->rx_post_starved = true;
48611 }
48612
48613 return;
48614@@ -972,6 +1576,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48615 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
48616 return NULL;
48617
48618+ rmb();
48619 be_dws_le_to_cpu(txcp, sizeof(*txcp));
48620
48621 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
48622@@ -980,11 +1585,14 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48623 return txcp;
48624 }
48625
48626-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48627+static u16 be_tx_compl_process(struct be_adapter *adapter,
48628+ struct be_tx_obj *txo, u16 last_index)
48629 {
48630- struct be_queue_info *txq = &adapter->tx_obj.q;
48631- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
48632+ struct be_queue_info *txq = &txo->q;
48633+ struct be_eth_wrb *wrb;
48634+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48635 struct sk_buff *sent_skb;
48636+ u64 busaddr;
48637 u16 cur_index, num_wrbs = 0;
48638
48639 cur_index = txq->tail;
48640@@ -992,15 +1600,31 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48641 BUG_ON(!sent_skb);
48642 sent_skbs[cur_index] = NULL;
48643
48644- do {
48645+ wrb = queue_tail_node(txq);
48646+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48647+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48648+ if (busaddr != 0) {
48649+ pci_unmap_single(adapter->pdev, busaddr,
48650+ wrb->frag_len, PCI_DMA_TODEVICE);
48651+ }
48652+ num_wrbs++;
48653+ queue_tail_inc(txq);
48654+
48655+ while (cur_index != last_index) {
48656 cur_index = txq->tail;
48657+ wrb = queue_tail_node(txq);
48658+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48659+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48660+ if (busaddr != 0) {
48661+ pci_unmap_page(adapter->pdev, busaddr,
48662+ wrb->frag_len, PCI_DMA_TODEVICE);
48663+ }
48664 num_wrbs++;
48665 queue_tail_inc(txq);
48666- } while (cur_index != last_index);
48667+ }
48668
48669- atomic_sub(num_wrbs, &txq->used);
48670- skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
48671 kfree_skb(sent_skb);
48672+ return num_wrbs;
48673 }
48674
48675 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48676@@ -1010,13 +1634,15 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48677 if (!eqe->evt)
48678 return NULL;
48679
48680+ rmb();
48681 eqe->evt = le32_to_cpu(eqe->evt);
48682 queue_tail_inc(&eq_obj->q);
48683 return eqe;
48684 }
48685
48686 static int event_handle(struct be_adapter *adapter,
48687- struct be_eq_obj *eq_obj)
48688+ struct be_eq_obj *eq_obj,
48689+ bool rearm)
48690 {
48691 struct be_eq_entry *eqe;
48692 u16 num = 0;
48693@@ -1029,7 +1655,10 @@ static int event_handle(struct be_adapter *adapter,
48694 /* Deal with any spurious interrupts that come
48695 * without events
48696 */
48697- be_eq_notify(adapter, eq_obj->q.id, true, true, num);
48698+ if (!num)
48699+ rearm = true;
48700+
48701+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
48702 if (num)
48703 napi_schedule(&eq_obj->napi);
48704
48705@@ -1053,49 +1682,55 @@ static void be_eq_clean(struct be_adapter *adapter,
48706 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
48707 }
48708
48709-static void be_rx_q_clean(struct be_adapter *adapter)
48710+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
48711 {
48712 struct be_rx_page_info *page_info;
48713- struct be_queue_info *rxq = &adapter->rx_obj.q;
48714- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
48715- struct be_eth_rx_compl *rxcp;
48716+ struct be_queue_info *rxq = &rxo->q;
48717+ struct be_queue_info *rx_cq = &rxo->cq;
48718+ struct be_rx_compl_info *rxcp;
48719 u16 tail;
48720
48721 /* First cleanup pending rx completions */
48722- while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
48723- be_rx_compl_discard(adapter, rxcp);
48724- be_rx_compl_reset(rxcp);
48725+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
48726+ be_rx_compl_discard(adapter, rxo, rxcp);
48727 be_cq_notify(adapter, rx_cq->id, true, 1);
48728 }
48729
48730 /* Then free posted rx buffer that were not used */
48731 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
48732 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
48733- page_info = get_rx_page_info(adapter, tail);
48734+ page_info = get_rx_page_info(adapter, rxo, tail);
48735 put_page(page_info->page);
48736 memset(page_info, 0, sizeof(*page_info));
48737 }
48738 BUG_ON(atomic_read(&rxq->used));
48739+ rxq->tail = rxq->head = 0;
48740 }
48741
48742-static void be_tx_compl_clean(struct be_adapter *adapter)
48743+static void be_tx_compl_clean(struct be_adapter *adapter,
48744+ struct be_tx_obj *txo)
48745 {
48746- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
48747- struct be_queue_info *txq = &adapter->tx_obj.q;
48748+ struct be_queue_info *tx_cq = &txo->cq;
48749+ struct be_queue_info *txq = &txo->q;
48750 struct be_eth_tx_compl *txcp;
48751- u16 end_idx, cmpl = 0, timeo = 0;
48752+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
48753+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48754+ struct sk_buff *sent_skb;
48755+ bool dummy_wrb;
48756
48757 /* Wait for a max of 200ms for all the tx-completions to arrive. */
48758 do {
48759 while ((txcp = be_tx_compl_get(tx_cq))) {
48760 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
48761 wrb_index, txcp);
48762- be_tx_compl_process(adapter, end_idx);
48763+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
48764 cmpl++;
48765 }
48766 if (cmpl) {
48767 be_cq_notify(adapter, tx_cq->id, false, cmpl);
48768+ atomic_sub(num_wrbs, &txq->used);
48769 cmpl = 0;
48770+ num_wrbs = 0;
48771 }
48772
48773 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
48774@@ -1107,6 +1742,17 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
48775 if (atomic_read(&txq->used))
48776 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
48777 atomic_read(&txq->used));
48778+
48779+ /* free posted tx for which compls will never arrive */
48780+ while (atomic_read(&txq->used)) {
48781+ sent_skb = sent_skbs[txq->tail];
48782+ end_idx = txq->tail;
48783+ index_adv(&end_idx,
48784+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
48785+ txq->len);
48786+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
48787+ atomic_sub(num_wrbs, &txq->used);
48788+ }
48789 }
48790
48791 static void be_mcc_queues_destroy(struct be_adapter *adapter)
48792@@ -1145,8 +1791,9 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
48793 goto mcc_cq_destroy;
48794
48795 /* Ask BE to create MCC queue */
48796- if (be_cmd_mccq_create(adapter, q, cq))
48797+ if (be_cmd_mccq_create(adapter, q, cq)) {
48798 goto mcc_q_free;
48799+ }
48800
48801 return 0;
48802
48803@@ -1163,16 +1810,20 @@ err:
48804 static void be_tx_queues_destroy(struct be_adapter *adapter)
48805 {
48806 struct be_queue_info *q;
48807+ struct be_tx_obj *txo;
48808+ u8 i;
48809
48810- q = &adapter->tx_obj.q;
48811- if (q->created)
48812- be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48813- be_queue_free(adapter, q);
48814+ for_all_tx_queues(adapter, txo, i) {
48815+ q = &txo->q;
48816+ if (q->created)
48817+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48818+ be_queue_free(adapter, q);
48819
48820- q = &adapter->tx_obj.cq;
48821- if (q->created)
48822- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48823- be_queue_free(adapter, q);
48824+ q = &txo->cq;
48825+ if (q->created)
48826+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48827+ be_queue_free(adapter, q);
48828+ }
48829
48830 /* Clear any residual events */
48831 be_eq_clean(adapter, &adapter->tx_eq);
48832@@ -1183,168 +1834,210 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
48833 be_queue_free(adapter, q);
48834 }
48835
48836+/* One TX event queue is shared by all TX compl qs */
48837 static int be_tx_queues_create(struct be_adapter *adapter)
48838 {
48839 struct be_queue_info *eq, *q, *cq;
48840+ struct be_tx_obj *txo;
48841+ u8 i, tc_id;
48842
48843 adapter->tx_eq.max_eqd = 0;
48844 adapter->tx_eq.min_eqd = 0;
48845 adapter->tx_eq.cur_eqd = 96;
48846 adapter->tx_eq.enable_aic = false;
48847- /* Alloc Tx Event queue */
48848+
48849 eq = &adapter->tx_eq.q;
48850- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
48851+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48852+ sizeof(struct be_eq_entry)))
48853 return -1;
48854
48855- /* Ask BE to create Tx Event queue */
48856 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
48857- goto tx_eq_free;
48858- /* Alloc TX eth compl queue */
48859- cq = &adapter->tx_obj.cq;
48860- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48861+ goto err;
48862+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
48863+
48864+ for_all_tx_queues(adapter, txo, i) {
48865+ cq = &txo->cq;
48866+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48867 sizeof(struct be_eth_tx_compl)))
48868- goto tx_eq_destroy;
48869+ goto err;
48870
48871- /* Ask BE to create Tx eth compl queue */
48872- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48873- goto tx_cq_free;
48874+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48875+ goto err;
48876
48877- /* Alloc TX eth queue */
48878- q = &adapter->tx_obj.q;
48879- if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
48880- goto tx_cq_destroy;
48881+ q = &txo->q;
48882+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
48883+ sizeof(struct be_eth_wrb)))
48884+ goto err;
48885
48886- /* Ask BE to create Tx eth queue */
48887- if (be_cmd_txq_create(adapter, q, cq))
48888- goto tx_q_free;
48889+ if (be_cmd_txq_create(adapter, q, cq, &tc_id))
48890+ goto err;
48891+
48892+ if (adapter->flags & BE_FLAGS_DCBX)
48893+ adapter->tc_txq_map[tc_id] = i;
48894+ }
48895 return 0;
48896
48897-tx_q_free:
48898- be_queue_free(adapter, q);
48899-tx_cq_destroy:
48900- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
48901-tx_cq_free:
48902- be_queue_free(adapter, cq);
48903-tx_eq_destroy:
48904- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
48905-tx_eq_free:
48906- be_queue_free(adapter, eq);
48907+err:
48908+ be_tx_queues_destroy(adapter);
48909 return -1;
48910 }
48911
48912 static void be_rx_queues_destroy(struct be_adapter *adapter)
48913 {
48914 struct be_queue_info *q;
48915+ struct be_rx_obj *rxo;
48916+ int i;
48917
48918- q = &adapter->rx_obj.q;
48919- if (q->created) {
48920- be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
48921- be_rx_q_clean(adapter);
48922- }
48923- be_queue_free(adapter, q);
48924+ for_all_rx_queues(adapter, rxo, i) {
48925+ be_queue_free(adapter, &rxo->q);
48926+
48927+ q = &rxo->cq;
48928+ if (q->created)
48929+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48930+ be_queue_free(adapter, q);
48931
48932- q = &adapter->rx_obj.cq;
48933- if (q->created)
48934- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48935- be_queue_free(adapter, q);
48936+ q = &rxo->rx_eq.q;
48937+ if (q->created)
48938+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
48939+ be_queue_free(adapter, q);
48940
48941- /* Clear any residual events */
48942- be_eq_clean(adapter, &adapter->rx_eq);
48943+ kfree(rxo->page_info_tbl);
48944+ }
48945+}
48946
48947- q = &adapter->rx_eq.q;
48948- if (q->created)
48949- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
48950- be_queue_free(adapter, q);
48951+/* Is BE in a multi-channel mode */
48952+static inline bool be_is_mc(struct be_adapter *adapter) {
48953+ return (adapter->function_mode & FLEX10_MODE ||
48954+ adapter->function_mode & VNIC_MODE ||
48955+ adapter->function_mode & UMC_ENABLED);
48956+}
48957+
48958+static u32 be_num_rxqs_want(struct be_adapter *adapter)
48959+{
48960+ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
48961+ adapter->num_vfs == 0 && be_physfn(adapter) &&
48962+ !be_is_mc(adapter)) {
48963+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
48964+ } else {
48965+ dev_warn(&adapter->pdev->dev,
48966+ "No support for multiple RX queues\n");
48967+ return 1;
48968+ }
48969 }
48970
48971 static int be_rx_queues_create(struct be_adapter *adapter)
48972 {
48973 struct be_queue_info *eq, *q, *cq;
48974- int rc;
48975+ struct be_rx_obj *rxo;
48976+ int rc, i;
48977
48978+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
48979+ msix_enabled(adapter) ?
48980+ adapter->num_msix_vec - 1 : 1);
48981+ if (adapter->num_rx_qs != MAX_RX_QS)
48982+ dev_warn(&adapter->pdev->dev,
48983+ "Could create only %d receive queues",
48984+ adapter->num_rx_qs);
48985+
48986+ adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1;
48987 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
48988- adapter->rx_eq.max_eqd = BE_MAX_EQD;
48989- adapter->rx_eq.min_eqd = 0;
48990- adapter->rx_eq.cur_eqd = 0;
48991- adapter->rx_eq.enable_aic = true;
48992+ for_all_rx_queues(adapter, rxo, i) {
48993+ rxo->adapter = adapter;
48994+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
48995+ rxo->rx_eq.enable_aic = true;
48996
48997- /* Alloc Rx Event queue */
48998- eq = &adapter->rx_eq.q;
48999- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49000- sizeof(struct be_eq_entry));
49001- if (rc)
49002- return rc;
49003+ /* EQ */
49004+ eq = &rxo->rx_eq.q;
49005+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
49006+ sizeof(struct be_eq_entry));
49007+ if (rc)
49008+ goto err;
49009
49010- /* Ask BE to create Rx Event queue */
49011- rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
49012- if (rc)
49013- goto rx_eq_free;
49014+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
49015+ if (rc)
49016+ goto err;
49017
49018- /* Alloc RX eth compl queue */
49019- cq = &adapter->rx_obj.cq;
49020- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49021- sizeof(struct be_eth_rx_compl));
49022- if (rc)
49023- goto rx_eq_destroy;
49024+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
49025
49026- /* Ask BE to create Rx eth compl queue */
49027- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49028- if (rc)
49029- goto rx_cq_free;
49030+ /* CQ */
49031+ cq = &rxo->cq;
49032+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
49033+ sizeof(struct be_eth_rx_compl));
49034+ if (rc)
49035+ goto err;
49036
49037- /* Alloc RX eth queue */
49038- q = &adapter->rx_obj.q;
49039- rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
49040- if (rc)
49041- goto rx_cq_destroy;
49042+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
49043+ if (rc)
49044+ goto err;
49045
49046- /* Ask BE to create Rx eth queue */
49047- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
49048- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
49049- if (rc)
49050- goto rx_q_free;
49051+ /* Rx Q - will be created in be_open() */
49052+ q = &rxo->q;
49053+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
49054+ sizeof(struct be_eth_rx_d));
49055+ if (rc)
49056+ goto err;
49057+
49058+ rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) *
49059+ RX_Q_LEN, GFP_KERNEL);
49060+ if (!rxo->page_info_tbl)
49061+ goto err;
49062+ }
49063
49064 return 0;
49065-rx_q_free:
49066- be_queue_free(adapter, q);
49067-rx_cq_destroy:
49068- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
49069-rx_cq_free:
49070- be_queue_free(adapter, cq);
49071-rx_eq_destroy:
49072- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
49073-rx_eq_free:
49074- be_queue_free(adapter, eq);
49075- return rc;
49076+err:
49077+ be_rx_queues_destroy(adapter);
49078+ return -1;
49079 }
49080
49081-/* There are 8 evt ids per func. Retruns the evt id's bit number */
49082-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
49083+static bool event_peek(struct be_eq_obj *eq_obj)
49084 {
49085- return eq_id - 8 * be_pci_func(adapter);
49086+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
49087+ if (!eqe->evt)
49088+ return false;
49089+ else
49090+ return true;
49091 }
49092
49093 static irqreturn_t be_intx(int irq, void *dev)
49094 {
49095 struct be_adapter *adapter = dev;
49096- int isr;
49097+ struct be_rx_obj *rxo;
49098+ int isr, i, tx = 0 , rx = 0;
49099
49100- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49101- be_pci_func(adapter) * CEV_ISR_SIZE);
49102- if (!isr)
49103- return IRQ_NONE;
49104+ if (lancer_chip(adapter)) {
49105+ if (event_peek(&adapter->tx_eq))
49106+ tx = event_handle(adapter, &adapter->tx_eq, false);
49107+ for_all_rx_queues(adapter, rxo, i) {
49108+ if (event_peek(&rxo->rx_eq))
49109+ rx |= event_handle(adapter, &rxo->rx_eq, true);
49110+ }
49111
49112- event_handle(adapter, &adapter->tx_eq);
49113- event_handle(adapter, &adapter->rx_eq);
49114+ if (!(tx || rx))
49115+ return IRQ_NONE;
49116+ } else {
49117+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
49118+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
49119+ if (!isr)
49120+ return IRQ_NONE;
49121+
49122+ if ((1 << adapter->tx_eq.eq_idx & isr))
49123+ event_handle(adapter, &adapter->tx_eq, false);
49124+
49125+ for_all_rx_queues(adapter, rxo, i) {
49126+ if ((1 << rxo->rx_eq.eq_idx & isr))
49127+ event_handle(adapter, &rxo->rx_eq, true);
49128+ }
49129+ }
49130
49131 return IRQ_HANDLED;
49132 }
49133
49134 static irqreturn_t be_msix_rx(int irq, void *dev)
49135 {
49136- struct be_adapter *adapter = dev;
49137+ struct be_rx_obj *rxo = dev;
49138+ struct be_adapter *adapter = rxo->adapter;
49139
49140- event_handle(adapter, &adapter->rx_eq);
49141+ event_handle(adapter, &rxo->rx_eq, true);
49142
49143 return IRQ_HANDLED;
49144 }
49145@@ -1353,48 +2046,72 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
49146 {
49147 struct be_adapter *adapter = dev;
49148
49149- event_handle(adapter, &adapter->tx_eq);
49150+ event_handle(adapter, &adapter->tx_eq, false);
49151
49152 return IRQ_HANDLED;
49153 }
49154
49155 static inline bool do_gro(struct be_adapter *adapter,
49156- struct be_eth_rx_compl *rxcp)
49157+ struct be_rx_compl_info *rxcp)
49158 {
49159- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
49160- int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
49161-
49162- if (err)
49163- drvr_stats(adapter)->be_rxcp_err++;
49164-
49165- return (tcp_frame && !err) ? true : false;
49166+ return (!rxcp->tcpf || rxcp->err || adapter->max_rx_coal <= 1 ||
49167+ (rxcp->vlanf && !vlan_configured(adapter))) ?
49168+ false : true;
49169 }
49170
49171 int be_poll_rx(struct napi_struct *napi, int budget)
49172 {
49173 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
49174- struct be_adapter *adapter =
49175- container_of(rx_eq, struct be_adapter, rx_eq);
49176- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
49177- struct be_eth_rx_compl *rxcp;
49178+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
49179+ struct be_adapter *adapter = rxo->adapter;
49180+ struct be_queue_info *rx_cq = &rxo->cq;
49181+ struct be_rx_compl_info *rxcp;
49182 u32 work_done;
49183+ bool flush_lro = false;
49184
49185+ rxo->stats.rx_polls++;
49186 for (work_done = 0; work_done < budget; work_done++) {
49187- rxcp = be_rx_compl_get(adapter);
49188+ rxcp = be_rx_compl_get(rxo);
49189 if (!rxcp)
49190 break;
49191
49192- if (do_gro(adapter, rxcp))
49193- be_rx_compl_process_gro(adapter, rxcp);
49194- else
49195- be_rx_compl_process(adapter, rxcp);
49196+ /* Is it a flush compl that has no data */
49197+ if (unlikely(rxcp->num_rcvd == 0))
49198+ continue;
49199
49200- be_rx_compl_reset(rxcp);
49201+ if (unlikely(rxcp->port != adapter->port_num)) {
49202+ be_rx_compl_discard(adapter, rxo, rxcp);
49203+ be_rx_stats_update(rxo, rxcp);
49204+ continue;
49205+ }
49206+
49207+ if (likely((lancer_A0_chip(adapter) && !rxcp->err) ||
49208+ !lancer_A0_chip(adapter))) {
49209+ if (do_gro(adapter, rxcp)) {
49210+ if (adapter->gro_supported) {
49211+ be_rx_compl_process_gro(adapter, rxo,
49212+ rxcp);
49213+ } else {
49214+ be_rx_compl_process_lro(adapter, rxo,
49215+ rxcp);
49216+ flush_lro = true;
49217+ }
49218+ } else {
49219+ be_rx_compl_process(adapter, rxo, rxcp);
49220+ }
49221+ } else if (lancer_A0_chip(adapter) && rxcp->err) {
49222+ be_rx_compl_discard(adapter, rxo, rxcp);
49223+ }
49224+
49225+ be_rx_stats_update(rxo, rxcp);
49226 }
49227
49228+ if (flush_lro)
49229+ lro_flush_all(&rxo->lro_mgr);
49230+
49231 /* Refill the queue */
49232- if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
49233- be_post_rx_frags(adapter);
49234+ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
49235+ be_post_rx_frags(rxo);
49236
49237 /* All consumed */
49238 if (work_done < budget) {
49239@@ -1404,40 +2121,13 @@ int be_poll_rx(struct napi_struct *napi, int budget)
49240 /* More to be consumed; continue with interrupts disabled */
49241 be_cq_notify(adapter, rx_cq->id, false, work_done);
49242 }
49243+
49244+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
49245+ adapter->netdev->last_rx = jiffies;
49246+#endif
49247 return work_done;
49248 }
49249
49250-void be_process_tx(struct be_adapter *adapter)
49251-{
49252- struct be_queue_info *txq = &adapter->tx_obj.q;
49253- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
49254- struct be_eth_tx_compl *txcp;
49255- u32 num_cmpl = 0;
49256- u16 end_idx;
49257-
49258- while ((txcp = be_tx_compl_get(tx_cq))) {
49259- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
49260- wrb_index, txcp);
49261- be_tx_compl_process(adapter, end_idx);
49262- num_cmpl++;
49263- }
49264-
49265- if (num_cmpl) {
49266- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
49267-
49268- /* As Tx wrbs have been freed up, wake up netdev queue if
49269- * it was stopped due to lack of tx wrbs.
49270- */
49271- if (netif_queue_stopped(adapter->netdev) &&
49272- atomic_read(&txq->used) < txq->len / 2) {
49273- netif_wake_queue(adapter->netdev);
49274- }
49275-
49276- drvr_stats(adapter)->be_tx_events++;
49277- drvr_stats(adapter)->be_tx_compl += num_cmpl;
49278- }
49279-}
49280-
49281 /* As TX and MCC share the same EQ check for both TX and MCC completions.
49282 * For TX/MCC we don't honour budget; consume everything
49283 */
49284@@ -1446,96 +2136,264 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
49285 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
49286 struct be_adapter *adapter =
49287 container_of(tx_eq, struct be_adapter, tx_eq);
49288+ struct be_tx_obj *txo;
49289+ struct be_eth_tx_compl *txcp;
49290+ int tx_compl, mcc_compl, status = 0;
49291+ u8 i;
49292+ u16 num_wrbs;
49293+
49294+ for_all_tx_queues(adapter, txo, i) {
49295+ tx_compl = 0;
49296+ num_wrbs = 0;
49297+ while ((txcp = be_tx_compl_get(&txo->cq))) {
49298+ num_wrbs += be_tx_compl_process(adapter, txo,
49299+ AMAP_GET_BITS(struct amap_eth_tx_compl,
49300+ wrb_index, txcp));
49301+ tx_compl++;
49302+ }
49303+ if (tx_compl) {
49304+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
49305+
49306+ atomic_sub(num_wrbs, &txo->q.used);
49307+
49308+ /* As Tx wrbs have been freed up, wake up netdev queue
49309+ * if it was stopped due to lack of tx wrbs. */
49310+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
49311+ atomic_read(&txo->q.used) < txo->q.len / 2) {
49312+ netif_wake_subqueue(adapter->netdev, i);
49313+ }
49314+
49315+ adapter->drv_stats.be_tx_events++;
49316+ txo->stats.be_tx_compl += tx_compl;
49317+ }
49318+ }
49319+
49320+ mcc_compl = be_process_mcc(adapter, &status);
49321+
49322+ if (mcc_compl) {
49323+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49324+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
49325+ }
49326
49327 napi_complete(napi);
49328
49329- be_process_tx(adapter);
49330-
49331- be_process_mcc(adapter);
49332-
49333+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49334 return 1;
49335 }
49336
49337+void be_detect_dump_ue(struct be_adapter *adapter)
49338+{
49339+ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
49340+ u32 i;
49341+
49342+ pci_read_config_dword(adapter->pdev,
49343+ PCICFG_UE_STATUS_LOW, &ue_status_lo);
49344+ pci_read_config_dword(adapter->pdev,
49345+ PCICFG_UE_STATUS_HIGH, &ue_status_hi);
49346+ pci_read_config_dword(adapter->pdev,
49347+ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
49348+ pci_read_config_dword(adapter->pdev,
49349+ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
49350+
49351+ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
49352+ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
49353+
49354+ if (ue_status_lo || ue_status_hi) {
49355+ adapter->ue_detected = true;
49356+ adapter->eeh_err = true;
49357+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
49358+ }
49359+
49360+ if (ue_status_lo) {
49361+ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
49362+ if (ue_status_lo & 1)
49363+ dev_err(&adapter->pdev->dev,
49364+ "UE: %s bit set\n", ue_status_low_desc[i]);
49365+ }
49366+ }
49367+ if (ue_status_hi) {
49368+ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
49369+ if (ue_status_hi & 1)
49370+ dev_err(&adapter->pdev->dev,
49371+ "UE: %s bit set\n", ue_status_hi_desc[i]);
49372+ }
49373+ }
49374+
49375+}
49376+
49377 static void be_worker(struct work_struct *work)
49378 {
49379 struct be_adapter *adapter =
49380 container_of(work, struct be_adapter, work.work);
49381+ struct be_rx_obj *rxo;
49382+ struct be_tx_obj *txo;
49383+ int i;
49384
49385- be_cmd_get_stats(adapter, &adapter->stats.cmd);
49386+ if (!adapter->ue_detected && !lancer_chip(adapter))
49387+ be_detect_dump_ue(adapter);
49388
49389- /* Set EQ delay */
49390- be_rx_eqd_update(adapter);
49391+ /* when interrupts are not yet enabled, just reap any pending
49392+ * mcc completions */
49393+ if (!netif_running(adapter->netdev)) {
49394+ int mcc_compl, status = 0;
49395
49396- be_tx_rate_update(adapter);
49397- be_rx_rate_update(adapter);
49398+ mcc_compl = be_process_mcc(adapter, &status);
49399
49400- if (adapter->rx_post_starved) {
49401- adapter->rx_post_starved = false;
49402- be_post_rx_frags(adapter);
49403+ if (mcc_compl) {
49404+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
49405+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
49406+ }
49407+
49408+ goto reschedule;
49409+ }
49410+
49411+ if (!adapter->stats_cmd_sent)
49412+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
49413+
49414+ for_all_tx_queues(adapter, txo, i)
49415+ be_tx_rate_update(txo);
49416+
49417+ for_all_rx_queues(adapter, rxo, i) {
49418+ be_rx_rate_update(rxo);
49419+ be_rx_eqd_update(adapter, rxo);
49420+
49421+ if (rxo->rx_post_starved) {
49422+ rxo->rx_post_starved = false;
49423+ be_post_rx_frags(rxo);
49424+ }
49425 }
49426
49427+reschedule:
49428+ adapter->work_counter++;
49429 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
49430 }
49431
49432+static void be_msix_disable(struct be_adapter *adapter)
49433+{
49434+ if (msix_enabled(adapter)) {
49435+ pci_disable_msix(adapter->pdev);
49436+ adapter->num_msix_vec = 0;
49437+ }
49438+}
49439+
49440 static void be_msix_enable(struct be_adapter *adapter)
49441 {
49442- int i, status;
49443+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
49444+ int i, status, num_vec;
49445
49446- for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
49447+ num_vec = be_num_rxqs_want(adapter) + 1;
49448+
49449+ for (i = 0; i < num_vec; i++)
49450 adapter->msix_entries[i].entry = i;
49451
49452- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
49453- BE_NUM_MSIX_VECTORS);
49454- if (status == 0)
49455- adapter->msix_enabled = true;
49456+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
49457+ if (status == 0) {
49458+ goto done;
49459+ } else if (status >= BE_MIN_MSIX_VECTORS) {
49460+ num_vec = status;
49461+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
49462+ num_vec) == 0)
49463+ goto done;
49464+ }
49465 return;
49466+done:
49467+ adapter->num_msix_vec = num_vec;
49468+ return;
49469+}
49470+
49471+static void be_sriov_enable(struct be_adapter *adapter)
49472+{
49473+ be_check_sriov_fn_type(adapter);
49474+#ifdef CONFIG_PCI_IOV
49475+ if (be_physfn(adapter) && num_vfs) {
49476+ int status, pos;
49477+ u16 nvfs;
49478+
49479+ pos = pci_find_ext_capability(adapter->pdev,
49480+ PCI_EXT_CAP_ID_SRIOV);
49481+ pci_read_config_word(adapter->pdev,
49482+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
49483+ adapter->num_vfs = num_vfs;
49484+ if (num_vfs > nvfs) {
49485+ dev_info(&adapter->pdev->dev,
49486+ "Device supports %d VFs and not %d\n",
49487+ nvfs, num_vfs);
49488+ adapter->num_vfs = nvfs;
49489+ }
49490+
49491+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
49492+ if (status)
49493+ adapter->num_vfs = 0;
49494+ }
49495+#endif
49496+}
49497+
49498+static void be_sriov_disable(struct be_adapter *adapter)
49499+{
49500+#ifdef CONFIG_PCI_IOV
49501+ if (adapter->num_vfs > 0) {
49502+ pci_disable_sriov(adapter->pdev);
49503+ adapter->num_vfs = 0;
49504+ }
49505+#endif
49506 }
49507
49508-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
49509+static inline int be_msix_vec_get(struct be_adapter *adapter,
49510+ struct be_eq_obj *eq_obj)
49511 {
49512- return adapter->msix_entries[
49513- be_evt_bit_get(adapter, eq_id)].vector;
49514+ return adapter->msix_entries[eq_obj->eq_idx].vector;
49515 }
49516
49517 static int be_request_irq(struct be_adapter *adapter,
49518 struct be_eq_obj *eq_obj,
49519- void *handler, char *desc)
49520+ void *handler, char *desc, void *context)
49521 {
49522 struct net_device *netdev = adapter->netdev;
49523 int vec;
49524
49525 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
49526- vec = be_msix_vec_get(adapter, eq_obj->q.id);
49527- return request_irq(vec, handler, 0, eq_obj->desc, adapter);
49528+ vec = be_msix_vec_get(adapter, eq_obj);
49529+ return request_irq(vec, handler, 0, eq_obj->desc, context);
49530 }
49531
49532-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
49533+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
49534+ void *context)
49535 {
49536- int vec = be_msix_vec_get(adapter, eq_obj->q.id);
49537- free_irq(vec, adapter);
49538+ int vec = be_msix_vec_get(adapter, eq_obj);
49539+ free_irq(vec, context);
49540 }
49541
49542 static int be_msix_register(struct be_adapter *adapter)
49543 {
49544- int status;
49545+ struct be_rx_obj *rxo;
49546+ int status, i;
49547+ char qname[10];
49548
49549- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
49550+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
49551+ adapter);
49552 if (status)
49553 goto err;
49554
49555- status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
49556- if (status)
49557- goto free_tx_irq;
49558+ for_all_rx_queues(adapter, rxo, i) {
49559+ sprintf(qname, "rxq%d", i);
49560+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
49561+ qname, rxo);
49562+ if (status)
49563+ goto err_msix;
49564+ }
49565
49566 return 0;
49567
49568-free_tx_irq:
49569- be_free_irq(adapter, &adapter->tx_eq);
49570+err_msix:
49571+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49572+
49573+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
49574+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49575+
49576 err:
49577 dev_warn(&adapter->pdev->dev,
49578 "MSIX Request IRQ failed - err %d\n", status);
49579- pci_disable_msix(adapter->pdev);
49580- adapter->msix_enabled = false;
49581+ be_msix_disable(adapter);
49582 return status;
49583 }
49584
49585@@ -1544,10 +2402,13 @@ static int be_irq_register(struct be_adapter *adapter)
49586 struct net_device *netdev = adapter->netdev;
49587 int status;
49588
49589- if (adapter->msix_enabled) {
49590+ if (msix_enabled(adapter)) {
49591 status = be_msix_register(adapter);
49592 if (status == 0)
49593 goto done;
49594+ /* INTx is not supported for VF */
49595+ if (!be_physfn(adapter))
49596+ return status;
49597 }
49598
49599 /* INTx */
49600@@ -1567,87 +2428,363 @@ done:
49601 static void be_irq_unregister(struct be_adapter *adapter)
49602 {
49603 struct net_device *netdev = adapter->netdev;
49604+ struct be_rx_obj *rxo;
49605+ int i;
49606
49607 if (!adapter->isr_registered)
49608 return;
49609
49610 /* INTx */
49611- if (!adapter->msix_enabled) {
49612+ if (!msix_enabled(adapter)) {
49613 free_irq(netdev->irq, adapter);
49614 goto done;
49615 }
49616
49617 /* MSIx */
49618- be_free_irq(adapter, &adapter->tx_eq);
49619- be_free_irq(adapter, &adapter->rx_eq);
49620+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49621+
49622+ for_all_rx_queues(adapter, rxo, i)
49623+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49624+
49625 done:
49626 adapter->isr_registered = false;
49627- return;
49628 }
49629
49630-static int be_open(struct net_device *netdev)
49631+static u16 be_select_queue(struct net_device *netdev,
49632+ struct sk_buff *skb)
49633 {
49634 struct be_adapter *adapter = netdev_priv(netdev);
49635- struct be_eq_obj *rx_eq = &adapter->rx_eq;
49636+ u8 prio;
49637+
49638+ if (adapter->num_tx_qs == 1)
49639+ return 0;
49640+
49641+ prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
49642+ return adapter->tc_txq_map[adapter->prio_tc_map[prio]];
49643+}
49644+
49645+static void be_rx_queues_clear(struct be_adapter *adapter)
49646+{
49647+ struct be_queue_info *q;
49648+ struct be_rx_obj *rxo;
49649+ int i;
49650+
49651+ for_all_rx_queues(adapter, rxo, i) {
49652+ q = &rxo->q;
49653+ if (q->created) {
49654+ be_cmd_rxq_destroy(adapter, q);
49655+ /* After the rxq is invalidated, wait for a grace time
49656+ * of 1ms for all dma to end and the flush compl to
49657+ * arrive
49658+ */
49659+ mdelay(1);
49660+ be_rx_q_clean(adapter, rxo);
49661+ }
49662+
49663+ /* Clear any residual events */
49664+ q = &rxo->rx_eq.q;
49665+ if (q->created)
49666+ be_eq_clean(adapter, &rxo->rx_eq);
49667+ }
49668+}
49669+
49670+static int be_close(struct net_device *netdev)
49671+{
49672+ struct be_adapter *adapter = netdev_priv(netdev);
49673+ struct be_rx_obj *rxo;
49674+ struct be_tx_obj *txo;
49675 struct be_eq_obj *tx_eq = &adapter->tx_eq;
49676- bool link_up;
49677- int status;
49678+ int vec, i;
49679+
49680+ be_async_mcc_disable(adapter);
49681+
49682+ netif_stop_queue(netdev);
49683+ netif_carrier_off(netdev);
49684+ adapter->link_status = LINK_DOWN;
49685+
49686+ if (!lancer_chip(adapter))
49687+ be_intr_set(adapter, false);
49688+
49689+ for_all_rx_queues(adapter, rxo, i)
49690+ napi_disable(&rxo->rx_eq.napi);
49691+
49692+ napi_disable(&tx_eq->napi);
49693+
49694+ if (lancer_chip(adapter)) {
49695+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
49696+ for_all_rx_queues(adapter, rxo, i)
49697+ be_cq_notify(adapter, rxo->cq.id, false, 0);
49698+ for_all_tx_queues(adapter, txo, i)
49699+ be_cq_notify(adapter, txo->cq.id, false, 0);
49700+ }
49701+
49702+ if (msix_enabled(adapter)) {
49703+ vec = be_msix_vec_get(adapter, tx_eq);
49704+ synchronize_irq(vec);
49705+
49706+ for_all_rx_queues(adapter, rxo, i) {
49707+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
49708+ synchronize_irq(vec);
49709+ }
49710+ } else {
49711+ synchronize_irq(netdev->irq);
49712+ }
49713+ be_irq_unregister(adapter);
49714+
49715+ /* Wait for all pending tx completions to arrive so that
49716+ * all tx skbs are freed.
49717+ */
49718+ for_all_tx_queues(adapter, txo, i)
49719+ be_tx_compl_clean(adapter, txo);
49720+
49721+ be_rx_queues_clear(adapter);
49722+ return 0;
49723+}
49724+
49725+static int be_rx_queues_setup(struct be_adapter *adapter)
49726+{
49727+ struct be_rx_obj *rxo;
49728+ int rc, i;
49729+ u8 rsstable[MAX_RSS_QS];
49730+
49731+ for_all_rx_queues(adapter, rxo, i) {
49732+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
49733+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
49734+ adapter->if_handle,
49735+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
49736+ if (rc)
49737+ return rc;
49738+ }
49739+
49740+ if (be_multi_rxq(adapter)) {
49741+ for_all_rss_queues(adapter, rxo, i)
49742+ rsstable[i] = rxo->rss_id;
49743+
49744+ rc = be_cmd_rss_config(adapter, rsstable,
49745+ adapter->num_rx_qs - 1);
49746+ if (rc)
49747+ return rc;
49748+ }
49749
49750 /* First time posting */
49751- be_post_rx_frags(adapter);
49752+ for_all_rx_queues(adapter, rxo, i) {
49753+ be_post_rx_frags(rxo);
49754+ napi_enable(&rxo->rx_eq.napi);
49755+ }
49756+ return 0;
49757+}
49758+
49759+static int be_open(struct net_device *netdev)
49760+{
49761+ struct be_adapter *adapter = netdev_priv(netdev);
49762+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
49763+ struct be_rx_obj *rxo;
49764+ int link_status;
49765+ int status, i;
49766+ u8 mac_speed;
49767+ u16 link_speed;
49768+
49769+ status = be_rx_queues_setup(adapter);
49770+ if (status)
49771+ goto err;
49772
49773- napi_enable(&rx_eq->napi);
49774 napi_enable(&tx_eq->napi);
49775
49776 be_irq_register(adapter);
49777
49778- be_intr_set(adapter, true);
49779+ if (!lancer_chip(adapter))
49780+ be_intr_set(adapter, true);
49781
49782 /* The evt queues are created in unarmed state; arm them */
49783- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
49784+ for_all_rx_queues(adapter, rxo, i) {
49785+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
49786+ be_cq_notify(adapter, rxo->cq.id, true, 0);
49787+ }
49788 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49789
49790- /* Rx compl queue may be in unarmed state; rearm it */
49791- be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
49792+ /* Now that interrupts are on we can process async mcc */
49793+ be_async_mcc_enable(adapter);
49794
49795- status = be_cmd_link_status_query(adapter, &link_up);
49796+ status = be_cmd_link_status_query(adapter, &link_status, &mac_speed,
49797+ &link_speed, 0);
49798 if (status)
49799- goto ret_sts;
49800- be_link_status_update(adapter, link_up);
49801+ goto err;
49802+ be_link_status_update(adapter, link_status);
49803
49804- status = be_vid_config(adapter);
49805+ status = be_vid_config(adapter, false, 0);
49806 if (status)
49807- goto ret_sts;
49808+ goto err;
49809
49810- status = be_cmd_set_flow_control(adapter,
49811- adapter->tx_fc, adapter->rx_fc);
49812- if (status)
49813- goto ret_sts;
49814+ if (be_physfn(adapter)) {
49815+ status = be_cmd_set_flow_control(adapter,
49816+ adapter->tx_fc, adapter->rx_fc);
49817+ if (status)
49818+ goto err;
49819+ }
49820+
49821+ return 0;
49822+err:
49823+ be_close(adapter->netdev);
49824+ return -EIO;
49825+}
49826+
49827+static int be_setup_wol(struct be_adapter *adapter, bool enable)
49828+{
49829+ struct be_dma_mem cmd;
49830+ int status = 0;
49831+ u8 mac[ETH_ALEN];
49832+
49833+ memset(mac, 0, ETH_ALEN);
49834+
49835+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
49836+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
49837+ if (cmd.va == NULL)
49838+ return -1;
49839+ memset(cmd.va, 0, cmd.size);
49840+
49841+ if (enable) {
49842+ status = pci_write_config_dword(adapter->pdev,
49843+ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
49844+ if (status) {
49845+ dev_err(&adapter->pdev->dev,
49846+ "Could not enable Wake-on-lan\n");
49847+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
49848+ cmd.dma);
49849+ return status;
49850+ }
49851+ status = be_cmd_enable_magic_wol(adapter,
49852+ adapter->netdev->dev_addr, &cmd);
49853+ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
49854+ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
49855+ } else {
49856+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
49857+ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
49858+ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
49859+ }
49860+
49861+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
49862+ return status;
49863+}
49864+
49865+/*
49866+ * Generate a seed MAC address from the PF MAC Address using jhash.
49867+ * MAC Address for VFs are assigned incrementally starting from the seed.
49868+ * These addresses are programmed in the ASIC by the PF and the VF driver
49869+ * queries for the MAC address during its probe.
49870+ */
49871+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
49872+{
49873+ u32 vf = 0;
49874+ int status = 0;
49875+ u8 mac[ETH_ALEN];
49876+
49877+ be_vf_eth_addr_generate(adapter, mac);
49878+
49879+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49880+ status = be_cmd_pmac_add(adapter, mac,
49881+ adapter->vf_cfg[vf].vf_if_handle,
49882+ &adapter->vf_cfg[vf].vf_pmac_id,
49883+ vf + 1);
49884+ if (status)
49885+ dev_err(&adapter->pdev->dev,
49886+ "Mac address add failed for VF %d\n", vf);
49887+ else
49888+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
49889
49890- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
49891-ret_sts:
49892+ mac[5] += 1;
49893+ }
49894 return status;
49895 }
49896
49897+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
49898+{
49899+ u32 vf;
49900+
49901+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49902+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
49903+ be_cmd_pmac_del(adapter,
49904+ adapter->vf_cfg[vf].vf_if_handle,
49905+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
49906+ }
49907+}
49908+
49909+static int be_num_txqs_want(struct be_adapter *adapter)
49910+{
49911+ if (adapter->num_vfs > 0 || be_is_mc(adapter) ||
49912+ lancer_chip(adapter) || !be_physfn(adapter) ||
49913+ adapter->generation == BE_GEN2)
49914+ return 1;
49915+ else
49916+ return MAX_TX_QS;
49917+}
49918+
49919 static int be_setup(struct be_adapter *adapter)
49920 {
49921 struct net_device *netdev = adapter->netdev;
49922- u32 cap_flags, en_flags;
49923- int status;
49924-
49925- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49926- BE_IF_FLAGS_MCAST_PROMISCUOUS |
49927- BE_IF_FLAGS_PROMISCUOUS |
49928- BE_IF_FLAGS_PASS_L3L4_ERRORS;
49929- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49930- BE_IF_FLAGS_PASS_L3L4_ERRORS;
49931+ int status, fw_num_txqs, num_txqs;
49932+ u32 cap_flags, en_flags, vf = 0;
49933+ u8 mac[ETH_ALEN];
49934+
49935+ num_txqs = be_num_txqs_want(adapter);
49936+ if (num_txqs > 1) {
49937+ be_cmd_req_pg_pfc(adapter, &fw_num_txqs);
49938+ num_txqs = min(num_txqs, fw_num_txqs);
49939+ }
49940+ adapter->num_tx_qs = num_txqs;
49941+ if (adapter->num_tx_qs != MAX_TX_QS)
49942+ netif_set_real_num_tx_queues(adapter->netdev,
49943+ adapter->num_tx_qs);
49944+
49945+ be_cmd_req_native_mode(adapter);
49946+
49947+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
49948+ BE_IF_FLAGS_BROADCAST |
49949+ BE_IF_FLAGS_MULTICAST;
49950+
49951+ if (be_physfn(adapter)) {
49952+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
49953+ cap_flags |= BE_IF_FLAGS_RSS;
49954+ en_flags |= BE_IF_FLAGS_RSS;
49955+ }
49956+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
49957+ BE_IF_FLAGS_PROMISCUOUS;
49958+ if (!lancer_A0_chip(adapter)) {
49959+ cap_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
49960+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
49961+ }
49962+ }
49963
49964 status = be_cmd_if_create(adapter, cap_flags, en_flags,
49965 netdev->dev_addr, false/* pmac_invalid */,
49966- &adapter->if_handle, &adapter->pmac_id);
49967+ &adapter->if_handle, &adapter->pmac_id, 0);
49968 if (status != 0)
49969 goto do_none;
49970
49971+ if (be_physfn(adapter)) {
49972+ while (vf < adapter->num_vfs) {
49973+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
49974+ BE_IF_FLAGS_BROADCAST;
49975+ status = be_cmd_if_create(adapter, cap_flags,
49976+ en_flags, mac, true,
49977+ &adapter->vf_cfg[vf].vf_if_handle,
49978+ NULL, vf+1);
49979+ if (status) {
49980+ dev_err(&adapter->pdev->dev,
49981+ "Interface Create failed for VF %d\n", vf);
49982+ goto if_destroy;
49983+ }
49984+ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
49985+ vf++;
49986+ }
49987+ } else {
49988+ status = be_cmd_mac_addr_query(adapter, mac,
49989+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
49990+ if (!status) {
49991+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
49992+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
49993+ }
49994+ }
49995+
49996 status = be_tx_queues_create(adapter);
49997 if (status != 0)
49998 goto if_destroy;
49999@@ -1656,10 +2793,15 @@ static int be_setup(struct be_adapter *adapter)
50000 if (status != 0)
50001 goto tx_qs_destroy;
50002
50003+ /* Allow all priorities by default. A GRP5 evt may modify this */
50004+ adapter->vlan_prio_bmap = 0xff;
50005+
50006 status = be_mcc_queues_create(adapter);
50007 if (status != 0)
50008 goto rx_qs_destroy;
50009
50010+ adapter->link_speed = -1;
50011+
50012 return 0;
50013
50014 rx_qs_destroy:
50015@@ -1667,158 +2809,392 @@ rx_qs_destroy:
50016 tx_qs_destroy:
50017 be_tx_queues_destroy(adapter);
50018 if_destroy:
50019- be_cmd_if_destroy(adapter, adapter->if_handle);
50020+ if (be_physfn(adapter)) {
50021+ for (vf = 0; vf < adapter->num_vfs; vf++)
50022+ if (adapter->vf_cfg[vf].vf_if_handle)
50023+ be_cmd_if_destroy(adapter,
50024+ adapter->vf_cfg[vf].vf_if_handle,
50025+ vf + 1);
50026+ }
50027+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50028 do_none:
50029 return status;
50030 }
50031
50032 static int be_clear(struct be_adapter *adapter)
50033 {
50034+ int vf;
50035+
50036+ if (be_physfn(adapter) && adapter->num_vfs)
50037+ be_vf_eth_addr_rem(adapter);
50038+
50039 be_mcc_queues_destroy(adapter);
50040 be_rx_queues_destroy(adapter);
50041 be_tx_queues_destroy(adapter);
50042+ adapter->eq_next_idx = 0;
50043
50044- be_cmd_if_destroy(adapter, adapter->if_handle);
50045+ if (be_physfn(adapter)) {
50046+ for (vf = 0; vf < adapter->num_vfs; vf++)
50047+ if (adapter->vf_cfg[vf].vf_if_handle)
50048+ be_cmd_if_destroy(adapter,
50049+ adapter->vf_cfg[vf].vf_if_handle, vf + 1);
50050+ }
50051+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
50052
50053+ /* tell fw we're done with firing cmds */
50054+ be_cmd_fw_clean(adapter);
50055 return 0;
50056 }
50057
50058-static int be_close(struct net_device *netdev)
50059+static void be_cpy_drv_ver(struct be_adapter *adapter, void *va)
50060+{
50061+ struct mgmt_controller_attrib *attrib =
50062+ (struct mgmt_controller_attrib *) ((u8*) va +
50063+ sizeof(struct be_cmd_resp_hdr));
50064+
50065+ memcpy(attrib->hba_attribs.driver_version_string,
50066+ DRV_VER, sizeof(DRV_VER));
50067+ attrib->pci_bus_number = adapter->pdev->bus->number;
50068+ attrib->pci_device_number = PCI_SLOT(adapter->pdev->devfn);
50069+ return;
50070+}
50071+
50072+#define IOCTL_COOKIE "SERVERENGINES CORP"
50073+static int be_do_ioctl(struct net_device *netdev,
50074+ struct ifreq *ifr, int cmd)
50075 {
50076 struct be_adapter *adapter = netdev_priv(netdev);
50077- struct be_eq_obj *rx_eq = &adapter->rx_eq;
50078- struct be_eq_obj *tx_eq = &adapter->tx_eq;
50079- int vec;
50080+ struct be_cmd_req_hdr req;
50081+ struct be_cmd_resp_hdr *resp;
50082+ void *data = ifr->ifr_data;
50083+ void *ioctl_ptr;
50084+ void *va;
50085+ dma_addr_t dma;
50086+ u32 req_size;
50087+ int status, ret = 0;
50088+ u8 cookie[32];
50089+
50090+ switch (cmd) {
50091+ case SIOCDEVPRIVATE:
50092+ if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE)))
50093+ return -EFAULT;
50094+
50095+ if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
50096+ return -EINVAL;
50097
50098- cancel_delayed_work_sync(&adapter->work);
50099+ ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE);
50100+ if (copy_from_user(&req, ioctl_ptr,
50101+ sizeof(struct be_cmd_req_hdr)))
50102+ return -EFAULT;
50103
50104- netif_stop_queue(netdev);
50105- netif_carrier_off(netdev);
50106- adapter->link_up = false;
50107+ req_size = le32_to_cpu(req.request_length);
50108+ if (req_size > 65536)
50109+ return -EINVAL;
50110
50111- be_intr_set(adapter, false);
50112+ req_size += sizeof(struct be_cmd_req_hdr);
50113+ va = pci_alloc_consistent(adapter->pdev, req_size, &dma);
50114+ if (!va)
50115+ return -ENOMEM;
50116+ if (copy_from_user(va, ioctl_ptr, req_size)) {
50117+ ret = -EFAULT;
50118+ break;
50119+ }
50120
50121- if (adapter->msix_enabled) {
50122- vec = be_msix_vec_get(adapter, tx_eq->q.id);
50123- synchronize_irq(vec);
50124- vec = be_msix_vec_get(adapter, rx_eq->q.id);
50125- synchronize_irq(vec);
50126- } else {
50127- synchronize_irq(netdev->irq);
50128+ status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va);
50129+ if (status == -1) {
50130+ ret = -EIO;
50131+ break;
50132+ }
50133+
50134+ resp = (struct be_cmd_resp_hdr *) va;
50135+ if (!status) {
50136+ if (req.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES)
50137+ be_cpy_drv_ver(adapter, va);
50138+ }
50139+
50140+ if (copy_to_user(ioctl_ptr, va, req_size)) {
50141+ ret = -EFAULT;
50142+ break;
50143+ }
50144+ break;
50145+ default:
50146+ return -EOPNOTSUPP;
50147 }
50148- be_irq_unregister(adapter);
50149
50150- napi_disable(&rx_eq->napi);
50151- napi_disable(&tx_eq->napi);
50152+ if (va)
50153+ pci_free_consistent(adapter->pdev, req_size, va, dma);
50154+
50155+ return ret;
50156+}
50157+
50158+#ifdef CONFIG_NET_POLL_CONTROLLER
50159+static void be_netpoll(struct net_device *netdev)
50160+{
50161+ struct be_adapter *adapter = netdev_priv(netdev);
50162+ struct be_rx_obj *rxo;
50163+ int i;
50164
50165- /* Wait for all pending tx completions to arrive so that
50166- * all tx skbs are freed.
50167- */
50168- be_tx_compl_clean(adapter);
50169+ event_handle(adapter, &adapter->tx_eq, false);
50170+ for_all_rx_queues(adapter, rxo, i)
50171+ event_handle(adapter, &rxo->rx_eq, true);
50172+
50173+ return;
50174+}
50175+#endif
50176+
50177+static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
50178+ void **ip_hdr, void **tcpudp_hdr,
50179+ u64 *hdr_flags, void *priv)
50180+{
50181+ struct ethhdr *eh;
50182+ struct vlan_ethhdr *veh;
50183+ struct iphdr *iph;
50184+ u8 *va = page_address(frag->page) + frag->page_offset;
50185+ unsigned long ll_hlen;
50186+
50187+ prefetch(va);
50188+ eh = (struct ethhdr *)va;
50189+ *mac_hdr = eh;
50190+ ll_hlen = ETH_HLEN;
50191+ if (eh->h_proto != htons(ETH_P_IP)) {
50192+ if (eh->h_proto == htons(ETH_P_8021Q)) {
50193+ veh = (struct vlan_ethhdr *)va;
50194+ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
50195+ return -1;
50196+
50197+ ll_hlen += VLAN_HLEN;
50198+ } else {
50199+ return -1;
50200+ }
50201+ }
50202+ *hdr_flags = LRO_IPV4;
50203+ iph = (struct iphdr *)(va + ll_hlen);
50204+ *ip_hdr = iph;
50205+ if (iph->protocol != IPPROTO_TCP)
50206+ return -1;
50207+ *hdr_flags |= LRO_TCP;
50208+ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
50209
50210 return 0;
50211 }
50212
50213-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50214+static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
50215+{
50216+ struct net_lro_mgr *lro_mgr;
50217+ struct be_rx_obj *rxo;
50218+ int i;
50219+
50220+ for_all_rx_queues(adapter, rxo, i) {
50221+ lro_mgr = &rxo->lro_mgr;
50222+ lro_mgr->dev = netdev;
50223+ lro_mgr->features = LRO_F_NAPI;
50224+ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
50225+ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
50226+ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
50227+ lro_mgr->lro_arr = rxo->lro_desc;
50228+ lro_mgr->get_frag_header = be_get_frag_header;
50229+ lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
50230+ }
50231+
50232+#ifdef NETIF_F_GRO
50233+ netdev->features |= NETIF_F_GRO;
50234+ adapter->gro_supported = true;
50235+#endif
50236+}
50237+
50238+#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
50239 char flash_cookie[2][16] = {"*** SE FLAS",
50240 "H DIRECTORY *** "};
50241-static int be_flash_image(struct be_adapter *adapter,
50242+
50243+static bool be_flash_redboot(struct be_adapter *adapter,
50244+ const u8 *p, u32 img_start, int image_size,
50245+ int hdr_size)
50246+{
50247+ u32 crc_offset;
50248+ u8 flashed_crc[4];
50249+ int status;
50250+
50251+ crc_offset = hdr_size + img_start + image_size - 4;
50252+
50253+ p += crc_offset;
50254+
50255+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
50256+ (image_size - 4));
50257+ if (status) {
50258+ dev_err(&adapter->pdev->dev,
50259+ "could not get crc from flash, not flashing redboot\n");
50260+ return false;
50261+ }
50262+
50263+ /*update redboot only if crc does not match*/
50264+ if (!memcmp(flashed_crc, p, 4))
50265+ return false;
50266+ else
50267+ return true;
50268+}
50269+
50270+static bool phy_flashing_required(struct be_adapter *adapter)
50271+{
50272+ int status = 0;
50273+ struct be_phy_info phy_info;
50274+
50275+ status = be_cmd_get_phy_info(adapter, &phy_info);
50276+ if (status)
50277+ return false;
50278+ if ((phy_info.phy_type == TN_8022) &&
50279+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
50280+ return true;
50281+ }
50282+ return false;
50283+}
50284+
50285+static int be_flash_data(struct be_adapter *adapter,
50286 const struct firmware *fw,
50287- struct be_dma_mem *flash_cmd, u32 flash_type)
50288+ struct be_dma_mem *flash_cmd, int num_of_images)
50289+
50290 {
50291- int status;
50292- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
50293+ int status = 0, i, filehdr_size = 0;
50294+ u32 total_bytes = 0, flash_op;
50295 int num_bytes;
50296 const u8 *p = fw->data;
50297 struct be_cmd_write_flashrom *req = flash_cmd->va;
50298+ struct flash_comp *pflashcomp;
50299+ int num_comp;
50300
50301- switch (flash_type) {
50302- case FLASHROM_TYPE_ISCSI_ACTIVE:
50303- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
50304- image_size = FLASH_IMAGE_MAX_SIZE;
50305- break;
50306- case FLASHROM_TYPE_ISCSI_BACKUP:
50307- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
50308- image_size = FLASH_IMAGE_MAX_SIZE;
50309- break;
50310- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
50311- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
50312- image_size = FLASH_IMAGE_MAX_SIZE;
50313- break;
50314- case FLASHROM_TYPE_FCOE_FW_BACKUP:
50315- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
50316- image_size = FLASH_IMAGE_MAX_SIZE;
50317- break;
50318- case FLASHROM_TYPE_BIOS:
50319- image_offset = FLASH_iSCSI_BIOS_START;
50320- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50321- break;
50322- case FLASHROM_TYPE_FCOE_BIOS:
50323- image_offset = FLASH_FCoE_BIOS_START;
50324- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50325- break;
50326- case FLASHROM_TYPE_PXE_BIOS:
50327- image_offset = FLASH_PXE_BIOS_START;
50328- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
50329- break;
50330- default:
50331- return 0;
50332+ struct flash_comp gen3_flash_types[10] = {
50333+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
50334+ FLASH_IMAGE_MAX_SIZE_g3},
50335+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
50336+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
50337+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
50338+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50339+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
50340+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50341+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
50342+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
50343+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
50344+ FLASH_IMAGE_MAX_SIZE_g3},
50345+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
50346+ FLASH_IMAGE_MAX_SIZE_g3},
50347+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
50348+ FLASH_IMAGE_MAX_SIZE_g3},
50349+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
50350+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
50351+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
50352+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
50353+ };
50354+ struct flash_comp gen2_flash_types[8] = {
50355+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
50356+ FLASH_IMAGE_MAX_SIZE_g2},
50357+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
50358+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
50359+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
50360+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50361+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
50362+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50363+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
50364+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
50365+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
50366+ FLASH_IMAGE_MAX_SIZE_g2},
50367+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
50368+ FLASH_IMAGE_MAX_SIZE_g2},
50369+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
50370+ FLASH_IMAGE_MAX_SIZE_g2}
50371+ };
50372+ if (adapter->generation == BE_GEN3) {
50373+ pflashcomp = gen3_flash_types;
50374+ filehdr_size = sizeof(struct flash_file_hdr_g3);
50375+ num_comp = ARRAY_SIZE(gen3_flash_types);
50376+ } else {
50377+ pflashcomp = gen2_flash_types;
50378+ filehdr_size = sizeof(struct flash_file_hdr_g2);
50379+ num_comp = ARRAY_SIZE(gen2_flash_types);
50380 }
50381+ for (i = 0; i < num_comp; i++) {
50382+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
50383+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
50384+ continue;
50385+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
50386+ if (!phy_flashing_required(adapter))
50387+ continue;
50388+ }
50389+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
50390+ (!be_flash_redboot(adapter, fw->data,
50391+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
50392+ (num_of_images * sizeof(struct image_hdr)))))
50393+ continue;
50394
50395- p += sizeof(struct flash_file_hdr) + image_offset;
50396- if (p + image_size > fw->data + fw->size)
50397- return -1;
50398-
50399- total_bytes = image_size;
50400-
50401- while (total_bytes) {
50402- if (total_bytes > 32*1024)
50403- num_bytes = 32*1024;
50404- else
50405- num_bytes = total_bytes;
50406- total_bytes -= num_bytes;
50407-
50408- if (!total_bytes)
50409- flash_op = FLASHROM_OPER_FLASH;
50410- else
50411- flash_op = FLASHROM_OPER_SAVE;
50412- memcpy(req->params.data_buf, p, num_bytes);
50413- p += num_bytes;
50414- status = be_cmd_write_flashrom(adapter, flash_cmd,
50415- flash_type, flash_op, num_bytes);
50416- if (status) {
50417- dev_err(&adapter->pdev->dev,
50418- "cmd to write to flash rom failed. type/op %d/%d\n",
50419- flash_type, flash_op);
50420+ p = fw->data;
50421+ p += filehdr_size + pflashcomp[i].offset
50422+ + (num_of_images * sizeof(struct image_hdr));
50423+ if (p + pflashcomp[i].size > fw->data + fw->size)
50424 return -1;
50425+ total_bytes = pflashcomp[i].size;
50426+ while (total_bytes) {
50427+ if (total_bytes > 32*1024)
50428+ num_bytes = 32*1024;
50429+ else
50430+ num_bytes = total_bytes;
50431+ total_bytes -= num_bytes;
50432+ if (!total_bytes) {
50433+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50434+ flash_op = FLASHROM_OPER_PHY_FLASH;
50435+ else
50436+ flash_op = FLASHROM_OPER_FLASH;
50437+ } else {
50438+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
50439+ flash_op = FLASHROM_OPER_PHY_SAVE;
50440+ else
50441+ flash_op = FLASHROM_OPER_SAVE;
50442+ }
50443+ memcpy(req->params.data_buf, p, num_bytes);
50444+ p += num_bytes;
50445+ status = be_cmd_write_flashrom(adapter, flash_cmd,
50446+ pflashcomp[i].optype, flash_op, num_bytes);
50447+ if (status) {
50448+ if ((status == ILLEGAL_IOCTL_REQ) &&
50449+ (pflashcomp[i].optype ==
50450+ IMG_TYPE_PHY_FW))
50451+ break;
50452+ dev_err(&adapter->pdev->dev,
50453+ "cmd to write to flash rom failed.\n");
50454+ return -1;
50455+ }
50456+ yield();
50457 }
50458- yield();
50459 }
50460-
50461 return 0;
50462 }
50463
50464+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
50465+{
50466+ if (fhdr == NULL)
50467+ return 0;
50468+ if (fhdr->build[0] == '3')
50469+ return BE_GEN3;
50470+ else if (fhdr->build[0] == '2')
50471+ return BE_GEN2;
50472+ else
50473+ return 0;
50474+}
50475+
50476 int be_load_fw(struct be_adapter *adapter, u8 *func)
50477 {
50478 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
50479 const struct firmware *fw;
50480- struct flash_file_hdr *fhdr;
50481- struct flash_section_info *fsec = NULL;
50482+ struct flash_file_hdr_g2 *fhdr;
50483+ struct flash_file_hdr_g3 *fhdr3;
50484+ struct image_hdr *img_hdr_ptr = NULL;
50485 struct be_dma_mem flash_cmd;
50486- int status;
50487+ int status, i = 0, num_imgs = 0;
50488 const u8 *p;
50489- bool entry_found = false;
50490- int flash_type;
50491- char fw_ver[FW_VER_LEN];
50492- char fw_cfg;
50493
50494- status = be_cmd_get_fw_ver(adapter, fw_ver);
50495- if (status)
50496- return status;
50497+ if (!netif_running(adapter->netdev)) {
50498+ dev_err(&adapter->pdev->dev,
50499+ "Firmware load not allowed (interface is down)\n");
50500+ return -1;
50501+ }
50502
50503- fw_cfg = *(fw_ver + 2);
50504- if (fw_cfg == '0')
50505- fw_cfg = '1';
50506 strcpy(fw_file, func);
50507
50508 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
50509@@ -1826,34 +3202,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50510 goto fw_exit;
50511
50512 p = fw->data;
50513- fhdr = (struct flash_file_hdr *) p;
50514- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
50515- dev_err(&adapter->pdev->dev,
50516- "Firmware(%s) load error (signature did not match)\n",
50517- fw_file);
50518- status = -1;
50519- goto fw_exit;
50520- }
50521-
50522+ fhdr = (struct flash_file_hdr_g2 *) p;
50523 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
50524
50525- p += sizeof(struct flash_file_hdr);
50526- while (p < (fw->data + fw->size)) {
50527- fsec = (struct flash_section_info *)p;
50528- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
50529- entry_found = true;
50530- break;
50531- }
50532- p += 32;
50533- }
50534-
50535- if (!entry_found) {
50536- status = -1;
50537- dev_err(&adapter->pdev->dev,
50538- "Flash cookie not found in firmware image\n");
50539- goto fw_exit;
50540- }
50541-
50542 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
50543 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
50544 &flash_cmd.dma);
50545@@ -1864,12 +3215,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50546 goto fw_exit;
50547 }
50548
50549- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
50550- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
50551- status = be_flash_image(adapter, fw, &flash_cmd,
50552- flash_type);
50553- if (status)
50554- break;
50555+ if ((adapter->generation == BE_GEN3) &&
50556+ (get_ufigen_type(fhdr) == BE_GEN3)) {
50557+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
50558+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
50559+ for (i = 0; i < num_imgs; i++) {
50560+ img_hdr_ptr = (struct image_hdr *) (fw->data +
50561+ (sizeof(struct flash_file_hdr_g3) +
50562+ i * sizeof(struct image_hdr)));
50563+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
50564+ status = be_flash_data(adapter, fw, &flash_cmd,
50565+ num_imgs);
50566+ }
50567+ } else if ((adapter->generation == BE_GEN2) &&
50568+ (get_ufigen_type(fhdr) == BE_GEN2)) {
50569+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
50570+ } else {
50571+ dev_err(&adapter->pdev->dev,
50572+ "UFI and Interface are not compatible for flashing\n");
50573+ status = -1;
50574 }
50575
50576 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
50577@@ -1879,14 +3243,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50578 goto fw_exit;
50579 }
50580
50581- dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n");
50582+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
50583
50584 fw_exit:
50585 release_firmware(fw);
50586 return status;
50587 }
50588
50589-static struct net_device_ops be_netdev_ops = {
50590+static net_device_ops_no_const be_netdev_ops = {
50591 .ndo_open = be_open,
50592 .ndo_stop = be_close,
50593 .ndo_start_xmit = be_xmit,
50594@@ -1898,15 +3262,32 @@ static struct net_device_ops be_netdev_ops = {
50595 .ndo_vlan_rx_register = be_vlan_register,
50596 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
50597 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
50598+#ifdef HAVE_SRIOV_CONFIG
50599+ .ndo_set_vf_mac = be_set_vf_mac,
50600+ .ndo_set_vf_vlan = be_set_vf_vlan,
50601+ .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
50602+ .ndo_get_vf_config = be_get_vf_config,
50603+#endif
50604+ .ndo_do_ioctl = be_do_ioctl,
50605+#ifdef CONFIG_NET_POLL_CONTROLLER
50606+ .ndo_poll_controller = be_netpoll,
50607+#endif
50608 };
50609
50610-static void be_netdev_init(struct net_device *netdev)
50611+static int be_netdev_init(struct net_device *netdev)
50612 {
50613 struct be_adapter *adapter = netdev_priv(netdev);
50614+ struct be_rx_obj *rxo;
50615+ int i, status = 0;
50616
50617 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
50618- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
50619- NETIF_F_GRO;
50620+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_TSO6;
50621+
50622+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
50623+ NETIF_F_HW_CSUM;
50624+
50625+ netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO |
50626+ NETIF_F_VLAN_CSUM;
50627
50628 netdev->flags |= IFF_MULTICAST;
50629
50630@@ -1918,17 +3299,30 @@ static void be_netdev_init(struct net_device *netdev)
50631
50632 netif_set_gso_max_size(netdev, 65535);
50633
50634+ if (adapter->flags & BE_FLAGS_DCBX)
50635+ be_netdev_ops.ndo_select_queue = be_select_queue;
50636 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
50637-
50638+
50639 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
50640
50641- netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
50642- BE_NAPI_WEIGHT);
50643- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50644+ be_lro_init(adapter, netdev);
50645+
50646+ for_all_rx_queues(adapter, rxo, i) {
50647+ status = be_netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
50648+ BE_NAPI_WEIGHT);
50649+ if (status) {
50650+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50651+ "for rxo:%d\n", i);
50652+ return status;
50653+ }
50654+ }
50655+ status = be_netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50656 BE_NAPI_WEIGHT);
50657+ if (status)
50658+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50659+ "for tx\n");
50660
50661- netif_carrier_off(netdev);
50662- netif_stop_queue(netdev);
50663+ return status;
50664 }
50665
50666 static void be_unmap_pci_bars(struct be_adapter *adapter)
50667@@ -1937,37 +3331,62 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
50668 iounmap(adapter->csr);
50669 if (adapter->db)
50670 iounmap(adapter->db);
50671- if (adapter->pcicfg)
50672+ if (adapter->pcicfg && be_physfn(adapter))
50673 iounmap(adapter->pcicfg);
50674 }
50675
50676 static int be_map_pci_bars(struct be_adapter *adapter)
50677 {
50678+ struct pci_dev *pdev = adapter->pdev;
50679 u8 __iomem *addr;
50680- int pcicfg_reg;
50681+ int pcicfg_reg, db_reg;
50682
50683- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
50684- pci_resource_len(adapter->pdev, 2));
50685- if (addr == NULL)
50686- return -ENOMEM;
50687- adapter->csr = addr;
50688+ if (lancer_chip(adapter)) {
50689+ addr = ioremap_nocache(pci_resource_start(pdev, 0),
50690+ pci_resource_len(adapter->pdev, 0));
50691+ if (addr == NULL)
50692+ return -ENOMEM;
50693+ adapter->db = addr;
50694+ return 0;
50695+ }
50696
50697- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
50698- 128 * 1024);
50699- if (addr == NULL)
50700- goto pci_map_err;
50701- adapter->db = addr;
50702+ if (be_physfn(adapter)) {
50703+ addr = ioremap_nocache(pci_resource_start(pdev, 2),
50704+ pci_resource_len(pdev, 2));
50705+ if (addr == NULL)
50706+ return -ENOMEM;
50707+ adapter->csr = addr;
50708+ adapter->netdev->mem_start = pci_resource_start(pdev, 2);
50709+ adapter->netdev->mem_end = pci_resource_start(pdev, 2) +
50710+ pci_resource_len(pdev, 2);
50711+ }
50712
50713- if (adapter->generation == BE_GEN2)
50714+ if (adapter->generation == BE_GEN2) {
50715 pcicfg_reg = 1;
50716- else
50717+ db_reg = 4;
50718+ } else {
50719 pcicfg_reg = 0;
50720+ if (be_physfn(adapter))
50721+ db_reg = 4;
50722+ else
50723+ db_reg = 0;
50724+ }
50725
50726- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
50727- pci_resource_len(adapter->pdev, pcicfg_reg));
50728+ addr = ioremap_nocache(pci_resource_start(pdev, db_reg),
50729+ pci_resource_len(pdev, db_reg));
50730 if (addr == NULL)
50731 goto pci_map_err;
50732- adapter->pcicfg = addr;
50733+ adapter->db = addr;
50734+
50735+ if (be_physfn(adapter)) {
50736+ addr = ioremap_nocache(
50737+ pci_resource_start(pdev, pcicfg_reg),
50738+ pci_resource_len(pdev, pcicfg_reg));
50739+ if (addr == NULL)
50740+ goto pci_map_err;
50741+ adapter->pcicfg = addr;
50742+ } else
50743+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
50744
50745 return 0;
50746 pci_map_err:
50747@@ -1985,40 +3404,69 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
50748 if (mem->va)
50749 pci_free_consistent(adapter->pdev, mem->size,
50750 mem->va, mem->dma);
50751+
50752+ mem = &adapter->rx_filter;
50753+ if (mem->va)
50754+ pci_free_consistent(adapter->pdev, mem->size,
50755+ mem->va, mem->dma);
50756 }
50757
50758 static int be_ctrl_init(struct be_adapter *adapter)
50759 {
50760 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
50761 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
50762+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
50763 int status;
50764
50765 status = be_map_pci_bars(adapter);
50766 if (status)
50767- return status;
50768+ goto done;
50769
50770 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
50771 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
50772 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
50773 if (!mbox_mem_alloc->va) {
50774- be_unmap_pci_bars(adapter);
50775- return -1;
50776+ status = -ENOMEM;
50777+ goto unmap_pci_bars;
50778 }
50779+
50780 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
50781 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
50782 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
50783 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
50784- spin_lock_init(&adapter->mbox_lock);
50785+
50786+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
50787+ rx_filter->va = pci_alloc_consistent(adapter->pdev, rx_filter->size,
50788+ &rx_filter->dma);
50789+ if (rx_filter->va == NULL) {
50790+ status = -ENOMEM;
50791+ goto free_mbox;
50792+ }
50793+ memset(rx_filter->va, 0, rx_filter->size);
50794+
50795+ mutex_init(&adapter->mbox_lock);
50796 spin_lock_init(&adapter->mcc_lock);
50797 spin_lock_init(&adapter->mcc_cq_lock);
50798
50799+ init_completion(&adapter->flash_compl);
50800+
50801+ PCI_SAVE_STATE(adapter->pdev);
50802 return 0;
50803+
50804+free_mbox:
50805+ pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
50806+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
50807+
50808+unmap_pci_bars:
50809+ be_unmap_pci_bars(adapter);
50810+
50811+done:
50812+ return status;
50813 }
50814
50815 static void be_stats_cleanup(struct be_adapter *adapter)
50816 {
50817- struct be_stats_obj *stats = &adapter->stats;
50818- struct be_dma_mem *cmd = &stats->cmd;
50819+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50820
50821 if (cmd->va)
50822 pci_free_consistent(adapter->pdev, cmd->size,
50823@@ -2027,10 +3475,12 @@ static void be_stats_cleanup(struct be_adapter *adapter)
50824
50825 static int be_stats_init(struct be_adapter *adapter)
50826 {
50827- struct be_stats_obj *stats = &adapter->stats;
50828- struct be_dma_mem *cmd = &stats->cmd;
50829+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50830
50831- cmd->size = sizeof(struct be_cmd_req_get_stats);
50832+ if (adapter->generation == BE_GEN2)
50833+ cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
50834+ else
50835+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
50836 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
50837 if (cmd->va == NULL)
50838 return -1;
50839@@ -2041,9 +3491,17 @@ static int be_stats_init(struct be_adapter *adapter)
50840 static void __devexit be_remove(struct pci_dev *pdev)
50841 {
50842 struct be_adapter *adapter = pci_get_drvdata(pdev);
50843+
50844 if (!adapter)
50845 return;
50846
50847+ cancel_delayed_work_sync(&adapter->work);
50848+
50849+#ifdef CONFIG_PALAU
50850+ be_sysfs_remove_group(adapter);
50851+#endif
50852+
50853+ /* be_close() gets called if the device is open by unregister */
50854 unregister_netdev(adapter->netdev);
50855
50856 be_clear(adapter);
50857@@ -2052,36 +3510,203 @@ static void __devexit be_remove(struct pci_dev *pdev)
50858
50859 be_ctrl_cleanup(adapter);
50860
50861- if (adapter->msix_enabled) {
50862- pci_disable_msix(adapter->pdev);
50863- adapter->msix_enabled = false;
50864- }
50865+ kfree(adapter->vf_cfg);
50866+ be_sriov_disable(adapter);
50867+
50868+ be_msix_disable(adapter);
50869
50870 pci_set_drvdata(pdev, NULL);
50871 pci_release_regions(pdev);
50872 pci_disable_device(pdev);
50873-
50874+ be_netif_napi_del(adapter->netdev);
50875 free_netdev(adapter->netdev);
50876 }
50877
50878-static int be_hw_up(struct be_adapter *adapter)
50879+static void be_pcie_slot_check(struct be_adapter *adapter)
50880+{
50881+ u32 curr, max, width, max_wd, speed, max_sp;
50882+
50883+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_STATUS_OFFSET,
50884+ &curr);
50885+ width = (curr >> PCIE_LINK_STATUS_NEG_WIDTH_SHIFT) &
50886+ PCIE_LINK_STATUS_NEG_WIDTH_MASK;
50887+ speed = (curr >> PCIE_LINK_STATUS_SPEED_SHIFT) &
50888+ PCIE_LINK_STATUS_SPEED_MASK;
50889+
50890+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_CAP_OFFSET,
50891+ &max);
50892+ max_wd = (max >> PCIE_LINK_CAP_MAX_WIDTH_SHIFT) &
50893+ PCIE_LINK_CAP_MAX_WIDTH_MASK;
50894+ max_sp = (max >> PCIE_LINK_CAP_MAX_SPEED_SHIFT) &
50895+ PCIE_LINK_CAP_MAX_SPEED_MASK;
50896+
50897+ if (width < max_wd || speed < max_sp)
50898+ dev_warn(&adapter->pdev->dev,
50899+ "Found network device in a Gen%s x%d PCIe slot. It "
50900+ "should be in a Gen2 x%d slot for best performance\n",
50901+ speed < max_sp ? "1" : "2", width, max_wd);
50902+}
50903+
50904+static int be_get_ioctl_version(char *fw_version) {
50905+ char *str[4];
50906+ int i;
50907+ int val[4];
50908+ char *endptr;
50909+
50910+ if(!fw_version)
50911+ return 0;
50912+ for(i=0; i<3; i++) {
50913+ str[i] = strsep(&fw_version, ".");
50914+ val[i] = simple_strtol(str[i], &endptr, 10);
50915+ }
50916+
50917+ if (val[0]>4 || (val[0]>3 && val[2]>143))
50918+ return 1;
50919+ return 0;
50920+}
50921+
50922+static int be_get_port_names(struct be_adapter *adapter)
50923 {
50924 int status;
50925+ int ver;
50926
50927- status = be_cmd_POST(adapter);
50928+ status = be_cmd_get_fw_ver(adapter,
50929+ adapter->fw_ver, NULL);
50930 if (status)
50931 return status;
50932+ ver = be_get_ioctl_version(adapter->fw_ver);
50933+ if (ver && (adapter->generation == BE_GEN3))
50934+ status = be_cmd_query_port_names_v1(adapter,
50935+ adapter->port_name);
50936+ else
50937+ status = be_cmd_query_port_names_v0(adapter,
50938+ adapter->port_name);
50939+ return status;
50940+}
50941
50942- status = be_cmd_reset_function(adapter);
50943+static int be_get_config(struct be_adapter *adapter)
50944+{
50945+ int status;
50946+ u8 mac[ETH_ALEN];
50947+
50948+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
50949+ &adapter->function_mode,
50950+ &adapter->function_caps);
50951 if (status)
50952 return status;
50953
50954- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
50955+ status = be_cmd_get_cntl_attributes(adapter);
50956 if (status)
50957 return status;
50958
50959- status = be_cmd_query_fw_cfg(adapter,
50960- &adapter->port_num, &adapter->cap);
50961+ memset(mac, 0, ETH_ALEN);
50962+ be_pcie_slot_check(adapter);
50963+
50964+ if (be_physfn(adapter)) {
50965+ status = be_cmd_mac_addr_query(adapter, mac,
50966+ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
50967+
50968+ if (status)
50969+ return status;
50970+
50971+ if (!is_valid_ether_addr(mac))
50972+ return -EADDRNOTAVAIL;
50973+
50974+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
50975+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
50976+ }
50977+
50978+ if (adapter->function_mode & FLEX10_MODE)
50979+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
50980+ else
50981+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
50982+
50983+ return 0;
50984+}
50985+
50986+static int be_dev_family_check(struct be_adapter *adapter)
50987+{
50988+ struct pci_dev *pdev = adapter->pdev;
50989+ u32 sli_intf = 0, if_type;
50990+
50991+ switch (pdev->device) {
50992+ case BE_DEVICE_ID1:
50993+ case OC_DEVICE_ID1:
50994+ adapter->generation = BE_GEN2;
50995+ break;
50996+ case BE_DEVICE_ID2:
50997+ case OC_DEVICE_ID2:
50998+ adapter->generation = BE_GEN3;
50999+ break;
51000+ case OC_DEVICE_ID3:
51001+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
51002+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
51003+ SLI_INTF_IF_TYPE_SHIFT;
51004+
51005+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
51006+ if_type != 0x02) {
51007+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
51008+ return -EINVAL;
51009+ }
51010+ if (num_vfs > 0) {
51011+ dev_err(&pdev->dev, "VFs not supported\n");
51012+ return -EINVAL;
51013+ }
51014+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
51015+ SLI_INTF_FAMILY_SHIFT);
51016+ adapter->generation = BE_GEN3;
51017+ break;
51018+ default:
51019+ adapter->generation = 0;
51020+ }
51021+ return 0;
51022+}
51023+
51024+static int lancer_wait_ready(struct be_adapter *adapter)
51025+{
51026+#define SLIPORT_READY_TIMEOUT 500
51027+ u32 sliport_status;
51028+ int status = 0, i;
51029+
51030+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
51031+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51032+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
51033+ break;
51034+
51035+ msleep(20);
51036+ }
51037+
51038+ if (i == SLIPORT_READY_TIMEOUT)
51039+ status = -1;
51040+
51041+ return status;
51042+}
51043+
51044+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
51045+{
51046+ int status;
51047+ u32 sliport_status, err, reset_needed;
51048+ status = lancer_wait_ready(adapter);
51049+ if (!status) {
51050+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
51051+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
51052+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
51053+ if (err && reset_needed) {
51054+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
51055+ adapter->db + SLIPORT_CONTROL_OFFSET);
51056+
51057+ /* check adapter has corrected the error */
51058+ status = lancer_wait_ready(adapter);
51059+ sliport_status = ioread32(adapter->db +
51060+ SLIPORT_STATUS_OFFSET);
51061+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
51062+ SLIPORT_STATUS_RN_MASK);
51063+ if (status || sliport_status)
51064+ status = -1;
51065+ } else if (err || reset_needed) {
51066+ status = -1;
51067+ }
51068+ }
51069 return status;
51070 }
51071
51072@@ -2091,7 +3716,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
51073 int status = 0;
51074 struct be_adapter *adapter;
51075 struct net_device *netdev;
51076- u8 mac[ETH_ALEN];
51077+ u32 en;
51078
51079 status = pci_enable_device(pdev);
51080 if (status)
51081@@ -2102,31 +3727,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
51082 goto disable_dev;
51083 pci_set_master(pdev);
51084
51085- netdev = alloc_etherdev(sizeof(struct be_adapter));
51086+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
51087 if (netdev == NULL) {
51088 status = -ENOMEM;
51089 goto rel_reg;
51090 }
51091 adapter = netdev_priv(netdev);
51092
51093- switch (pdev->device) {
51094- case BE_DEVICE_ID1:
51095- case OC_DEVICE_ID1:
51096- adapter->generation = BE_GEN2;
51097- break;
51098- case BE_DEVICE_ID2:
51099- case OC_DEVICE_ID2:
51100- adapter->generation = BE_GEN3;
51101- break;
51102- default:
51103- adapter->generation = 0;
51104- }
51105-
51106 adapter->pdev = pdev;
51107+
51108+ status = be_dev_family_check(adapter);
51109+ if (status)
51110+ goto free_netdev;
51111+
51112 pci_set_drvdata(pdev, adapter);
51113 adapter->netdev = netdev;
51114-
51115- be_msix_enable(adapter);
51116+ SET_NETDEV_DEV(netdev, &pdev->dev);
51117
51118 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
51119 if (!status) {
51120@@ -2139,46 +3755,150 @@ static int __devinit be_probe(struct pci_dev *pdev,
51121 }
51122 }
51123
51124+ be_sriov_enable(adapter);
51125+ if (adapter->num_vfs > 0) {
51126+ adapter->vf_cfg = kcalloc(adapter->num_vfs,
51127+ sizeof(struct be_vf_cfg), GFP_KERNEL);
51128+
51129+ if (!adapter->vf_cfg)
51130+ goto free_netdev;
51131+ }
51132+
51133 status = be_ctrl_init(adapter);
51134 if (status)
51135- goto free_netdev;
51136+ goto free_vf_cfg;
51137+
51138+ if (lancer_chip(adapter)) {
51139+ status = lancer_test_and_set_rdy_state(adapter);
51140+ if (status) {
51141+ dev_err(&pdev->dev, "Adapter in non recoverable error\n");
51142+ goto ctrl_clean;
51143+ }
51144+ }
51145+
51146+ /* sync up with fw's ready state */
51147+ if (be_physfn(adapter)) {
51148+ status = be_cmd_POST(adapter);
51149+ if (status)
51150+ goto ctrl_clean;
51151+ }
51152+
51153+ /* tell fw we're ready to fire cmds */
51154+ status = be_cmd_fw_init(adapter);
51155+ if (status)
51156+ goto ctrl_clean;
51157+
51158+ status = be_cmd_reset_function(adapter);
51159+ if (status)
51160+ goto ctrl_clean;
51161
51162 status = be_stats_init(adapter);
51163 if (status)
51164 goto ctrl_clean;
51165
51166- status = be_hw_up(adapter);
51167+ status = be_get_config(adapter);
51168 if (status)
51169 goto stats_clean;
51170
51171- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
51172- true /* permanent */, 0);
51173- if (status)
51174- goto stats_clean;
51175- memcpy(netdev->dev_addr, mac, ETH_ALEN);
51176+ /* This bit is zero in normal boot case, but in crash kernel case this
51177+ is not cleared. clear this bit here, until we are ready with the irqs
51178+ i.e in be_open call.*/
51179+ if (!lancer_chip(adapter))
51180+ be_intr_set(adapter, false);
51181+
51182+ if (msix)
51183+ be_msix_enable(adapter);
51184
51185 INIT_DELAYED_WORK(&adapter->work, be_worker);
51186- be_netdev_init(netdev);
51187- SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
51188
51189 status = be_setup(adapter);
51190 if (status)
51191- goto stats_clean;
51192+ goto msix_disable;
51193+
51194+ /* Initilize the link status to -1 */
51195+ adapter->link_status = -1;
51196+
51197+ status = be_netdev_init(netdev);
51198+ if (status)
51199+ goto unsetup;
51200+
51201 status = register_netdev(netdev);
51202 if (status != 0)
51203 goto unsetup;
51204
51205- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
51206+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
51207+
51208+ if (be_physfn(adapter) && adapter->num_vfs) {
51209+ u8 mac_speed;
51210+ int link_status;
51211+ u16 def_vlan, vf, lnk_speed;
51212+
51213+ status = be_vf_eth_addr_config(adapter);
51214+ if (status)
51215+ goto unreg_netdev;
51216+
51217+ for (vf = 0; vf < adapter->num_vfs; vf++) {
51218+ status = be_cmd_get_hsw_config(adapter, &def_vlan,
51219+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
51220+ if (!status)
51221+ adapter->vf_cfg[vf].vf_def_vid = def_vlan;
51222+ else
51223+ goto unreg_netdev;
51224+
51225+ status = be_cmd_link_status_query(adapter, &link_status,
51226+ &mac_speed, &lnk_speed, vf + 1);
51227+ if (!status)
51228+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
51229+ else
51230+ goto unreg_netdev;
51231+ }
51232+ }
51233+ if (be_physfn(adapter)) {
51234+ /* Temp fix ofr bug# 23034. Till ARM
51235+ * f/w fixes privilege lvl */
51236+ be_get_port_names(adapter);
51237+ }
51238+
51239+ /* Enable Vlan capability based on privileges.
51240+ * PF will have Vlan capability anyway. */
51241+ be_cmd_get_fn_privileges(adapter, &en, 0);
51242+
51243+ if ((en & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG)) ||
51244+ be_physfn(adapter))
51245+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
51246+ else
51247+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
51248+
51249+ dev_info(&pdev->dev, "%s: numa node %d\n", netdev->name,
51250+ dev_to_node(&pdev->dev));
51251+ dev_info(&pdev->dev, "%s %s \"%s\" port %d\n", nic_name(pdev),
51252+ (adapter->port_num > 1 ? "1Gbps NIC" : "10Gbps NIC"),
51253+ adapter->model_number, adapter->hba_port_num);
51254+
51255+
51256+#ifdef CONFIG_PALAU
51257+ be_sysfs_create_group(adapter);
51258+#endif
51259+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51260 return 0;
51261
51262+unreg_netdev:
51263+ unregister_netdev(netdev);
51264 unsetup:
51265 be_clear(adapter);
51266+msix_disable:
51267+ be_msix_disable(adapter);
51268 stats_clean:
51269 be_stats_cleanup(adapter);
51270 ctrl_clean:
51271 be_ctrl_cleanup(adapter);
51272+free_vf_cfg:
51273+ kfree(adapter->vf_cfg);
51274 free_netdev:
51275- free_netdev(adapter->netdev);
51276+ be_sriov_disable(adapter);
51277+ be_netif_napi_del(netdev);
51278+ free_netdev(netdev);
51279+ pci_set_drvdata(pdev, NULL);
51280 rel_reg:
51281 pci_release_regions(pdev);
51282 disable_dev:
51283@@ -2193,6 +3913,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51284 struct be_adapter *adapter = pci_get_drvdata(pdev);
51285 struct net_device *netdev = adapter->netdev;
51286
51287+ cancel_delayed_work_sync(&adapter->work);
51288+ if (adapter->wol)
51289+ be_setup_wol(adapter, true);
51290+
51291 netif_device_detach(netdev);
51292 if (netif_running(netdev)) {
51293 rtnl_lock();
51294@@ -2202,6 +3926,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
51295 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
51296 be_clear(adapter);
51297
51298+ be_msix_disable(adapter);
51299 pci_save_state(pdev);
51300 pci_disable_device(pdev);
51301 pci_set_power_state(pdev, pci_choose_state(pdev, state));
51302@@ -2223,6 +3948,12 @@ static int be_resume(struct pci_dev *pdev)
51303 pci_set_power_state(pdev, 0);
51304 pci_restore_state(pdev);
51305
51306+ be_msix_enable(adapter);
51307+ /* tell fw we're ready to fire cmds */
51308+ status = be_cmd_fw_init(adapter);
51309+ if (status)
51310+ return status;
51311+
51312 be_setup(adapter);
51313 if (netif_running(netdev)) {
51314 rtnl_lock();
51315@@ -2230,28 +3961,152 @@ static int be_resume(struct pci_dev *pdev)
51316 rtnl_unlock();
51317 }
51318 netif_device_attach(netdev);
51319+
51320+ if (adapter->wol)
51321+ be_setup_wol(adapter, false);
51322+
51323+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
51324 return 0;
51325 }
51326
51327+/*
51328+ * An FLR will stop BE from DMAing any data.
51329+ */
51330+static void be_shutdown(struct pci_dev *pdev)
51331+{
51332+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51333+
51334+ if (!adapter)
51335+ return;
51336+
51337+ cancel_delayed_work_sync(&adapter->work);
51338+
51339+ netif_device_detach(adapter->netdev);
51340+
51341+ if (adapter->wol)
51342+ be_setup_wol(adapter, true);
51343+
51344+ be_cmd_reset_function(adapter);
51345+
51346+ pci_disable_device(pdev);
51347+}
51348+
51349+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
51350+ pci_channel_state_t state)
51351+{
51352+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51353+ struct net_device *netdev = adapter->netdev;
51354+
51355+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
51356+
51357+ adapter->eeh_err = true;
51358+
51359+ netif_device_detach(netdev);
51360+
51361+ if (netif_running(netdev)) {
51362+ rtnl_lock();
51363+ be_close(netdev);
51364+ rtnl_unlock();
51365+ }
51366+ be_clear(adapter);
51367+
51368+ if (state == pci_channel_io_perm_failure)
51369+ return PCI_ERS_RESULT_DISCONNECT;
51370+
51371+ pci_disable_device(pdev);
51372+
51373+ return PCI_ERS_RESULT_NEED_RESET;
51374+}
51375+
51376+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
51377+{
51378+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51379+ int status;
51380+
51381+ dev_info(&adapter->pdev->dev, "EEH reset\n");
51382+ adapter->eeh_err = false;
51383+
51384+ status = pci_enable_device(pdev);
51385+ if (status)
51386+ return PCI_ERS_RESULT_DISCONNECT;
51387+
51388+ pci_set_master(pdev);
51389+ pci_set_power_state(pdev, 0);
51390+ pci_restore_state(pdev);
51391+
51392+ /* Check if card is ok and fw is ready */
51393+ status = be_cmd_POST(adapter);
51394+ if (status)
51395+ return PCI_ERS_RESULT_DISCONNECT;
51396+
51397+ return PCI_ERS_RESULT_RECOVERED;
51398+}
51399+
51400+static void be_eeh_resume(struct pci_dev *pdev)
51401+{
51402+ int status = 0;
51403+ struct be_adapter *adapter = pci_get_drvdata(pdev);
51404+ struct net_device *netdev = adapter->netdev;
51405+
51406+ dev_info(&adapter->pdev->dev, "EEH resume\n");
51407+
51408+ pci_save_state(pdev);
51409+
51410+ /* tell fw we're ready to fire cmds */
51411+ status = be_cmd_fw_init(adapter);
51412+ if (status)
51413+ goto err;
51414+
51415+ status = be_setup(adapter);
51416+ if (status)
51417+ goto err;
51418+
51419+ if (netif_running(netdev)) {
51420+ status = be_open(netdev);
51421+ if (status)
51422+ goto err;
51423+ }
51424+ netif_device_attach(netdev);
51425+ return;
51426+err:
51427+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
51428+ return;
51429+}
51430+
51431+static struct pci_error_handlers be_eeh_handlers = {
51432+ .error_detected = be_eeh_err_detected,
51433+ .slot_reset = be_eeh_reset,
51434+ .resume = be_eeh_resume,
51435+};
51436+
51437 static struct pci_driver be_driver = {
51438 .name = DRV_NAME,
51439 .id_table = be_dev_ids,
51440 .probe = be_probe,
51441 .remove = be_remove,
51442 .suspend = be_suspend,
51443- .resume = be_resume
51444+ .resume = be_resume,
51445+ .shutdown = be_shutdown,
51446+ .err_handler = &be_eeh_handlers
51447 };
51448
51449 static int __init be_init_module(void)
51450 {
51451- if (rx_frag_size != 8192 && rx_frag_size != 4096
51452- && rx_frag_size != 2048) {
51453+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
51454+ rx_frag_size != 2048) {
51455 printk(KERN_WARNING DRV_NAME
51456 " : Module param rx_frag_size must be 2048/4096/8192."
51457 " Using 2048\n");
51458 rx_frag_size = 2048;
51459 }
51460
51461+ if (!msix && num_vfs > 0) {
51462+ printk(KERN_WARNING DRV_NAME
51463+ " : MSIx required for num_vfs > 0. Ignoring msix=0\n");
51464+ msix = 1;
51465+ }
51466+
51467+
51468 return pci_register_driver(&be_driver);
51469 }
51470 module_init(be_init_module);
51471diff --git a/drivers/net/benet/be_misc.c b/drivers/net/benet/be_misc.c
51472new file mode 100644
51473index 0000000..4ab499f
51474--- /dev/null
51475+++ b/drivers/net/benet/be_misc.c
51476@@ -0,0 +1,106 @@
51477+/*
51478+ * Copyright (C) 2005 - 2011 Emulex
51479+ * All rights reserved.
51480+ *
51481+ * This program is free software; you can redistribute it and/or
51482+ * modify it under the terms of the GNU General Public License version 2
51483+ * as published by the Free Software Foundation. The full GNU General
51484+ * Public License is included in this distribution in the file called COPYING.
51485+ *
51486+ * Contact Information:
51487+ * linux-drivers@emulex.com
51488+ *
51489+ * Emulex
51490+ * 3333 Susan Street
51491+ * Costa Mesa, CA 92626
51492+ */
51493+#include "be.h"
51494+
51495+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51496+static ssize_t
51497+flash_fw_store(struct class_device *cd, const char *buf, size_t len)
51498+{
51499+ struct be_adapter *adapter =
51500+ netdev_priv(container_of(cd, struct net_device, class_dev));
51501+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51502+ int status;
51503+
51504+ if (!capable(CAP_NET_ADMIN))
51505+ return -EPERM;
51506+
51507+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51508+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51509+
51510+ /* Removing new-line char given by sysfs */
51511+ file_name[strlen(file_name) - 1] = '\0';
51512+
51513+ status = be_load_fw(adapter, file_name);
51514+ if (!status)
51515+ return len;
51516+ else
51517+ return status;
51518+}
51519+
51520+static CLASS_DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51521+
51522+static struct attribute *benet_attrs[] = {
51523+ &class_device_attr_flash_fw.attr,
51524+ NULL,
51525+};
51526+#else
51527+
51528+static ssize_t
51529+flash_fw_store(struct device *dev, struct device_attribute *attr,
51530+ const char *buf, size_t len)
51531+{
51532+ struct be_adapter *adapter =
51533+ netdev_priv(container_of(dev, struct net_device, dev));
51534+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51535+ int status;
51536+
51537+ if (!capable(CAP_NET_ADMIN))
51538+ return -EPERM;
51539+
51540+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51541+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51542+
51543+ /* Removing new-line char given by sysfs */
51544+ file_name[strlen(file_name) - 1] = '\0';
51545+
51546+ status = be_load_fw(adapter, file_name);
51547+ if (!status)
51548+ return len;
51549+ else
51550+ return status;
51551+}
51552+
51553+static DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51554+
51555+static struct attribute *benet_attrs[] = {
51556+ &dev_attr_flash_fw.attr,
51557+ NULL,
51558+};
51559+#endif
51560+
51561+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51562+#define CLASS_DEV class_dev
51563+#else
51564+#define CLASS_DEV dev
51565+#endif
51566+
51567+static struct attribute_group benet_attr_group = {.attrs = benet_attrs };
51568+
51569+void be_sysfs_create_group(struct be_adapter *adapter)
51570+{
51571+ int status;
51572+
51573+ status = sysfs_create_group(&adapter->netdev->CLASS_DEV.kobj,
51574+ &benet_attr_group);
51575+ if (status)
51576+ dev_err(&adapter->pdev->dev, "Could not create sysfs group\n");
51577+}
51578+
51579+void be_sysfs_remove_group(struct be_adapter *adapter)
51580+{
51581+ sysfs_remove_group(&adapter->netdev->CLASS_DEV.kobj, &benet_attr_group);
51582+}
51583diff --git a/drivers/net/benet/be_proc.c b/drivers/net/benet/be_proc.c
51584new file mode 100644
51585index 0000000..0bfdb3b
51586--- /dev/null
51587+++ b/drivers/net/benet/be_proc.c
51588@@ -0,0 +1,513 @@
51589+/*
51590+ * Copyright (C) 2005 - 2011 ServerEngines
51591+ * All rights reserved.
51592+ *
51593+ * This program is free software; you can redistribute it and/or
51594+ * modify it under the terms of the GNU General Public License version 2
51595+ * as published by the Free Software Foundation. The full GNU General
51596+ * Public License is included in this distribution in the file called COPYING.
51597+ *
51598+ * Contact Information:
51599+ * linux-drivers@serverengines.com
51600+ *
51601+ * ServerEngines
51602+ * 209 N. Fair Oaks Ave
51603+ * Sunnyvale, CA 94085
51604+ */
51605+#include <linux/proc_fs.h>
51606+#include "be.h"
51607+
51608+char *be_adpt_name[] = {
51609+ "driver/be2net0",
51610+ "driver/be2net1",
51611+ "driver/be2net2",
51612+ "driver/be2net3",
51613+ "driver/be2net4",
51614+ "driver/be2net5",
51615+ "driver/be2net6",
51616+ "driver/be2net7"
51617+};
51618+
51619+#define MAX_BE_DEVICES 8
51620+struct proc_dir_entry *be_proc_dir[MAX_BE_DEVICES];
51621+
51622+/*File to read Eth Ring Information */
51623+#define BE_ETH_RING_FILE "eth_ring"
51624+#define BE_DRVR_STAT_FILE "drvr_stat"
51625+
51626+/*
51627+ * this file enables user to read a 32 bit CSR register.
51628+ * to read 32 bit value of a register at offset 0x1234,
51629+ * first write the offset 0x1234 (echo "0x1234") in
51630+ * the file and then read the value from this file.
51631+ * the written offset is latched until another value is written
51632+ */
51633+#define BE_CSR_R_FILE "csrr"
51634+/*
51635+ * this file enables user to write to a 32 bit CSR register.
51636+ * to write a value 0xdeadbeef to a register at offset 0x1234,
51637+ * write 0x1234 0xdeadbeef (echo "0x1234 0xdeadbeeb") to
51638+ * the file.
51639+ */
51640+#define BE_CSR_W_FILE "csrw"
51641+
51642+#define BE_PROC_MODE 0600
51643+
51644+static char read_eth_ring_buf[4096];
51645+static int read_eth_ring_count;
51646+
51647+/*
51648+ * Get Various Eth Ring Properties
51649+ */
51650+static int proc_eth_read_ring(char *page, char **start,
51651+ off_t off, int count, int *eof, void *data)
51652+{
51653+ int i, n;
51654+ char *p = read_eth_ring_buf;
51655+ struct be_adapter *adapter = (struct be_adapter *) data;
51656+
51657+ if (off == 0) {
51658+ /* Reset read_eth_ring_count */
51659+ read_eth_ring_count = 0;
51660+
51661+ n = sprintf(p, " PhyAddr VirtAddr Size TotalEntries ProducerIndex ConsumerIndex NumUsed\n");
51662+ p += n;
51663+ read_eth_ring_count += n;
51664+
51665+ n = sprintf(p, " ------- -------- ---- ------------ ------------- ------------- -------\n");
51666+ p += n;
51667+ read_eth_ring_count += n;
51668+
51669+ n = sprintf(p, "%s", "EthSendRing");
51670+ p += n;
51671+ read_eth_ring_count += n;
51672+
51673+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13u %7u \n",
51674+ (long) adapter->tx_obj.q.dma_mem.dma,
51675+ (void *)adapter->tx_obj.q.dma_mem.va,
51676+ (u32) (adapter->tx_obj.q.len *
51677+ sizeof(struct be_eth_wrb)),
51678+ adapter->tx_obj.q.len, adapter->tx_obj.q.head,
51679+ adapter->tx_obj.q.tail,
51680+ atomic_read(&adapter->tx_obj.q.used));
51681+
51682+ p += n;
51683+ read_eth_ring_count += n;
51684+
51685+ /* Get Eth Send Compl Queue Details */
51686+ n = sprintf(p, "%s", "EthSendCmplRing");
51687+ p += n;
51688+ read_eth_ring_count += n;
51689+
51690+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51691+ (long)adapter->tx_obj.cq.dma_mem.dma,
51692+ (void *)adapter->tx_obj.cq.dma_mem.va,
51693+ (u32) (adapter->tx_obj.cq.len *
51694+ sizeof(struct be_eth_tx_compl)),
51695+ adapter->tx_obj.cq.len, "NA",
51696+ adapter->tx_obj.cq.tail, "NA");
51697+
51698+ p += n;
51699+ read_eth_ring_count += n;
51700+ /* Get Eth Rx Queue Details */
51701+ n = sprintf(p, "%s", "EthRxRing");
51702+ p += n;
51703+ read_eth_ring_count += n;
51704+
51705+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13s %7u \n",
51706+ (long)adapter->rx_obj.q.dma_mem.dma,
51707+ (void *)adapter->rx_obj.q.dma_mem.va,
51708+ (u32) (adapter->rx_obj.q.len *
51709+ sizeof(struct be_eth_rx_d)),
51710+ adapter->rx_obj.q.len, adapter->rx_obj.q.head,"NA",
51711+ atomic_read(&adapter->rx_obj.q.used));
51712+ p += n;
51713+ read_eth_ring_count += n;
51714+
51715+ /* Get Eth Unicast Rx Compl Queue Details */
51716+ n = sprintf(p, "%s", "EthRxCmplRing");
51717+ p += n;
51718+ read_eth_ring_count += n;
51719+
51720+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51721+ (long)adapter->rx_obj.cq.dma_mem.dma,
51722+ (void *)adapter->rx_obj.cq.dma_mem.va,
51723+ (u32) (adapter->rx_obj.cq.len *
51724+ sizeof(struct be_eth_rx_compl)),
51725+ adapter->rx_obj.cq.len, "NA",
51726+ adapter->rx_obj.cq.tail, "NA");
51727+ p += n;
51728+ read_eth_ring_count += n;
51729+
51730+ /* Get Eth Event Queue Details */
51731+ n = sprintf(p, "%s", "EthTxEventRing");
51732+ p += n;
51733+ read_eth_ring_count += n;
51734+
51735+ n = sprintf(p,
51736+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51737+ (long) adapter->tx_eq.q.dma_mem.dma,
51738+ (void *)adapter->tx_eq.q.dma_mem.va,
51739+ (u32) (adapter->tx_eq.q.len *
51740+ sizeof(struct be_eq_entry)),
51741+ adapter->tx_eq.q.len, "NA",
51742+ adapter->tx_eq.q.tail, "NA");
51743+
51744+ p += n;
51745+ read_eth_ring_count += n;
51746+
51747+ /* Get Eth Event Queue Details */
51748+ n = sprintf(p, "%s", "EthRxEventRing");
51749+ p += n;
51750+ read_eth_ring_count += n;
51751+
51752+ n = sprintf(p,
51753+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51754+ (long) adapter->rx_eq.q.dma_mem.dma,
51755+ (void *)adapter->rx_eq.q.dma_mem.va,
51756+ (u32) (adapter->rx_eq.q.len *
51757+ sizeof(struct be_eq_entry)),
51758+ adapter->rx_eq.q.len, "NA",
51759+ adapter->rx_eq.q.tail, "NA");
51760+
51761+ p += n;
51762+ read_eth_ring_count += n;
51763+ }
51764+
51765+ *start = page;
51766+ /* copy whatever we can */
51767+ if (count < (read_eth_ring_count - off)) {
51768+ i = count;
51769+ *eof = 0; /* More bytes left */
51770+ } else {
51771+ i = read_eth_ring_count - off;
51772+ *eof = 1; /* Nothing left. indicate EOF */
51773+ }
51774+
51775+ memcpy(page, read_eth_ring_buf + off, i);
51776+ return (i);
51777+}
51778+
51779+static int proc_eth_write_ring(struct file *file,
51780+ const char *buffer, unsigned long count,
51781+ void *data)
51782+{
51783+ return (count); /* we do not support write */
51784+}
51785+
51786+/*
51787+ * read the driver stats.
51788+ */
51789+static int proc_read_drvr_stat(char *page, char **start,
51790+ off_t off, int count, int *eof, void *data)
51791+{
51792+ int n, lro_cp;
51793+ char *p = page;
51794+ struct be_adapter *adapter = (struct be_adapter *) data;
51795+ struct net_device *netdev = adapter->netdev;
51796+
51797+ if (off == 0) {
51798+ n = sprintf(p, "interface = %s\n", netdev->name);
51799+ p += n;
51800+ n = sprintf(p, "tx_reqs = %d\n",
51801+ drvr_stats(adapter)->be_tx_reqs);
51802+ p += n;
51803+ n = sprintf(p, "tx_stops = %d\n",
51804+ drvr_stats(adapter)->be_tx_stops);
51805+ p += n;
51806+ n = sprintf(p, "fwd_reqs = %d\n",
51807+ drvr_stats(adapter)->be_fwd_reqs);
51808+ p += n;
51809+ n = sprintf(p, "tx_wrbs = %d\n",
51810+ drvr_stats(adapter)->be_tx_wrbs);
51811+ p += n;
51812+ n = sprintf(p, "rx_poll = %d\n", drvr_stats(adapter)->be_rx_polls);
51813+ p += n;
51814+ n = sprintf(p, "tx_events = %d\n",
51815+ drvr_stats(adapter)->be_tx_events);
51816+ p += n;
51817+ n = sprintf(p, "rx_events = %d\n",
51818+ drvr_stats(adapter)->be_rx_events);
51819+ p += n;
51820+ n = sprintf(p, "tx_compl = %d\n",
51821+ drvr_stats(adapter)->be_tx_compl);
51822+ p += n;
51823+ n = sprintf(p, "rx_compl = %d\n",
51824+ drvr_stats(adapter)->be_rx_compl);
51825+ p += n;
51826+ n = sprintf(p, "ethrx_post_fail = %d\n",
51827+ drvr_stats(adapter)->be_ethrx_post_fail);
51828+ p += n;
51829+ n = sprintf(p, "802.3_dropped_frames = %d\n",
51830+ drvr_stats(adapter)->be_802_3_dropped_frames);
51831+ p += n;
51832+ n = sprintf(p, "802.3_malformed_frames = %d\n",
51833+ drvr_stats(adapter)->be_802_3_malformed_frames);
51834+ p += n;
51835+ n = sprintf(p, "eth_tx_rate = %d\n",
51836+ drvr_stats(adapter)->be_tx_rate);
51837+ p += n;
51838+ n = sprintf(p, "eth_rx_rate = %d\n",
51839+ drvr_stats(adapter)->be_rx_rate);
51840+ p += n;
51841+
51842+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_data[0] +
51843+ drvr_stats(adapter)->be_lro_hgram_data[1] +
51844+ drvr_stats(adapter)->be_lro_hgram_data[2] +
51845+ drvr_stats(adapter)->be_lro_hgram_data[3] +
51846+ drvr_stats(adapter)->be_lro_hgram_data[4] +
51847+ drvr_stats(adapter)->be_lro_hgram_data[5] +
51848+ drvr_stats(adapter)->be_lro_hgram_data[6] +
51849+ drvr_stats(adapter)->be_lro_hgram_data[7])/100;
51850+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51851+ n = sprintf(p,
51852+ "LRO data count %% histogram (1, 2-3, 4-5,..,>=16) = "
51853+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51854+ drvr_stats(adapter)->be_lro_hgram_data[0]/lro_cp,
51855+ drvr_stats(adapter)->be_lro_hgram_data[1]/lro_cp,
51856+ drvr_stats(adapter)->be_lro_hgram_data[2]/lro_cp,
51857+ drvr_stats(adapter)->be_lro_hgram_data[3]/lro_cp,
51858+ drvr_stats(adapter)->be_lro_hgram_data[4]/lro_cp,
51859+ drvr_stats(adapter)->be_lro_hgram_data[5]/lro_cp,
51860+ drvr_stats(adapter)->be_lro_hgram_data[6]/lro_cp,
51861+ drvr_stats(adapter)->be_lro_hgram_data[7]/lro_cp);
51862+ p += n;
51863+
51864+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_ack[0] +
51865+ drvr_stats(adapter)->be_lro_hgram_ack[1] +
51866+ drvr_stats(adapter)->be_lro_hgram_ack[2] +
51867+ drvr_stats(adapter)->be_lro_hgram_ack[3] +
51868+ drvr_stats(adapter)->be_lro_hgram_ack[4] +
51869+ drvr_stats(adapter)->be_lro_hgram_ack[5] +
51870+ drvr_stats(adapter)->be_lro_hgram_ack[6] +
51871+ drvr_stats(adapter)->be_lro_hgram_ack[7])/100;
51872+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51873+ n = sprintf(p,
51874+ "LRO ack count %% histogram (1, 2-3, 4-5,..,>=16) = "
51875+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51876+ drvr_stats(adapter)->be_lro_hgram_ack[0]/lro_cp,
51877+ drvr_stats(adapter)->be_lro_hgram_ack[1]/lro_cp,
51878+ drvr_stats(adapter)->be_lro_hgram_ack[2]/lro_cp,
51879+ drvr_stats(adapter)->be_lro_hgram_ack[3]/lro_cp,
51880+ drvr_stats(adapter)->be_lro_hgram_ack[4]/lro_cp,
51881+ drvr_stats(adapter)->be_lro_hgram_ack[5]/lro_cp,
51882+ drvr_stats(adapter)->be_lro_hgram_ack[6]/lro_cp,
51883+ drvr_stats(adapter)->be_lro_hgram_ack[7]/lro_cp);
51884+ p += n;
51885+ n = sprintf(p, "rx_eq_delay = %d \n", adapter->rx_eq.cur_eqd);
51886+ p += n;
51887+ n = sprintf(p, "rx frags per sec=%d \n",
51888+ drvr_stats(adapter)->be_rx_fps);
51889+ p += n;
51890+
51891+ }
51892+ *eof = 1;
51893+ return (p - page);
51894+}
51895+
51896+static int proc_write_drvr_stat(struct file *file,
51897+ const char *buffer, unsigned long count,
51898+ void *data)
51899+{
51900+ struct be_adapter *adapter = (struct be_adapter *) data;
51901+
51902+ memset(&(adapter->stats.drvr_stats), 0,
51903+ sizeof(adapter->stats.drvr_stats));
51904+ return (count); /* we do not support write */
51905+}
51906+
51907+#if 0
51908+/* the following are some of the functions that are needed here
51909+ * until all initializations are done by MPU.
51910+ */
51911+
51912+u32
51913+CsrReadDr(void* BaseAddress, u32 Offset)
51914+{
51915+ u32 *rp;
51916+
51917+ rp = (u32 *) (((u8 *) BaseAddress) + Offset);
51918+ return (*rp);
51919+}
51920+
51921+/*!
51922+
51923+@brief
51924+ This routine writes to a register located within the CSR
51925+ space for a given function object.
51926+
51927+@param
51928+ FuncObj - Pointer to the function object to read from.
51929+
51930+@param
51931+ Offset - The Offset (in bytes) to write to within the function's CSR space.
51932+
51933+@param
51934+ Value - The value to write to the register.
51935+
51936+@return
51937+
51938+@note
51939+ IRQL: any
51940+
51941+*/
51942+void
51943+CsrWriteDr(void* BaseAddress, u32 Offset, u32 Value)
51944+{
51945+ u32 *Register;
51946+
51947+ Register = (u32 *) (((u8 *) BaseAddress) + Offset);
51948+
51949+ //TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value);
51950+ *Register = Value;
51951+}
51952+u32 be_proc_csrr_offset = -1; /* to latch the offset of next CSR Read req. */
51953+
51954+/*
51955+ * read the csr_r file. return the 32 bit register value from
51956+ * CSR space at offset latched in the global location
51957+ * be_proc_csrr_offset
51958+ */
51959+static int proc_read_csr_r(char *page, char **start,
51960+ off_t off, int count, int *eof, void *data)
51961+{
51962+ struct be_adapter * adapter = (struct be_adapter *)data;
51963+ u32 val;
51964+ int n = 0;
51965+ if (be_proc_csrr_offset == -1)
51966+ return -EINVAL;
51967+
51968+ if (off == 0) {
51969+ /* read the CSR at offset be_proc_csrr_offset and return */
51970+ val = CsrReadDr(adapter->csr_va, be_proc_csrr_offset);
51971+ n = sprintf(page, "0x%x\n", val);
51972+ }
51973+ *eof = 1;
51974+ return n;
51975+}
51976+
51977+/*
51978+ * save the written value in be_proc_csrr_offset for next
51979+ * read from the file
51980+ */
51981+static int proc_write_csr_r(struct file *file,
51982+ const char *buffer, unsigned long count, void *data)
51983+{
51984+ char buf[64];
51985+ u32 n;
51986+
51987+ if (count > sizeof(buf) + 1)
51988+ return -EINVAL;
51989+ if (copy_from_user(buf, buffer, count))
51990+ return -EFAULT;
51991+ buf[count] = '\0';
51992+
51993+ n = simple_strtoul(buf, NULL, 16);
51994+ if (n < 0x50000)
51995+ be_proc_csrr_offset = n;
51996+ return (count);
51997+}
51998+
51999+/*
52000+ * return the latched offset for reading the csr_r file.
52001+ */
52002+static int proc_read_csr_w(char *page, char **start,
52003+ off_t off, int count, int *eof, void *data)
52004+{
52005+
52006+ *eof = 1;
52007+ return sprintf(page, "0x%x\n", be_proc_csrr_offset);
52008+}
52009+
52010+/*
52011+ * the incoming string is of the form "<offset> <value>"
52012+ * where the offset is the offset of the register to be written
52013+ * and value is the value to be written.
52014+ */
52015+static int proc_write_csr_w(struct file *file,
52016+ const char *buffer, unsigned long count,
52017+ void *data)
52018+{
52019+ char buf[64];
52020+ char *p;
52021+ u32 n, val;
52022+ struct be_adapter * adapter = (struct be_adapter *)data;
52023+
52024+ if (count > sizeof(buf) + 1)
52025+ return -EINVAL;
52026+ if (copy_from_user(buf, buffer, count))
52027+ return -EFAULT;
52028+ buf[count] = '\0';
52029+
52030+ n = simple_strtoul(buf, &p, 16);
52031+ if (n > 0x50000)
52032+ return -EINVAL;
52033+
52034+ /* now get the actual value to be written */
52035+ while (*p == ' ' || *p == '\t')
52036+ p++;
52037+ val = simple_strtoul(p, NULL, 16);
52038+ CsrWriteDr(adapter->csr_va, n, val);
52039+ return (count);
52040+}
52041+#endif
52042+
52043+void be_init_procfs(struct be_adapter *adapter, int adapt_num)
52044+{
52045+ static struct proc_dir_entry *pde;
52046+
52047+ if (adapt_num > MAX_BE_DEVICES - 1)
52048+ return;
52049+
52050+ /* create directory */
52051+ be_proc_dir[adapt_num] =
52052+ proc_mkdir(be_adpt_name[adapt_num], NULL);
52053+ if (be_proc_dir[adapt_num]) {
52054+ (be_proc_dir[adapt_num])->owner = THIS_MODULE;
52055+ }
52056+
52057+ pde = create_proc_entry(BE_ETH_RING_FILE, BE_PROC_MODE,
52058+ be_proc_dir[adapt_num]);
52059+ if (pde) {
52060+ pde->read_proc = proc_eth_read_ring;
52061+ pde->write_proc = proc_eth_write_ring;
52062+ pde->data = adapter;
52063+ pde->owner = THIS_MODULE;
52064+ }
52065+
52066+ pde = create_proc_entry(BE_DRVR_STAT_FILE, BE_PROC_MODE,
52067+ be_proc_dir[adapt_num]);
52068+ if (pde) {
52069+ pde->read_proc = proc_read_drvr_stat;
52070+ pde->write_proc = proc_write_drvr_stat;
52071+ pde->data = adapter;
52072+ pde->owner = THIS_MODULE;
52073+ }
52074+
52075+#if 0
52076+ if ((pde = create_proc_entry(BE_CSR_R_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52077+ pde->read_proc = proc_read_csr_r;
52078+ pde->write_proc = proc_write_csr_r;
52079+ pde->data = adapter;
52080+ pde->owner = THIS_MODULE;
52081+ }
52082+
52083+ if ((pde = create_proc_entry(BE_CSR_W_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
52084+ pde->read_proc = proc_read_csr_w;
52085+ pde->write_proc = proc_write_csr_w;
52086+ pde->data = adapter;
52087+ pde->owner = THIS_MODULE;
52088+ }
52089+#endif
52090+}
52091+
52092+void be_cleanup_procfs(struct be_adapter *adapter, int adapt_num)
52093+{
52094+ if (adapt_num > MAX_BE_DEVICES - 1)
52095+ return;
52096+ remove_proc_entry(BE_ETH_RING_FILE, be_proc_dir[adapt_num]);
52097+ remove_proc_entry(BE_DRVR_STAT_FILE, be_proc_dir[adapt_num]);
52098+ remove_proc_entry(BE_CSR_R_FILE, be_proc_dir[adapt_num]);
52099+ remove_proc_entry(BE_CSR_W_FILE, be_proc_dir[adapt_num]);
52100+ remove_proc_entry(be_adpt_name[adapt_num], NULL);
52101+}
52102diff --git a/drivers/net/benet/version.h b/drivers/net/benet/version.h
52103new file mode 100644
52104index 0000000..c7ed692
52105--- /dev/null
52106+++ b/drivers/net/benet/version.h
52107@@ -0,0 +1,51 @@
52108+#define STR_BE_BRANCH "0" \r
52109+#define STR_BE_BUILD "479" \r
52110+#define STR_BE_DOT "0"\r
52111+#define STR_BE_MINOR "0"\r
52112+#define STR_BE_MAJOR "4"\r
52113+\r
52114+#define BE_BRANCH 0 \r
52115+#define BE_BUILD 479 \r
52116+#define BE_DOT 0\r
52117+#define BE_MINOR 0\r
52118+#define BE_MAJOR 4\r
52119+\r
52120+#define MGMT_BRANCH 0\r
52121+#define MGMT_BUILDNUM 479\r
52122+#define MGMT_MINOR 0\r
52123+#define MGMT_MAJOR 4\r
52124+\r
52125+#define BE_REDBOOT_VERSION "2.0.5.0"\r
52126+\r
52127+//start-auto\r
52128+#define BUILD_MONTH "12"\r
52129+#define BUILD_MONTH_NAME "December"\r
52130+#define BUILD_DAY "6"\r
52131+#define BUILD_YEAR "2011"\r
52132+#define BUILD_24HOUR "21"\r
52133+#define BUILD_12HOUR "9"\r
52134+#define BUILD_AM_PM "PM"\r
52135+#define BUILD_MIN "48"\r
52136+#define BUILD_SEC "05"\r
52137+#define BUILD_MONTH_NUMBER 12\r
52138+#define BUILD_DAY_NUMBER 6\r
52139+#define BUILD_YEAR_NUMBER 2011\r
52140+#define BUILD_24HOUR_NUMBER 21\r
52141+#define BUILD_12HOUR_NUMBER 9\r
52142+#define BUILD_MIN_NUMBER 48\r
52143+#define BUILD_SEC_NUMBER 5\r
52144+#undef MAJOR_BUILD\r
52145+#undef MINOR_BUILD\r
52146+#undef DOT_BUILD\r
52147+#define NUMBERED_BUILD\r
52148+#undef BRANCH_BUILD\r
52149+//end-auto\r
52150+\r
52151+#define ELX_FCOE_XROM_BIOS_VER "7.03a1"\r
52152+#define ELX_FCoE_X86_VER "4.02a1"\r
52153+#define ELX_FCoE_EFI_VER "5.01a1"\r
52154+#define ELX_FCoE_FCODE_VER "4.01a0"\r
52155+#define ELX_PXE_BIOS_VER "3.00a5"\r
52156+#define ELX_UEFI_NIC_VER "2.10A10"\r
52157+#define ELX_UEFI_FCODE_VER "1.10A0"\r
52158+#define ELX_ISCSI_BIOS_VER "1.00A8"\r
52159diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
52160index 4874b2b..67f8526 100644
52161--- a/drivers/net/bnx2.c
52162+++ b/drivers/net/bnx2.c
52163@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
52164 int rc = 0;
52165 u32 magic, csum;
52166
52167+ pax_track_stack();
52168+
52169 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
52170 goto test_nvram_done;
52171
52172diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
52173index fd3eb07..8a6978d 100644
52174--- a/drivers/net/cxgb3/l2t.h
52175+++ b/drivers/net/cxgb3/l2t.h
52176@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
52177 */
52178 struct l2t_skb_cb {
52179 arp_failure_handler_func arp_failure_handler;
52180-};
52181+} __no_const;
52182
52183 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
52184
52185diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
52186index 032cfe0..411af379 100644
52187--- a/drivers/net/cxgb3/t3_hw.c
52188+++ b/drivers/net/cxgb3/t3_hw.c
52189@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
52190 int i, addr, ret;
52191 struct t3_vpd vpd;
52192
52193+ pax_track_stack();
52194+
52195 /*
52196 * Card information is normally at VPD_BASE but some early cards had
52197 * it at 0.
52198diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
52199index d1e0563..b9e129c 100644
52200--- a/drivers/net/e1000e/82571.c
52201+++ b/drivers/net/e1000e/82571.c
52202@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
52203 {
52204 struct e1000_hw *hw = &adapter->hw;
52205 struct e1000_mac_info *mac = &hw->mac;
52206- struct e1000_mac_operations *func = &mac->ops;
52207+ e1000_mac_operations_no_const *func = &mac->ops;
52208 u32 swsm = 0;
52209 u32 swsm2 = 0;
52210 bool force_clear_smbi = false;
52211@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
52212 temp = er32(ICRXDMTC);
52213 }
52214
52215-static struct e1000_mac_operations e82571_mac_ops = {
52216+static const struct e1000_mac_operations e82571_mac_ops = {
52217 /* .check_mng_mode: mac type dependent */
52218 /* .check_for_link: media type dependent */
52219 .id_led_init = e1000e_id_led_init,
52220@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
52221 .setup_led = e1000e_setup_led_generic,
52222 };
52223
52224-static struct e1000_phy_operations e82_phy_ops_igp = {
52225+static const struct e1000_phy_operations e82_phy_ops_igp = {
52226 .acquire_phy = e1000_get_hw_semaphore_82571,
52227 .check_reset_block = e1000e_check_reset_block_generic,
52228 .commit_phy = NULL,
52229@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
52230 .cfg_on_link_up = NULL,
52231 };
52232
52233-static struct e1000_phy_operations e82_phy_ops_m88 = {
52234+static const struct e1000_phy_operations e82_phy_ops_m88 = {
52235 .acquire_phy = e1000_get_hw_semaphore_82571,
52236 .check_reset_block = e1000e_check_reset_block_generic,
52237 .commit_phy = e1000e_phy_sw_reset,
52238@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
52239 .cfg_on_link_up = NULL,
52240 };
52241
52242-static struct e1000_phy_operations e82_phy_ops_bm = {
52243+static const struct e1000_phy_operations e82_phy_ops_bm = {
52244 .acquire_phy = e1000_get_hw_semaphore_82571,
52245 .check_reset_block = e1000e_check_reset_block_generic,
52246 .commit_phy = e1000e_phy_sw_reset,
52247@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
52248 .cfg_on_link_up = NULL,
52249 };
52250
52251-static struct e1000_nvm_operations e82571_nvm_ops = {
52252+static const struct e1000_nvm_operations e82571_nvm_ops = {
52253 .acquire_nvm = e1000_acquire_nvm_82571,
52254 .read_nvm = e1000e_read_nvm_eerd,
52255 .release_nvm = e1000_release_nvm_82571,
52256diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
52257index 47db9bd..fa58ccd 100644
52258--- a/drivers/net/e1000e/e1000.h
52259+++ b/drivers/net/e1000e/e1000.h
52260@@ -375,9 +375,9 @@ struct e1000_info {
52261 u32 pba;
52262 u32 max_hw_frame_size;
52263 s32 (*get_variants)(struct e1000_adapter *);
52264- struct e1000_mac_operations *mac_ops;
52265- struct e1000_phy_operations *phy_ops;
52266- struct e1000_nvm_operations *nvm_ops;
52267+ const struct e1000_mac_operations *mac_ops;
52268+ const struct e1000_phy_operations *phy_ops;
52269+ const struct e1000_nvm_operations *nvm_ops;
52270 };
52271
52272 /* hardware capability, feature, and workaround flags */
52273diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
52274index ae5d736..e9a93a1 100644
52275--- a/drivers/net/e1000e/es2lan.c
52276+++ b/drivers/net/e1000e/es2lan.c
52277@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
52278 {
52279 struct e1000_hw *hw = &adapter->hw;
52280 struct e1000_mac_info *mac = &hw->mac;
52281- struct e1000_mac_operations *func = &mac->ops;
52282+ e1000_mac_operations_no_const *func = &mac->ops;
52283
52284 /* Set media type */
52285 switch (adapter->pdev->device) {
52286@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
52287 temp = er32(ICRXDMTC);
52288 }
52289
52290-static struct e1000_mac_operations es2_mac_ops = {
52291+static const struct e1000_mac_operations es2_mac_ops = {
52292 .id_led_init = e1000e_id_led_init,
52293 .check_mng_mode = e1000e_check_mng_mode_generic,
52294 /* check_for_link dependent on media type */
52295@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
52296 .setup_led = e1000e_setup_led_generic,
52297 };
52298
52299-static struct e1000_phy_operations es2_phy_ops = {
52300+static const struct e1000_phy_operations es2_phy_ops = {
52301 .acquire_phy = e1000_acquire_phy_80003es2lan,
52302 .check_reset_block = e1000e_check_reset_block_generic,
52303 .commit_phy = e1000e_phy_sw_reset,
52304@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
52305 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
52306 };
52307
52308-static struct e1000_nvm_operations es2_nvm_ops = {
52309+static const struct e1000_nvm_operations es2_nvm_ops = {
52310 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
52311 .read_nvm = e1000e_read_nvm_eerd,
52312 .release_nvm = e1000_release_nvm_80003es2lan,
52313diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
52314index 11f3b7c..6381887 100644
52315--- a/drivers/net/e1000e/hw.h
52316+++ b/drivers/net/e1000e/hw.h
52317@@ -753,6 +753,7 @@ struct e1000_mac_operations {
52318 s32 (*setup_physical_interface)(struct e1000_hw *);
52319 s32 (*setup_led)(struct e1000_hw *);
52320 };
52321+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52322
52323 /* Function pointers for the PHY. */
52324 struct e1000_phy_operations {
52325@@ -774,6 +775,7 @@ struct e1000_phy_operations {
52326 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
52327 s32 (*cfg_on_link_up)(struct e1000_hw *);
52328 };
52329+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52330
52331 /* Function pointers for the NVM. */
52332 struct e1000_nvm_operations {
52333@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
52334 s32 (*validate_nvm)(struct e1000_hw *);
52335 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
52336 };
52337+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52338
52339 struct e1000_mac_info {
52340- struct e1000_mac_operations ops;
52341+ e1000_mac_operations_no_const ops;
52342
52343 u8 addr[6];
52344 u8 perm_addr[6];
52345@@ -823,7 +826,7 @@ struct e1000_mac_info {
52346 };
52347
52348 struct e1000_phy_info {
52349- struct e1000_phy_operations ops;
52350+ e1000_phy_operations_no_const ops;
52351
52352 enum e1000_phy_type type;
52353
52354@@ -857,7 +860,7 @@ struct e1000_phy_info {
52355 };
52356
52357 struct e1000_nvm_info {
52358- struct e1000_nvm_operations ops;
52359+ e1000_nvm_operations_no_const ops;
52360
52361 enum e1000_nvm_type type;
52362 enum e1000_nvm_override override;
52363diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
52364index de39f9a..e28d3e0 100644
52365--- a/drivers/net/e1000e/ich8lan.c
52366+++ b/drivers/net/e1000e/ich8lan.c
52367@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
52368 }
52369 }
52370
52371-static struct e1000_mac_operations ich8_mac_ops = {
52372+static const struct e1000_mac_operations ich8_mac_ops = {
52373 .id_led_init = e1000e_id_led_init,
52374 .check_mng_mode = e1000_check_mng_mode_ich8lan,
52375 .check_for_link = e1000_check_for_copper_link_ich8lan,
52376@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
52377 /* id_led_init dependent on mac type */
52378 };
52379
52380-static struct e1000_phy_operations ich8_phy_ops = {
52381+static const struct e1000_phy_operations ich8_phy_ops = {
52382 .acquire_phy = e1000_acquire_swflag_ich8lan,
52383 .check_reset_block = e1000_check_reset_block_ich8lan,
52384 .commit_phy = NULL,
52385@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
52386 .write_phy_reg = e1000e_write_phy_reg_igp,
52387 };
52388
52389-static struct e1000_nvm_operations ich8_nvm_ops = {
52390+static const struct e1000_nvm_operations ich8_nvm_ops = {
52391 .acquire_nvm = e1000_acquire_nvm_ich8lan,
52392 .read_nvm = e1000_read_nvm_ich8lan,
52393 .release_nvm = e1000_release_nvm_ich8lan,
52394diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
52395index 18d5fbb..542d96d 100644
52396--- a/drivers/net/fealnx.c
52397+++ b/drivers/net/fealnx.c
52398@@ -151,7 +151,7 @@ struct chip_info {
52399 int flags;
52400 };
52401
52402-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
52403+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
52404 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52405 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
52406 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
52407diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
52408index 0e5b54b..b503f82 100644
52409--- a/drivers/net/hamradio/6pack.c
52410+++ b/drivers/net/hamradio/6pack.c
52411@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
52412 unsigned char buf[512];
52413 int count1;
52414
52415+ pax_track_stack();
52416+
52417 if (!count)
52418 return;
52419
52420diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
52421index 5862282..7cce8cb 100644
52422--- a/drivers/net/ibmveth.c
52423+++ b/drivers/net/ibmveth.c
52424@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
52425 NULL,
52426 };
52427
52428-static struct sysfs_ops veth_pool_ops = {
52429+static const struct sysfs_ops veth_pool_ops = {
52430 .show = veth_pool_show,
52431 .store = veth_pool_store,
52432 };
52433diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
52434index d617f2d..57b5309 100644
52435--- a/drivers/net/igb/e1000_82575.c
52436+++ b/drivers/net/igb/e1000_82575.c
52437@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
52438 wr32(E1000_VT_CTL, vt_ctl);
52439 }
52440
52441-static struct e1000_mac_operations e1000_mac_ops_82575 = {
52442+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
52443 .reset_hw = igb_reset_hw_82575,
52444 .init_hw = igb_init_hw_82575,
52445 .check_for_link = igb_check_for_link_82575,
52446@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
52447 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
52448 };
52449
52450-static struct e1000_phy_operations e1000_phy_ops_82575 = {
52451+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
52452 .acquire = igb_acquire_phy_82575,
52453 .get_cfg_done = igb_get_cfg_done_82575,
52454 .release = igb_release_phy_82575,
52455 };
52456
52457-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52458+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
52459 .acquire = igb_acquire_nvm_82575,
52460 .read = igb_read_nvm_eerd,
52461 .release = igb_release_nvm_82575,
52462diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
52463index 72081df..d855cf5 100644
52464--- a/drivers/net/igb/e1000_hw.h
52465+++ b/drivers/net/igb/e1000_hw.h
52466@@ -288,6 +288,7 @@ struct e1000_mac_operations {
52467 s32 (*read_mac_addr)(struct e1000_hw *);
52468 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
52469 };
52470+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52471
52472 struct e1000_phy_operations {
52473 s32 (*acquire)(struct e1000_hw *);
52474@@ -303,6 +304,7 @@ struct e1000_phy_operations {
52475 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
52476 s32 (*write_reg)(struct e1000_hw *, u32, u16);
52477 };
52478+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
52479
52480 struct e1000_nvm_operations {
52481 s32 (*acquire)(struct e1000_hw *);
52482@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
52483 void (*release)(struct e1000_hw *);
52484 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
52485 };
52486+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
52487
52488 struct e1000_info {
52489 s32 (*get_invariants)(struct e1000_hw *);
52490@@ -321,7 +324,7 @@ struct e1000_info {
52491 extern const struct e1000_info e1000_82575_info;
52492
52493 struct e1000_mac_info {
52494- struct e1000_mac_operations ops;
52495+ e1000_mac_operations_no_const ops;
52496
52497 u8 addr[6];
52498 u8 perm_addr[6];
52499@@ -365,7 +368,7 @@ struct e1000_mac_info {
52500 };
52501
52502 struct e1000_phy_info {
52503- struct e1000_phy_operations ops;
52504+ e1000_phy_operations_no_const ops;
52505
52506 enum e1000_phy_type type;
52507
52508@@ -400,7 +403,7 @@ struct e1000_phy_info {
52509 };
52510
52511 struct e1000_nvm_info {
52512- struct e1000_nvm_operations ops;
52513+ e1000_nvm_operations_no_const ops;
52514
52515 enum e1000_nvm_type type;
52516 enum e1000_nvm_override override;
52517@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
52518 s32 (*check_for_ack)(struct e1000_hw *, u16);
52519 s32 (*check_for_rst)(struct e1000_hw *, u16);
52520 };
52521+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52522
52523 struct e1000_mbx_stats {
52524 u32 msgs_tx;
52525@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
52526 };
52527
52528 struct e1000_mbx_info {
52529- struct e1000_mbx_operations ops;
52530+ e1000_mbx_operations_no_const ops;
52531 struct e1000_mbx_stats stats;
52532 u32 timeout;
52533 u32 usec_delay;
52534diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
52535index 1e8ce37..549c453 100644
52536--- a/drivers/net/igbvf/vf.h
52537+++ b/drivers/net/igbvf/vf.h
52538@@ -187,9 +187,10 @@ struct e1000_mac_operations {
52539 s32 (*read_mac_addr)(struct e1000_hw *);
52540 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
52541 };
52542+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52543
52544 struct e1000_mac_info {
52545- struct e1000_mac_operations ops;
52546+ e1000_mac_operations_no_const ops;
52547 u8 addr[6];
52548 u8 perm_addr[6];
52549
52550@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
52551 s32 (*check_for_ack)(struct e1000_hw *);
52552 s32 (*check_for_rst)(struct e1000_hw *);
52553 };
52554+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52555
52556 struct e1000_mbx_stats {
52557 u32 msgs_tx;
52558@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
52559 };
52560
52561 struct e1000_mbx_info {
52562- struct e1000_mbx_operations ops;
52563+ e1000_mbx_operations_no_const ops;
52564 struct e1000_mbx_stats stats;
52565 u32 timeout;
52566 u32 usec_delay;
52567diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
52568index aa7286b..a61394f 100644
52569--- a/drivers/net/iseries_veth.c
52570+++ b/drivers/net/iseries_veth.c
52571@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
52572 NULL
52573 };
52574
52575-static struct sysfs_ops veth_cnx_sysfs_ops = {
52576+static const struct sysfs_ops veth_cnx_sysfs_ops = {
52577 .show = veth_cnx_attribute_show
52578 };
52579
52580@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
52581 NULL
52582 };
52583
52584-static struct sysfs_ops veth_port_sysfs_ops = {
52585+static const struct sysfs_ops veth_port_sysfs_ops = {
52586 .show = veth_port_attribute_show
52587 };
52588
52589diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
52590index 8aa44dc..fa1e797 100644
52591--- a/drivers/net/ixgb/ixgb_main.c
52592+++ b/drivers/net/ixgb/ixgb_main.c
52593@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
52594 u32 rctl;
52595 int i;
52596
52597+ pax_track_stack();
52598+
52599 /* Check for Promiscuous and All Multicast modes */
52600
52601 rctl = IXGB_READ_REG(hw, RCTL);
52602diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
52603index af35e1d..8781785 100644
52604--- a/drivers/net/ixgb/ixgb_param.c
52605+++ b/drivers/net/ixgb/ixgb_param.c
52606@@ -260,6 +260,9 @@ void __devinit
52607 ixgb_check_options(struct ixgb_adapter *adapter)
52608 {
52609 int bd = adapter->bd_number;
52610+
52611+ pax_track_stack();
52612+
52613 if (bd >= IXGB_MAX_NIC) {
52614 printk(KERN_NOTICE
52615 "Warning: no configuration for board #%i\n", bd);
52616diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
52617index b17aa73..ed74540 100644
52618--- a/drivers/net/ixgbe/ixgbe_type.h
52619+++ b/drivers/net/ixgbe/ixgbe_type.h
52620@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
52621 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
52622 s32 (*update_checksum)(struct ixgbe_hw *);
52623 };
52624+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
52625
52626 struct ixgbe_mac_operations {
52627 s32 (*init_hw)(struct ixgbe_hw *);
52628@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
52629 /* Flow Control */
52630 s32 (*fc_enable)(struct ixgbe_hw *, s32);
52631 };
52632+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
52633
52634 struct ixgbe_phy_operations {
52635 s32 (*identify)(struct ixgbe_hw *);
52636@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
52637 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
52638 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
52639 };
52640+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
52641
52642 struct ixgbe_eeprom_info {
52643- struct ixgbe_eeprom_operations ops;
52644+ ixgbe_eeprom_operations_no_const ops;
52645 enum ixgbe_eeprom_type type;
52646 u32 semaphore_delay;
52647 u16 word_size;
52648@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
52649 };
52650
52651 struct ixgbe_mac_info {
52652- struct ixgbe_mac_operations ops;
52653+ ixgbe_mac_operations_no_const ops;
52654 enum ixgbe_mac_type type;
52655 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52656 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52657@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
52658 };
52659
52660 struct ixgbe_phy_info {
52661- struct ixgbe_phy_operations ops;
52662+ ixgbe_phy_operations_no_const ops;
52663 struct mdio_if_info mdio;
52664 enum ixgbe_phy_type type;
52665 u32 id;
52666diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
52667index 291a505..2543756 100644
52668--- a/drivers/net/mlx4/main.c
52669+++ b/drivers/net/mlx4/main.c
52670@@ -38,6 +38,7 @@
52671 #include <linux/errno.h>
52672 #include <linux/pci.h>
52673 #include <linux/dma-mapping.h>
52674+#include <linux/sched.h>
52675
52676 #include <linux/mlx4/device.h>
52677 #include <linux/mlx4/doorbell.h>
52678@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
52679 u64 icm_size;
52680 int err;
52681
52682+ pax_track_stack();
52683+
52684 err = mlx4_QUERY_FW(dev);
52685 if (err) {
52686 if (err == -EACCES)
52687diff --git a/drivers/net/niu.c b/drivers/net/niu.c
52688index 2dce134..fa5ce75 100644
52689--- a/drivers/net/niu.c
52690+++ b/drivers/net/niu.c
52691@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
52692 int i, num_irqs, err;
52693 u8 first_ldg;
52694
52695+ pax_track_stack();
52696+
52697 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
52698 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
52699 ldg_num_map[i] = first_ldg + i;
52700diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
52701index c1b3f09..97cd8c4 100644
52702--- a/drivers/net/pcnet32.c
52703+++ b/drivers/net/pcnet32.c
52704@@ -79,7 +79,7 @@ static int cards_found;
52705 /*
52706 * VLB I/O addresses
52707 */
52708-static unsigned int pcnet32_portlist[] __initdata =
52709+static unsigned int pcnet32_portlist[] __devinitdata =
52710 { 0x300, 0x320, 0x340, 0x360, 0 };
52711
52712 static int pcnet32_debug = 0;
52713@@ -267,7 +267,7 @@ struct pcnet32_private {
52714 struct sk_buff **rx_skbuff;
52715 dma_addr_t *tx_dma_addr;
52716 dma_addr_t *rx_dma_addr;
52717- struct pcnet32_access a;
52718+ struct pcnet32_access *a;
52719 spinlock_t lock; /* Guard lock */
52720 unsigned int cur_rx, cur_tx; /* The next free ring entry */
52721 unsigned int rx_ring_size; /* current rx ring size */
52722@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
52723 u16 val;
52724
52725 netif_wake_queue(dev);
52726- val = lp->a.read_csr(ioaddr, CSR3);
52727+ val = lp->a->read_csr(ioaddr, CSR3);
52728 val &= 0x00ff;
52729- lp->a.write_csr(ioaddr, CSR3, val);
52730+ lp->a->write_csr(ioaddr, CSR3, val);
52731 napi_enable(&lp->napi);
52732 }
52733
52734@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
52735 r = mii_link_ok(&lp->mii_if);
52736 } else if (lp->chip_version >= PCNET32_79C970A) {
52737 ulong ioaddr = dev->base_addr; /* card base I/O address */
52738- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
52739+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
52740 } else { /* can not detect link on really old chips */
52741 r = 1;
52742 }
52743@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
52744 pcnet32_netif_stop(dev);
52745
52746 spin_lock_irqsave(&lp->lock, flags);
52747- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52748+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52749
52750 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
52751
52752@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
52753 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52754 {
52755 struct pcnet32_private *lp = netdev_priv(dev);
52756- struct pcnet32_access *a = &lp->a; /* access to registers */
52757+ struct pcnet32_access *a = lp->a; /* access to registers */
52758 ulong ioaddr = dev->base_addr; /* card base I/O address */
52759 struct sk_buff *skb; /* sk buff */
52760 int x, i; /* counters */
52761@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52762 pcnet32_netif_stop(dev);
52763
52764 spin_lock_irqsave(&lp->lock, flags);
52765- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52766+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52767
52768 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
52769
52770 /* Reset the PCNET32 */
52771- lp->a.reset(ioaddr);
52772- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52773+ lp->a->reset(ioaddr);
52774+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52775
52776 /* switch pcnet32 to 32bit mode */
52777- lp->a.write_bcr(ioaddr, 20, 2);
52778+ lp->a->write_bcr(ioaddr, 20, 2);
52779
52780 /* purge & init rings but don't actually restart */
52781 pcnet32_restart(dev, 0x0000);
52782
52783- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52784+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52785
52786 /* Initialize Transmit buffers. */
52787 size = data_len + 15;
52788@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52789
52790 /* set int loopback in CSR15 */
52791 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
52792- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
52793+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
52794
52795 teststatus = cpu_to_le16(0x8000);
52796- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52797+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52798
52799 /* Check status of descriptors */
52800 for (x = 0; x < numbuffs; x++) {
52801@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52802 }
52803 }
52804
52805- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52806+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52807 wmb();
52808 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
52809 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
52810@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52811 pcnet32_restart(dev, CSR0_NORMAL);
52812 } else {
52813 pcnet32_purge_rx_ring(dev);
52814- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52815+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52816 }
52817 spin_unlock_irqrestore(&lp->lock, flags);
52818
52819@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52820 static void pcnet32_led_blink_callback(struct net_device *dev)
52821 {
52822 struct pcnet32_private *lp = netdev_priv(dev);
52823- struct pcnet32_access *a = &lp->a;
52824+ struct pcnet32_access *a = lp->a;
52825 ulong ioaddr = dev->base_addr;
52826 unsigned long flags;
52827 int i;
52828@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
52829 static int pcnet32_phys_id(struct net_device *dev, u32 data)
52830 {
52831 struct pcnet32_private *lp = netdev_priv(dev);
52832- struct pcnet32_access *a = &lp->a;
52833+ struct pcnet32_access *a = lp->a;
52834 ulong ioaddr = dev->base_addr;
52835 unsigned long flags;
52836 int i, regs[4];
52837@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
52838 {
52839 int csr5;
52840 struct pcnet32_private *lp = netdev_priv(dev);
52841- struct pcnet32_access *a = &lp->a;
52842+ struct pcnet32_access *a = lp->a;
52843 ulong ioaddr = dev->base_addr;
52844 int ticks;
52845
52846@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52847 spin_lock_irqsave(&lp->lock, flags);
52848 if (pcnet32_tx(dev)) {
52849 /* reset the chip to clear the error condition, then restart */
52850- lp->a.reset(ioaddr);
52851- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52852+ lp->a->reset(ioaddr);
52853+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52854 pcnet32_restart(dev, CSR0_START);
52855 netif_wake_queue(dev);
52856 }
52857@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52858 __napi_complete(napi);
52859
52860 /* clear interrupt masks */
52861- val = lp->a.read_csr(ioaddr, CSR3);
52862+ val = lp->a->read_csr(ioaddr, CSR3);
52863 val &= 0x00ff;
52864- lp->a.write_csr(ioaddr, CSR3, val);
52865+ lp->a->write_csr(ioaddr, CSR3, val);
52866
52867 /* Set interrupt enable. */
52868- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
52869+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
52870
52871 spin_unlock_irqrestore(&lp->lock, flags);
52872 }
52873@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52874 int i, csr0;
52875 u16 *buff = ptr;
52876 struct pcnet32_private *lp = netdev_priv(dev);
52877- struct pcnet32_access *a = &lp->a;
52878+ struct pcnet32_access *a = lp->a;
52879 ulong ioaddr = dev->base_addr;
52880 unsigned long flags;
52881
52882@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52883 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
52884 if (lp->phymask & (1 << j)) {
52885 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
52886- lp->a.write_bcr(ioaddr, 33,
52887+ lp->a->write_bcr(ioaddr, 33,
52888 (j << 5) | i);
52889- *buff++ = lp->a.read_bcr(ioaddr, 34);
52890+ *buff++ = lp->a->read_bcr(ioaddr, 34);
52891 }
52892 }
52893 }
52894@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52895 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
52896 lp->options |= PCNET32_PORT_FD;
52897
52898- lp->a = *a;
52899+ lp->a = a;
52900
52901 /* prior to register_netdev, dev->name is not yet correct */
52902 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
52903@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52904 if (lp->mii) {
52905 /* lp->phycount and lp->phymask are set to 0 by memset above */
52906
52907- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52908+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52909 /* scan for PHYs */
52910 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
52911 unsigned short id1, id2;
52912@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52913 "Found PHY %04x:%04x at address %d.\n",
52914 id1, id2, i);
52915 }
52916- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52917+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52918 if (lp->phycount > 1) {
52919 lp->options |= PCNET32_PORT_MII;
52920 }
52921@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
52922 }
52923
52924 /* Reset the PCNET32 */
52925- lp->a.reset(ioaddr);
52926+ lp->a->reset(ioaddr);
52927
52928 /* switch pcnet32 to 32bit mode */
52929- lp->a.write_bcr(ioaddr, 20, 2);
52930+ lp->a->write_bcr(ioaddr, 20, 2);
52931
52932 if (netif_msg_ifup(lp))
52933 printk(KERN_DEBUG
52934@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
52935 (u32) (lp->init_dma_addr));
52936
52937 /* set/reset autoselect bit */
52938- val = lp->a.read_bcr(ioaddr, 2) & ~2;
52939+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
52940 if (lp->options & PCNET32_PORT_ASEL)
52941 val |= 2;
52942- lp->a.write_bcr(ioaddr, 2, val);
52943+ lp->a->write_bcr(ioaddr, 2, val);
52944
52945 /* handle full duplex setting */
52946 if (lp->mii_if.full_duplex) {
52947- val = lp->a.read_bcr(ioaddr, 9) & ~3;
52948+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
52949 if (lp->options & PCNET32_PORT_FD) {
52950 val |= 1;
52951 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
52952@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
52953 if (lp->chip_version == 0x2627)
52954 val |= 3;
52955 }
52956- lp->a.write_bcr(ioaddr, 9, val);
52957+ lp->a->write_bcr(ioaddr, 9, val);
52958 }
52959
52960 /* set/reset GPSI bit in test register */
52961- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
52962+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
52963 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
52964 val |= 0x10;
52965- lp->a.write_csr(ioaddr, 124, val);
52966+ lp->a->write_csr(ioaddr, 124, val);
52967
52968 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
52969 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
52970@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
52971 * duplex, and/or enable auto negotiation, and clear DANAS
52972 */
52973 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
52974- lp->a.write_bcr(ioaddr, 32,
52975- lp->a.read_bcr(ioaddr, 32) | 0x0080);
52976+ lp->a->write_bcr(ioaddr, 32,
52977+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
52978 /* disable Auto Negotiation, set 10Mpbs, HD */
52979- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
52980+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
52981 if (lp->options & PCNET32_PORT_FD)
52982 val |= 0x10;
52983 if (lp->options & PCNET32_PORT_100)
52984 val |= 0x08;
52985- lp->a.write_bcr(ioaddr, 32, val);
52986+ lp->a->write_bcr(ioaddr, 32, val);
52987 } else {
52988 if (lp->options & PCNET32_PORT_ASEL) {
52989- lp->a.write_bcr(ioaddr, 32,
52990- lp->a.read_bcr(ioaddr,
52991+ lp->a->write_bcr(ioaddr, 32,
52992+ lp->a->read_bcr(ioaddr,
52993 32) | 0x0080);
52994 /* enable auto negotiate, setup, disable fd */
52995- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
52996+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
52997 val |= 0x20;
52998- lp->a.write_bcr(ioaddr, 32, val);
52999+ lp->a->write_bcr(ioaddr, 32, val);
53000 }
53001 }
53002 } else {
53003@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
53004 * There is really no good other way to handle multiple PHYs
53005 * other than turning off all automatics
53006 */
53007- val = lp->a.read_bcr(ioaddr, 2);
53008- lp->a.write_bcr(ioaddr, 2, val & ~2);
53009- val = lp->a.read_bcr(ioaddr, 32);
53010- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53011+ val = lp->a->read_bcr(ioaddr, 2);
53012+ lp->a->write_bcr(ioaddr, 2, val & ~2);
53013+ val = lp->a->read_bcr(ioaddr, 32);
53014+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
53015
53016 if (!(lp->options & PCNET32_PORT_ASEL)) {
53017 /* setup ecmd */
53018@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
53019 ecmd.speed =
53020 lp->
53021 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
53022- bcr9 = lp->a.read_bcr(ioaddr, 9);
53023+ bcr9 = lp->a->read_bcr(ioaddr, 9);
53024
53025 if (lp->options & PCNET32_PORT_FD) {
53026 ecmd.duplex = DUPLEX_FULL;
53027@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
53028 ecmd.duplex = DUPLEX_HALF;
53029 bcr9 |= ~(1 << 0);
53030 }
53031- lp->a.write_bcr(ioaddr, 9, bcr9);
53032+ lp->a->write_bcr(ioaddr, 9, bcr9);
53033 }
53034
53035 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
53036@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
53037
53038 #ifdef DO_DXSUFLO
53039 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
53040- val = lp->a.read_csr(ioaddr, CSR3);
53041+ val = lp->a->read_csr(ioaddr, CSR3);
53042 val |= 0x40;
53043- lp->a.write_csr(ioaddr, CSR3, val);
53044+ lp->a->write_csr(ioaddr, CSR3, val);
53045 }
53046 #endif
53047
53048@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
53049 napi_enable(&lp->napi);
53050
53051 /* Re-initialize the PCNET32, and start it when done. */
53052- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53053- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53054+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
53055+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
53056
53057- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53058- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53059+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
53060+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53061
53062 netif_start_queue(dev);
53063
53064@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
53065
53066 i = 0;
53067 while (i++ < 100)
53068- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53069+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53070 break;
53071 /*
53072 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
53073 * reports that doing so triggers a bug in the '974.
53074 */
53075- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
53076+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
53077
53078 if (netif_msg_ifup(lp))
53079 printk(KERN_DEBUG
53080 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
53081 dev->name, i,
53082 (u32) (lp->init_dma_addr),
53083- lp->a.read_csr(ioaddr, CSR0));
53084+ lp->a->read_csr(ioaddr, CSR0));
53085
53086 spin_unlock_irqrestore(&lp->lock, flags);
53087
53088@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
53089 * Switch back to 16bit mode to avoid problems with dumb
53090 * DOS packet driver after a warm reboot
53091 */
53092- lp->a.write_bcr(ioaddr, 20, 4);
53093+ lp->a->write_bcr(ioaddr, 20, 4);
53094
53095 err_free_irq:
53096 spin_unlock_irqrestore(&lp->lock, flags);
53097@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53098
53099 /* wait for stop */
53100 for (i = 0; i < 100; i++)
53101- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
53102+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
53103 break;
53104
53105 if (i >= 100 && netif_msg_drv(lp))
53106@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
53107 return;
53108
53109 /* ReInit Ring */
53110- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
53111+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
53112 i = 0;
53113 while (i++ < 1000)
53114- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
53115+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
53116 break;
53117
53118- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
53119+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
53120 }
53121
53122 static void pcnet32_tx_timeout(struct net_device *dev)
53123@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
53124 if (pcnet32_debug & NETIF_MSG_DRV)
53125 printk(KERN_ERR
53126 "%s: transmit timed out, status %4.4x, resetting.\n",
53127- dev->name, lp->a.read_csr(ioaddr, CSR0));
53128- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53129+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53130+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53131 dev->stats.tx_errors++;
53132 if (netif_msg_tx_err(lp)) {
53133 int i;
53134@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53135 if (netif_msg_tx_queued(lp)) {
53136 printk(KERN_DEBUG
53137 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
53138- dev->name, lp->a.read_csr(ioaddr, CSR0));
53139+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53140 }
53141
53142 /* Default status -- will not enable Successful-TxDone
53143@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
53144 dev->stats.tx_bytes += skb->len;
53145
53146 /* Trigger an immediate send poll. */
53147- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53148+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
53149
53150 dev->trans_start = jiffies;
53151
53152@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
53153
53154 spin_lock(&lp->lock);
53155
53156- csr0 = lp->a.read_csr(ioaddr, CSR0);
53157+ csr0 = lp->a->read_csr(ioaddr, CSR0);
53158 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
53159 if (csr0 == 0xffff) {
53160 break; /* PCMCIA remove happened */
53161 }
53162 /* Acknowledge all of the current interrupt sources ASAP. */
53163- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53164+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
53165
53166 if (netif_msg_intr(lp))
53167 printk(KERN_DEBUG
53168 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
53169- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
53170+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
53171
53172 /* Log misc errors. */
53173 if (csr0 & 0x4000)
53174@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
53175 if (napi_schedule_prep(&lp->napi)) {
53176 u16 val;
53177 /* set interrupt masks */
53178- val = lp->a.read_csr(ioaddr, CSR3);
53179+ val = lp->a->read_csr(ioaddr, CSR3);
53180 val |= 0x5f00;
53181- lp->a.write_csr(ioaddr, CSR3, val);
53182+ lp->a->write_csr(ioaddr, CSR3, val);
53183
53184 __napi_schedule(&lp->napi);
53185 break;
53186 }
53187- csr0 = lp->a.read_csr(ioaddr, CSR0);
53188+ csr0 = lp->a->read_csr(ioaddr, CSR0);
53189 }
53190
53191 if (netif_msg_intr(lp))
53192 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
53193- dev->name, lp->a.read_csr(ioaddr, CSR0));
53194+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53195
53196 spin_unlock(&lp->lock);
53197
53198@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
53199
53200 spin_lock_irqsave(&lp->lock, flags);
53201
53202- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53203+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53204
53205 if (netif_msg_ifdown(lp))
53206 printk(KERN_DEBUG
53207 "%s: Shutting down ethercard, status was %2.2x.\n",
53208- dev->name, lp->a.read_csr(ioaddr, CSR0));
53209+ dev->name, lp->a->read_csr(ioaddr, CSR0));
53210
53211 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
53212- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53213+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53214
53215 /*
53216 * Switch back to 16bit mode to avoid problems with dumb
53217 * DOS packet driver after a warm reboot
53218 */
53219- lp->a.write_bcr(ioaddr, 20, 4);
53220+ lp->a->write_bcr(ioaddr, 20, 4);
53221
53222 spin_unlock_irqrestore(&lp->lock, flags);
53223
53224@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
53225 unsigned long flags;
53226
53227 spin_lock_irqsave(&lp->lock, flags);
53228- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
53229+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
53230 spin_unlock_irqrestore(&lp->lock, flags);
53231
53232 return &dev->stats;
53233@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
53234 if (dev->flags & IFF_ALLMULTI) {
53235 ib->filter[0] = cpu_to_le32(~0U);
53236 ib->filter[1] = cpu_to_le32(~0U);
53237- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53238- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53239- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53240- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53241+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
53242+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
53243+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
53244+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
53245 return;
53246 }
53247 /* clear the multicast filter */
53248@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
53249 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
53250 }
53251 for (i = 0; i < 4; i++)
53252- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
53253+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
53254 le16_to_cpu(mcast_table[i]));
53255 return;
53256 }
53257@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53258
53259 spin_lock_irqsave(&lp->lock, flags);
53260 suspended = pcnet32_suspend(dev, &flags, 0);
53261- csr15 = lp->a.read_csr(ioaddr, CSR15);
53262+ csr15 = lp->a->read_csr(ioaddr, CSR15);
53263 if (dev->flags & IFF_PROMISC) {
53264 /* Log any net taps. */
53265 if (netif_msg_hw(lp))
53266@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
53267 lp->init_block->mode =
53268 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
53269 7);
53270- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
53271+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
53272 } else {
53273 lp->init_block->mode =
53274 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
53275- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53276+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
53277 pcnet32_load_multicast(dev);
53278 }
53279
53280 if (suspended) {
53281 int csr5;
53282 /* clear SUSPEND (SPND) - CSR5 bit 0 */
53283- csr5 = lp->a.read_csr(ioaddr, CSR5);
53284- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53285+ csr5 = lp->a->read_csr(ioaddr, CSR5);
53286+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
53287 } else {
53288- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
53289+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
53290 pcnet32_restart(dev, CSR0_NORMAL);
53291 netif_wake_queue(dev);
53292 }
53293@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
53294 if (!lp->mii)
53295 return 0;
53296
53297- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53298- val_out = lp->a.read_bcr(ioaddr, 34);
53299+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53300+ val_out = lp->a->read_bcr(ioaddr, 34);
53301
53302 return val_out;
53303 }
53304@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
53305 if (!lp->mii)
53306 return;
53307
53308- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53309- lp->a.write_bcr(ioaddr, 34, val);
53310+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
53311+ lp->a->write_bcr(ioaddr, 34, val);
53312 }
53313
53314 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53315@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53316 curr_link = mii_link_ok(&lp->mii_if);
53317 } else {
53318 ulong ioaddr = dev->base_addr; /* card base I/O address */
53319- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
53320+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
53321 }
53322 if (!curr_link) {
53323 if (prev_link || verbose) {
53324@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
53325 (ecmd.duplex ==
53326 DUPLEX_FULL) ? "full" : "half");
53327 }
53328- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
53329+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
53330 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
53331 if (lp->mii_if.full_duplex)
53332 bcr9 |= (1 << 0);
53333 else
53334 bcr9 &= ~(1 << 0);
53335- lp->a.write_bcr(dev->base_addr, 9, bcr9);
53336+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
53337 }
53338 } else {
53339 if (netif_msg_link(lp))
53340diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
53341index 7cc9898..6eb50d3 100644
53342--- a/drivers/net/sis190.c
53343+++ b/drivers/net/sis190.c
53344@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
53345 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
53346 struct net_device *dev)
53347 {
53348- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
53349+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
53350 struct sis190_private *tp = netdev_priv(dev);
53351 struct pci_dev *isa_bridge;
53352 u8 reg, tmp8;
53353diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
53354index e13685a..60c948c 100644
53355--- a/drivers/net/sundance.c
53356+++ b/drivers/net/sundance.c
53357@@ -225,7 +225,7 @@ enum {
53358 struct pci_id_info {
53359 const char *name;
53360 };
53361-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53362+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53363 {"D-Link DFE-550TX FAST Ethernet Adapter"},
53364 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
53365 {"D-Link DFE-580TX 4 port Server Adapter"},
53366diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
53367index 529f55a..cccaa18 100644
53368--- a/drivers/net/tg3.h
53369+++ b/drivers/net/tg3.h
53370@@ -95,6 +95,7 @@
53371 #define CHIPREV_ID_5750_A0 0x4000
53372 #define CHIPREV_ID_5750_A1 0x4001
53373 #define CHIPREV_ID_5750_A3 0x4003
53374+#define CHIPREV_ID_5750_C1 0x4201
53375 #define CHIPREV_ID_5750_C2 0x4202
53376 #define CHIPREV_ID_5752_A0_HW 0x5000
53377 #define CHIPREV_ID_5752_A0 0x6000
53378diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
53379index b9db1b5..720f9ce 100644
53380--- a/drivers/net/tokenring/abyss.c
53381+++ b/drivers/net/tokenring/abyss.c
53382@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
53383
53384 static int __init abyss_init (void)
53385 {
53386- abyss_netdev_ops = tms380tr_netdev_ops;
53387+ pax_open_kernel();
53388+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53389
53390- abyss_netdev_ops.ndo_open = abyss_open;
53391- abyss_netdev_ops.ndo_stop = abyss_close;
53392+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
53393+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
53394+ pax_close_kernel();
53395
53396 return pci_register_driver(&abyss_driver);
53397 }
53398diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
53399index 456f8bf..373e56d 100644
53400--- a/drivers/net/tokenring/madgemc.c
53401+++ b/drivers/net/tokenring/madgemc.c
53402@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
53403
53404 static int __init madgemc_init (void)
53405 {
53406- madgemc_netdev_ops = tms380tr_netdev_ops;
53407- madgemc_netdev_ops.ndo_open = madgemc_open;
53408- madgemc_netdev_ops.ndo_stop = madgemc_close;
53409+ pax_open_kernel();
53410+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53411+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
53412+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
53413+ pax_close_kernel();
53414
53415 return mca_register_driver (&madgemc_driver);
53416 }
53417diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
53418index 16e8783..925bd49 100644
53419--- a/drivers/net/tokenring/proteon.c
53420+++ b/drivers/net/tokenring/proteon.c
53421@@ -353,9 +353,11 @@ static int __init proteon_init(void)
53422 struct platform_device *pdev;
53423 int i, num = 0, err = 0;
53424
53425- proteon_netdev_ops = tms380tr_netdev_ops;
53426- proteon_netdev_ops.ndo_open = proteon_open;
53427- proteon_netdev_ops.ndo_stop = tms380tr_close;
53428+ pax_open_kernel();
53429+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53430+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
53431+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
53432+ pax_close_kernel();
53433
53434 err = platform_driver_register(&proteon_driver);
53435 if (err)
53436diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
53437index 46db5c5..37c1536 100644
53438--- a/drivers/net/tokenring/skisa.c
53439+++ b/drivers/net/tokenring/skisa.c
53440@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
53441 struct platform_device *pdev;
53442 int i, num = 0, err = 0;
53443
53444- sk_isa_netdev_ops = tms380tr_netdev_ops;
53445- sk_isa_netdev_ops.ndo_open = sk_isa_open;
53446- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53447+ pax_open_kernel();
53448+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
53449+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
53450+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
53451+ pax_close_kernel();
53452
53453 err = platform_driver_register(&sk_isa_driver);
53454 if (err)
53455diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
53456index 74e5ba4..5cf6bc9 100644
53457--- a/drivers/net/tulip/de2104x.c
53458+++ b/drivers/net/tulip/de2104x.c
53459@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
53460 struct de_srom_info_leaf *il;
53461 void *bufp;
53462
53463+ pax_track_stack();
53464+
53465 /* download entire eeprom */
53466 for (i = 0; i < DE_EEPROM_WORDS; i++)
53467 ((__le16 *)ee_data)[i] =
53468diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
53469index a8349b7..90f9dfe 100644
53470--- a/drivers/net/tulip/de4x5.c
53471+++ b/drivers/net/tulip/de4x5.c
53472@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53473 for (i=0; i<ETH_ALEN; i++) {
53474 tmp.addr[i] = dev->dev_addr[i];
53475 }
53476- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53477+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
53478 break;
53479
53480 case DE4X5_SET_HWADDR: /* Set the hardware address */
53481@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
53482 spin_lock_irqsave(&lp->lock, flags);
53483 memcpy(&statbuf, &lp->pktStats, ioc->len);
53484 spin_unlock_irqrestore(&lp->lock, flags);
53485- if (copy_to_user(ioc->data, &statbuf, ioc->len))
53486+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
53487 return -EFAULT;
53488 break;
53489 }
53490diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
53491index 391acd3..56d11cd 100644
53492--- a/drivers/net/tulip/eeprom.c
53493+++ b/drivers/net/tulip/eeprom.c
53494@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
53495 {NULL}};
53496
53497
53498-static const char *block_name[] __devinitdata = {
53499+static const char *block_name[] __devinitconst = {
53500 "21140 non-MII",
53501 "21140 MII PHY",
53502 "21142 Serial PHY",
53503diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
53504index b38d3b7..b1cff23 100644
53505--- a/drivers/net/tulip/winbond-840.c
53506+++ b/drivers/net/tulip/winbond-840.c
53507@@ -235,7 +235,7 @@ struct pci_id_info {
53508 int drv_flags; /* Driver use, intended as capability flags. */
53509 };
53510
53511-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53512+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53513 { /* Sometime a Level-One switch card. */
53514 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
53515 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
53516diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
53517index f450bc9..2b747c8 100644
53518--- a/drivers/net/usb/hso.c
53519+++ b/drivers/net/usb/hso.c
53520@@ -71,7 +71,7 @@
53521 #include <asm/byteorder.h>
53522 #include <linux/serial_core.h>
53523 #include <linux/serial.h>
53524-
53525+#include <asm/local.h>
53526
53527 #define DRIVER_VERSION "1.2"
53528 #define MOD_AUTHOR "Option Wireless"
53529@@ -258,7 +258,7 @@ struct hso_serial {
53530
53531 /* from usb_serial_port */
53532 struct tty_struct *tty;
53533- int open_count;
53534+ local_t open_count;
53535 spinlock_t serial_lock;
53536
53537 int (*write_data) (struct hso_serial *serial);
53538@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
53539 struct urb *urb;
53540
53541 urb = serial->rx_urb[0];
53542- if (serial->open_count > 0) {
53543+ if (local_read(&serial->open_count) > 0) {
53544 count = put_rxbuf_data(urb, serial);
53545 if (count == -1)
53546 return;
53547@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
53548 DUMP1(urb->transfer_buffer, urb->actual_length);
53549
53550 /* Anyone listening? */
53551- if (serial->open_count == 0)
53552+ if (local_read(&serial->open_count) == 0)
53553 return;
53554
53555 if (status == 0) {
53556@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53557 spin_unlock_irq(&serial->serial_lock);
53558
53559 /* check for port already opened, if not set the termios */
53560- serial->open_count++;
53561- if (serial->open_count == 1) {
53562+ if (local_inc_return(&serial->open_count) == 1) {
53563 tty->low_latency = 1;
53564 serial->rx_state = RX_IDLE;
53565 /* Force default termio settings */
53566@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53567 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
53568 if (result) {
53569 hso_stop_serial_device(serial->parent);
53570- serial->open_count--;
53571+ local_dec(&serial->open_count);
53572 kref_put(&serial->parent->ref, hso_serial_ref_free);
53573 }
53574 } else {
53575@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
53576
53577 /* reset the rts and dtr */
53578 /* do the actual close */
53579- serial->open_count--;
53580+ local_dec(&serial->open_count);
53581
53582- if (serial->open_count <= 0) {
53583- serial->open_count = 0;
53584+ if (local_read(&serial->open_count) <= 0) {
53585+ local_set(&serial->open_count, 0);
53586 spin_lock_irq(&serial->serial_lock);
53587 if (serial->tty == tty) {
53588 serial->tty->driver_data = NULL;
53589@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
53590
53591 /* the actual setup */
53592 spin_lock_irqsave(&serial->serial_lock, flags);
53593- if (serial->open_count)
53594+ if (local_read(&serial->open_count))
53595 _hso_serial_set_termios(tty, old);
53596 else
53597 tty->termios = old;
53598@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
53599 /* Start all serial ports */
53600 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
53601 if (serial_table[i] && (serial_table[i]->interface == iface)) {
53602- if (dev2ser(serial_table[i])->open_count) {
53603+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
53604 result =
53605 hso_start_serial_device(serial_table[i], GFP_NOIO);
53606 hso_kick_transmit(dev2ser(serial_table[i]));
53607diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
53608index 3e94f0c..ffdd926 100644
53609--- a/drivers/net/vxge/vxge-config.h
53610+++ b/drivers/net/vxge/vxge-config.h
53611@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
53612 void (*link_down)(struct __vxge_hw_device *devh);
53613 void (*crit_err)(struct __vxge_hw_device *devh,
53614 enum vxge_hw_event type, u64 ext_data);
53615-};
53616+} __no_const;
53617
53618 /*
53619 * struct __vxge_hw_blockpool_entry - Block private data structure
53620diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
53621index 068d7a9..35293de 100644
53622--- a/drivers/net/vxge/vxge-main.c
53623+++ b/drivers/net/vxge/vxge-main.c
53624@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
53625 struct sk_buff *completed[NR_SKB_COMPLETED];
53626 int more;
53627
53628+ pax_track_stack();
53629+
53630 do {
53631 more = 0;
53632 skb_ptr = completed;
53633@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
53634 u8 mtable[256] = {0}; /* CPU to vpath mapping */
53635 int index;
53636
53637+ pax_track_stack();
53638+
53639 /*
53640 * Filling
53641 * - itable with bucket numbers
53642diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
53643index 461742b..81be42e 100644
53644--- a/drivers/net/vxge/vxge-traffic.h
53645+++ b/drivers/net/vxge/vxge-traffic.h
53646@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
53647 struct vxge_hw_mempool_dma *dma_object,
53648 u32 index,
53649 u32 is_last);
53650-};
53651+} __no_const;
53652
53653 void
53654 __vxge_hw_mempool_destroy(
53655diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
53656index cd8cb95..4153b79 100644
53657--- a/drivers/net/wan/cycx_x25.c
53658+++ b/drivers/net/wan/cycx_x25.c
53659@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
53660 unsigned char hex[1024],
53661 * phex = hex;
53662
53663+ pax_track_stack();
53664+
53665 if (len >= (sizeof(hex) / 2))
53666 len = (sizeof(hex) / 2) - 1;
53667
53668diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
53669index aa9248f..a4e3c3b 100644
53670--- a/drivers/net/wan/hdlc_x25.c
53671+++ b/drivers/net/wan/hdlc_x25.c
53672@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
53673
53674 static int x25_open(struct net_device *dev)
53675 {
53676- struct lapb_register_struct cb;
53677+ static struct lapb_register_struct cb = {
53678+ .connect_confirmation = x25_connected,
53679+ .connect_indication = x25_connected,
53680+ .disconnect_confirmation = x25_disconnected,
53681+ .disconnect_indication = x25_disconnected,
53682+ .data_indication = x25_data_indication,
53683+ .data_transmit = x25_data_transmit
53684+ };
53685 int result;
53686
53687- cb.connect_confirmation = x25_connected;
53688- cb.connect_indication = x25_connected;
53689- cb.disconnect_confirmation = x25_disconnected;
53690- cb.disconnect_indication = x25_disconnected;
53691- cb.data_indication = x25_data_indication;
53692- cb.data_transmit = x25_data_transmit;
53693-
53694 result = lapb_register(dev, &cb);
53695 if (result != LAPB_OK)
53696 return result;
53697diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
53698index 5ad287c..783b020 100644
53699--- a/drivers/net/wimax/i2400m/usb-fw.c
53700+++ b/drivers/net/wimax/i2400m/usb-fw.c
53701@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
53702 int do_autopm = 1;
53703 DECLARE_COMPLETION_ONSTACK(notif_completion);
53704
53705+ pax_track_stack();
53706+
53707 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
53708 i2400m, ack, ack_size);
53709 BUG_ON(_ack == i2400m->bm_ack_buf);
53710diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
53711index 6c26840..62c97c3 100644
53712--- a/drivers/net/wireless/airo.c
53713+++ b/drivers/net/wireless/airo.c
53714@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
53715 BSSListElement * loop_net;
53716 BSSListElement * tmp_net;
53717
53718+ pax_track_stack();
53719+
53720 /* Blow away current list of scan results */
53721 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
53722 list_move_tail (&loop_net->list, &ai->network_free_list);
53723@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
53724 WepKeyRid wkr;
53725 int rc;
53726
53727+ pax_track_stack();
53728+
53729 memset( &mySsid, 0, sizeof( mySsid ) );
53730 kfree (ai->flash);
53731 ai->flash = NULL;
53732@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
53733 __le32 *vals = stats.vals;
53734 int len;
53735
53736+ pax_track_stack();
53737+
53738 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53739 return -ENOMEM;
53740 data = (struct proc_data *)file->private_data;
53741@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
53742 /* If doLoseSync is not 1, we won't do a Lose Sync */
53743 int doLoseSync = -1;
53744
53745+ pax_track_stack();
53746+
53747 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53748 return -ENOMEM;
53749 data = (struct proc_data *)file->private_data;
53750@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
53751 int i;
53752 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
53753
53754+ pax_track_stack();
53755+
53756 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
53757 if (!qual)
53758 return -ENOMEM;
53759@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
53760 CapabilityRid cap_rid;
53761 __le32 *vals = stats_rid.vals;
53762
53763+ pax_track_stack();
53764+
53765 /* Get stats out of the card */
53766 clear_bit(JOB_WSTATS, &local->jobs);
53767 if (local->power.event) {
53768diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
53769index 747508c..c36cb08 100644
53770--- a/drivers/net/wireless/ath/ath5k/debug.c
53771+++ b/drivers/net/wireless/ath/ath5k/debug.c
53772@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
53773 unsigned int v;
53774 u64 tsf;
53775
53776+ pax_track_stack();
53777+
53778 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
53779 len += snprintf(buf+len, sizeof(buf)-len,
53780 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
53781@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53782 unsigned int len = 0;
53783 unsigned int i;
53784
53785+ pax_track_stack();
53786+
53787 len += snprintf(buf+len, sizeof(buf)-len,
53788 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
53789
53790@@ -337,6 +341,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53791
53792 static ssize_t write_file_debug(struct file *file,
53793 const char __user *userbuf,
53794+ size_t count, loff_t *ppos) __size_overflow(3);
53795+static ssize_t write_file_debug(struct file *file,
53796+ const char __user *userbuf,
53797 size_t count, loff_t *ppos)
53798 {
53799 struct ath5k_softc *sc = file->private_data;
53800diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
53801index 2be4c22..a8ad784 100644
53802--- a/drivers/net/wireless/ath/ath9k/debug.c
53803+++ b/drivers/net/wireless/ath/ath9k/debug.c
53804@@ -56,6 +56,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53805 }
53806
53807 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53808+ size_t count, loff_t *ppos) __size_overflow(3);
53809+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
53810 size_t count, loff_t *ppos)
53811 {
53812 struct ath_softc *sc = file->private_data;
53813@@ -220,6 +222,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
53814 char buf[512];
53815 unsigned int len = 0;
53816
53817+ pax_track_stack();
53818+
53819 len += snprintf(buf + len, sizeof(buf) - len,
53820 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
53821 len += snprintf(buf + len, sizeof(buf) - len,
53822@@ -360,6 +364,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
53823 int i;
53824 u8 addr[ETH_ALEN];
53825
53826+ pax_track_stack();
53827+
53828 len += snprintf(buf + len, sizeof(buf) - len,
53829 "primary: %s (%s chan=%d ht=%d)\n",
53830 wiphy_name(sc->pri_wiphy->hw->wiphy),
53831diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
53832index 80b19a4..dab3a45 100644
53833--- a/drivers/net/wireless/b43/debugfs.c
53834+++ b/drivers/net/wireless/b43/debugfs.c
53835@@ -43,7 +43,7 @@ static struct dentry *rootdir;
53836 struct b43_debugfs_fops {
53837 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
53838 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
53839- struct file_operations fops;
53840+ const struct file_operations fops;
53841 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
53842 size_t file_struct_offset;
53843 };
53844diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
53845index 1f85ac5..c99b4b4 100644
53846--- a/drivers/net/wireless/b43legacy/debugfs.c
53847+++ b/drivers/net/wireless/b43legacy/debugfs.c
53848@@ -44,7 +44,7 @@ static struct dentry *rootdir;
53849 struct b43legacy_debugfs_fops {
53850 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
53851 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
53852- struct file_operations fops;
53853+ const struct file_operations fops;
53854 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
53855 size_t file_struct_offset;
53856 /* Take wl->irq_lock before calling read/write? */
53857diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
53858index 43102bf..3b569c3 100644
53859--- a/drivers/net/wireless/ipw2x00/ipw2100.c
53860+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
53861@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
53862 int err;
53863 DECLARE_SSID_BUF(ssid);
53864
53865+ pax_track_stack();
53866+
53867 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
53868
53869 if (ssid_len)
53870@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
53871 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
53872 int err;
53873
53874+ pax_track_stack();
53875+
53876 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
53877 idx, keylen, len);
53878
53879diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
53880index 282b1f7..169f0cf 100644
53881--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
53882+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
53883@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
53884 unsigned long flags;
53885 DECLARE_SSID_BUF(ssid);
53886
53887+ pax_track_stack();
53888+
53889 LIBIPW_DEBUG_SCAN("'%s' (%pM"
53890 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
53891 print_ssid(ssid, info_element->data, info_element->len),
53892diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
53893index 950267a..80d5fd2 100644
53894--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
53895+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
53896@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
53897 },
53898 };
53899
53900-static struct iwl_ops iwl1000_ops = {
53901+static const struct iwl_ops iwl1000_ops = {
53902 .ucode = &iwl5000_ucode,
53903 .lib = &iwl1000_lib,
53904 .hcmd = &iwl5000_hcmd,
53905diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
53906index 56bfcc3..b348020 100644
53907--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
53908+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
53909@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
53910 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
53911 };
53912
53913-static struct iwl_ops iwl3945_ops = {
53914+static const struct iwl_ops iwl3945_ops = {
53915 .ucode = &iwl3945_ucode,
53916 .lib = &iwl3945_lib,
53917 .hcmd = &iwl3945_hcmd,
53918diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
53919index 585b8d4..e142963 100644
53920--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
53921+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
53922@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
53923 },
53924 };
53925
53926-static struct iwl_ops iwl4965_ops = {
53927+static const struct iwl_ops iwl4965_ops = {
53928 .ucode = &iwl4965_ucode,
53929 .lib = &iwl4965_lib,
53930 .hcmd = &iwl4965_hcmd,
53931diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
53932index 1f423f2..e37c192 100644
53933--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
53934+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
53935@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
53936 },
53937 };
53938
53939-struct iwl_ops iwl5000_ops = {
53940+const struct iwl_ops iwl5000_ops = {
53941 .ucode = &iwl5000_ucode,
53942 .lib = &iwl5000_lib,
53943 .hcmd = &iwl5000_hcmd,
53944 .utils = &iwl5000_hcmd_utils,
53945 };
53946
53947-static struct iwl_ops iwl5150_ops = {
53948+static const struct iwl_ops iwl5150_ops = {
53949 .ucode = &iwl5000_ucode,
53950 .lib = &iwl5150_lib,
53951 .hcmd = &iwl5000_hcmd,
53952diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
53953index 1473452..f07d5e1 100644
53954--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
53955+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
53956@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
53957 .calc_rssi = iwl5000_calc_rssi,
53958 };
53959
53960-static struct iwl_ops iwl6000_ops = {
53961+static const struct iwl_ops iwl6000_ops = {
53962 .ucode = &iwl5000_ucode,
53963 .lib = &iwl6000_lib,
53964 .hcmd = &iwl5000_hcmd,
53965diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
53966index 1a3dfa2..b3e0a61 100644
53967--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
53968+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
53969@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
53970 u8 active_index = 0;
53971 s32 tpt = 0;
53972
53973+ pax_track_stack();
53974+
53975 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
53976
53977 if (!ieee80211_is_data(hdr->frame_control) ||
53978@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
53979 u8 valid_tx_ant = 0;
53980 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
53981
53982+ pax_track_stack();
53983+
53984 /* Override starting rate (index 0) if needed for debug purposes */
53985 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
53986
53987diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
53988index 0e56d78..6a3c107 100644
53989--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
53990+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
53991@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
53992 if (iwl_debug_level & IWL_DL_INFO)
53993 dev_printk(KERN_DEBUG, &(pdev->dev),
53994 "Disabling hw_scan\n");
53995- iwl_hw_ops.hw_scan = NULL;
53996+ pax_open_kernel();
53997+ *(void **)&iwl_hw_ops.hw_scan = NULL;
53998+ pax_close_kernel();
53999 }
54000
54001 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
54002diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
54003index cbc6290..eb323d7 100644
54004--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
54005+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
54006@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
54007 #endif
54008
54009 #else
54010-#define IWL_DEBUG(__priv, level, fmt, args...)
54011-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
54012+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
54013+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
54014 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
54015 void *p, u32 len)
54016 {}
54017diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54018index a198bcf..8e68233 100644
54019--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54020+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
54021@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
54022 int pos = 0;
54023 const size_t bufsz = sizeof(buf);
54024
54025+ pax_track_stack();
54026+
54027 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
54028 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
54029 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
54030@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
54031 const size_t bufsz = sizeof(buf);
54032 ssize_t ret;
54033
54034+ pax_track_stack();
54035+
54036 for (i = 0; i < AC_NUM; i++) {
54037 pos += scnprintf(buf + pos, bufsz - pos,
54038 "\tcw_min\tcw_max\taifsn\ttxop\n");
54039diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
54040index 3539ea4..b174bfa 100644
54041--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
54042+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
54043@@ -68,7 +68,7 @@ struct iwl_tx_queue;
54044
54045 /* shared structures from iwl-5000.c */
54046 extern struct iwl_mod_params iwl50_mod_params;
54047-extern struct iwl_ops iwl5000_ops;
54048+extern const struct iwl_ops iwl5000_ops;
54049 extern struct iwl_ucode_ops iwl5000_ucode;
54050 extern struct iwl_lib_ops iwl5000_lib;
54051 extern struct iwl_hcmd_ops iwl5000_hcmd;
54052diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54053index 619590d..69235ee 100644
54054--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
54055+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
54056@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
54057 */
54058 if (iwl3945_mod_params.disable_hw_scan) {
54059 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
54060- iwl3945_hw_ops.hw_scan = NULL;
54061+ pax_open_kernel();
54062+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
54063+ pax_close_kernel();
54064 }
54065
54066
54067diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54068index 1465379..fe4d78b 100644
54069--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
54070+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
54071@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
54072 int buf_len = 512;
54073 size_t len = 0;
54074
54075+ pax_track_stack();
54076+
54077 if (*ppos != 0)
54078 return 0;
54079 if (count < sizeof(buf))
54080diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
54081index 893a55c..7f66a50 100644
54082--- a/drivers/net/wireless/libertas/debugfs.c
54083+++ b/drivers/net/wireless/libertas/debugfs.c
54084@@ -708,7 +708,7 @@ out_unlock:
54085 struct lbs_debugfs_files {
54086 const char *name;
54087 int perm;
54088- struct file_operations fops;
54089+ const struct file_operations fops;
54090 };
54091
54092 static const struct lbs_debugfs_files debugfs_files[] = {
54093diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
54094index 2ecbedb..42704f0 100644
54095--- a/drivers/net/wireless/rndis_wlan.c
54096+++ b/drivers/net/wireless/rndis_wlan.c
54097@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
54098
54099 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
54100
54101- if (rts_threshold < 0 || rts_threshold > 2347)
54102+ if (rts_threshold > 2347)
54103 rts_threshold = 2347;
54104
54105 tmp = cpu_to_le32(rts_threshold);
54106diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
54107index 334ccd6..47f8944 100644
54108--- a/drivers/oprofile/buffer_sync.c
54109+++ b/drivers/oprofile/buffer_sync.c
54110@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
54111 if (cookie == NO_COOKIE)
54112 offset = pc;
54113 if (cookie == INVALID_COOKIE) {
54114- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54115+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54116 offset = pc;
54117 }
54118 if (cookie != last_cookie) {
54119@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
54120 /* add userspace sample */
54121
54122 if (!mm) {
54123- atomic_inc(&oprofile_stats.sample_lost_no_mm);
54124+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
54125 return 0;
54126 }
54127
54128 cookie = lookup_dcookie(mm, s->eip, &offset);
54129
54130 if (cookie == INVALID_COOKIE) {
54131- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
54132+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
54133 return 0;
54134 }
54135
54136@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
54137 /* ignore backtraces if failed to add a sample */
54138 if (state == sb_bt_start) {
54139 state = sb_bt_ignore;
54140- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
54141+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
54142 }
54143 }
54144 release_mm(mm);
54145diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
54146index 5df60a6..72f5c1c 100644
54147--- a/drivers/oprofile/event_buffer.c
54148+++ b/drivers/oprofile/event_buffer.c
54149@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
54150 }
54151
54152 if (buffer_pos == buffer_size) {
54153- atomic_inc(&oprofile_stats.event_lost_overflow);
54154+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
54155 return;
54156 }
54157
54158diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
54159index dc8a042..fe5f315 100644
54160--- a/drivers/oprofile/oprof.c
54161+++ b/drivers/oprofile/oprof.c
54162@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
54163 if (oprofile_ops.switch_events())
54164 return;
54165
54166- atomic_inc(&oprofile_stats.multiplex_counter);
54167+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
54168 start_switch_worker();
54169 }
54170
54171diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
54172index bbd7516..1f97f55 100644
54173--- a/drivers/oprofile/oprofile_files.c
54174+++ b/drivers/oprofile/oprofile_files.c
54175@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
54176
54177
54178 static ssize_t timeout_write(struct file *file, char const __user *buf,
54179+ size_t count, loff_t *offset) __size_overflow(3);
54180+static ssize_t timeout_write(struct file *file, char const __user *buf,
54181 size_t count, loff_t *offset)
54182 {
54183 unsigned long val;
54184@@ -71,6 +73,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
54185 }
54186
54187
54188+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54189 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54190 {
54191 unsigned long val;
54192@@ -119,12 +122,14 @@ static const struct file_operations cpu_type_fops = {
54193 };
54194
54195
54196+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54197 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
54198 {
54199 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
54200 }
54201
54202
54203+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54204 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54205 {
54206 unsigned long val;
54207diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
54208index 61689e8..387f7f8 100644
54209--- a/drivers/oprofile/oprofile_stats.c
54210+++ b/drivers/oprofile/oprofile_stats.c
54211@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
54212 cpu_buf->sample_invalid_eip = 0;
54213 }
54214
54215- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
54216- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
54217- atomic_set(&oprofile_stats.event_lost_overflow, 0);
54218- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
54219- atomic_set(&oprofile_stats.multiplex_counter, 0);
54220+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
54221+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
54222+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
54223+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
54224+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
54225 }
54226
54227
54228diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
54229index 0b54e46..a37c527 100644
54230--- a/drivers/oprofile/oprofile_stats.h
54231+++ b/drivers/oprofile/oprofile_stats.h
54232@@ -13,11 +13,11 @@
54233 #include <asm/atomic.h>
54234
54235 struct oprofile_stat_struct {
54236- atomic_t sample_lost_no_mm;
54237- atomic_t sample_lost_no_mapping;
54238- atomic_t bt_lost_no_mapping;
54239- atomic_t event_lost_overflow;
54240- atomic_t multiplex_counter;
54241+ atomic_unchecked_t sample_lost_no_mm;
54242+ atomic_unchecked_t sample_lost_no_mapping;
54243+ atomic_unchecked_t bt_lost_no_mapping;
54244+ atomic_unchecked_t event_lost_overflow;
54245+ atomic_unchecked_t multiplex_counter;
54246 };
54247
54248 extern struct oprofile_stat_struct oprofile_stats;
54249diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
54250index 2766a6d..4d533c7 100644
54251--- a/drivers/oprofile/oprofilefs.c
54252+++ b/drivers/oprofile/oprofilefs.c
54253@@ -89,6 +89,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
54254 }
54255
54256
54257+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
54258 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
54259 {
54260 unsigned long *value = file->private_data;
54261@@ -187,7 +188,7 @@ static const struct file_operations atomic_ro_fops = {
54262
54263
54264 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
54265- char const *name, atomic_t *val)
54266+ char const *name, atomic_unchecked_t *val)
54267 {
54268 struct dentry *d = __oprofilefs_create_file(sb, root, name,
54269 &atomic_ro_fops, 0444);
54270diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
54271index 13a64bc..ad62835 100644
54272--- a/drivers/parisc/pdc_stable.c
54273+++ b/drivers/parisc/pdc_stable.c
54274@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
54275 return ret;
54276 }
54277
54278-static struct sysfs_ops pdcspath_attr_ops = {
54279+static const struct sysfs_ops pdcspath_attr_ops = {
54280 .show = pdcspath_attr_show,
54281 .store = pdcspath_attr_store,
54282 };
54283diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
54284index 8eefe56..40751a7 100644
54285--- a/drivers/parport/procfs.c
54286+++ b/drivers/parport/procfs.c
54287@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
54288
54289 *ppos += len;
54290
54291- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
54292+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
54293 }
54294
54295 #ifdef CONFIG_PARPORT_1284
54296@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
54297
54298 *ppos += len;
54299
54300- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
54301+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
54302 }
54303 #endif /* IEEE1284.3 support. */
54304
54305diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
54306index 73e7d8e..c80f3d2 100644
54307--- a/drivers/pci/hotplug/acpiphp_glue.c
54308+++ b/drivers/pci/hotplug/acpiphp_glue.c
54309@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
54310 }
54311
54312
54313-static struct acpi_dock_ops acpiphp_dock_ops = {
54314+static const struct acpi_dock_ops acpiphp_dock_ops = {
54315 .handler = handle_hotplug_event_func,
54316 };
54317
54318diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
54319index 9fff878..ad0ad53 100644
54320--- a/drivers/pci/hotplug/cpci_hotplug.h
54321+++ b/drivers/pci/hotplug/cpci_hotplug.h
54322@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
54323 int (*hardware_test) (struct slot* slot, u32 value);
54324 u8 (*get_power) (struct slot* slot);
54325 int (*set_power) (struct slot* slot, int value);
54326-};
54327+} __no_const;
54328
54329 struct cpci_hp_controller {
54330 unsigned int irq;
54331diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
54332index 76ba8a1..20ca857 100644
54333--- a/drivers/pci/hotplug/cpqphp_nvram.c
54334+++ b/drivers/pci/hotplug/cpqphp_nvram.c
54335@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
54336
54337 void compaq_nvram_init (void __iomem *rom_start)
54338 {
54339+
54340+#ifndef CONFIG_PAX_KERNEXEC
54341 if (rom_start) {
54342 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
54343 }
54344+#endif
54345+
54346 dbg("int15 entry = %p\n", compaq_int15_entry_point);
54347
54348 /* initialize our int15 lock */
54349diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
54350index 6151389..0a894ef 100644
54351--- a/drivers/pci/hotplug/fakephp.c
54352+++ b/drivers/pci/hotplug/fakephp.c
54353@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
54354 }
54355
54356 static struct kobj_type legacy_ktype = {
54357- .sysfs_ops = &(struct sysfs_ops){
54358+ .sysfs_ops = &(const struct sysfs_ops){
54359 .store = legacy_store, .show = legacy_show
54360 },
54361 .release = &legacy_release,
54362diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
54363index 5b680df..fe05b7e 100644
54364--- a/drivers/pci/intel-iommu.c
54365+++ b/drivers/pci/intel-iommu.c
54366@@ -2643,7 +2643,7 @@ error:
54367 return 0;
54368 }
54369
54370-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
54371+dma_addr_t intel_map_page(struct device *dev, struct page *page,
54372 unsigned long offset, size_t size,
54373 enum dma_data_direction dir,
54374 struct dma_attrs *attrs)
54375@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
54376 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
54377 }
54378
54379-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54380+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54381 size_t size, enum dma_data_direction dir,
54382 struct dma_attrs *attrs)
54383 {
54384@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
54385 }
54386 }
54387
54388-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54389+void *intel_alloc_coherent(struct device *hwdev, size_t size,
54390 dma_addr_t *dma_handle, gfp_t flags)
54391 {
54392 void *vaddr;
54393@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
54394 return NULL;
54395 }
54396
54397-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54398+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54399 dma_addr_t dma_handle)
54400 {
54401 int order;
54402@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
54403 free_pages((unsigned long)vaddr, order);
54404 }
54405
54406-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54407+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
54408 int nelems, enum dma_data_direction dir,
54409 struct dma_attrs *attrs)
54410 {
54411@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
54412 return nelems;
54413 }
54414
54415-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54416+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
54417 enum dma_data_direction dir, struct dma_attrs *attrs)
54418 {
54419 int i;
54420@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
54421 return nelems;
54422 }
54423
54424-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54425+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
54426 {
54427 return !dma_addr;
54428 }
54429
54430-struct dma_map_ops intel_dma_ops = {
54431+const struct dma_map_ops intel_dma_ops = {
54432 .alloc_coherent = intel_alloc_coherent,
54433 .free_coherent = intel_free_coherent,
54434 .map_sg = intel_map_sg,
54435diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
54436index 5b7056c..607bc94 100644
54437--- a/drivers/pci/pcie/aspm.c
54438+++ b/drivers/pci/pcie/aspm.c
54439@@ -27,9 +27,9 @@
54440 #define MODULE_PARAM_PREFIX "pcie_aspm."
54441
54442 /* Note: those are not register definitions */
54443-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
54444-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
54445-#define ASPM_STATE_L1 (4) /* L1 state */
54446+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
54447+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
54448+#define ASPM_STATE_L1 (4U) /* L1 state */
54449 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
54450 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
54451
54452diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
54453index 8105e32..ca10419 100644
54454--- a/drivers/pci/probe.c
54455+++ b/drivers/pci/probe.c
54456@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
54457 return ret;
54458 }
54459
54460-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
54461+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
54462 struct device_attribute *attr,
54463 char *buf)
54464 {
54465 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
54466 }
54467
54468-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
54469+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
54470 struct device_attribute *attr,
54471 char *buf)
54472 {
54473diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
54474index a03ad8c..024b0da 100644
54475--- a/drivers/pci/proc.c
54476+++ b/drivers/pci/proc.c
54477@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
54478 static int __init pci_proc_init(void)
54479 {
54480 struct pci_dev *dev = NULL;
54481+
54482+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54483+#ifdef CONFIG_GRKERNSEC_PROC_USER
54484+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
54485+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54486+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54487+#endif
54488+#else
54489 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
54490+#endif
54491 proc_create("devices", 0, proc_bus_pci_dir,
54492 &proc_bus_pci_dev_operations);
54493 proc_initialized = 1;
54494diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
54495index 8c02b6c..5584d8e 100644
54496--- a/drivers/pci/slot.c
54497+++ b/drivers/pci/slot.c
54498@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
54499 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
54500 }
54501
54502-static struct sysfs_ops pci_slot_sysfs_ops = {
54503+static const struct sysfs_ops pci_slot_sysfs_ops = {
54504 .show = pci_slot_attr_show,
54505 .store = pci_slot_attr_store,
54506 };
54507diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
54508index 30cf71d2..50938f1 100644
54509--- a/drivers/pcmcia/pcmcia_ioctl.c
54510+++ b/drivers/pcmcia/pcmcia_ioctl.c
54511@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
54512 return -EFAULT;
54513 }
54514 }
54515- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54516+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
54517 if (!buf)
54518 return -ENOMEM;
54519
54520diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
54521index 52183c4..b224c69 100644
54522--- a/drivers/platform/x86/acer-wmi.c
54523+++ b/drivers/platform/x86/acer-wmi.c
54524@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
54525 return 0;
54526 }
54527
54528-static struct backlight_ops acer_bl_ops = {
54529+static const struct backlight_ops acer_bl_ops = {
54530 .get_brightness = read_brightness,
54531 .update_status = update_bl_status,
54532 };
54533diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
54534index 767cb61..a87380b 100644
54535--- a/drivers/platform/x86/asus-laptop.c
54536+++ b/drivers/platform/x86/asus-laptop.c
54537@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
54538 */
54539 static int read_brightness(struct backlight_device *bd);
54540 static int update_bl_status(struct backlight_device *bd);
54541-static struct backlight_ops asusbl_ops = {
54542+static const struct backlight_ops asusbl_ops = {
54543 .get_brightness = read_brightness,
54544 .update_status = update_bl_status,
54545 };
54546diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
54547index d66c07a..a4abaac 100644
54548--- a/drivers/platform/x86/asus_acpi.c
54549+++ b/drivers/platform/x86/asus_acpi.c
54550@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
54551 return 0;
54552 }
54553
54554-static struct backlight_ops asus_backlight_data = {
54555+static const struct backlight_ops asus_backlight_data = {
54556 .get_brightness = read_brightness,
54557 .update_status = set_brightness_status,
54558 };
54559diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
54560index 11003bb..550ff1b 100644
54561--- a/drivers/platform/x86/compal-laptop.c
54562+++ b/drivers/platform/x86/compal-laptop.c
54563@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
54564 return set_lcd_level(b->props.brightness);
54565 }
54566
54567-static struct backlight_ops compalbl_ops = {
54568+static const struct backlight_ops compalbl_ops = {
54569 .get_brightness = bl_get_brightness,
54570 .update_status = bl_update_status,
54571 };
54572diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
54573index 07a74da..9dc99fa 100644
54574--- a/drivers/platform/x86/dell-laptop.c
54575+++ b/drivers/platform/x86/dell-laptop.c
54576@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
54577 return buffer.output[1];
54578 }
54579
54580-static struct backlight_ops dell_ops = {
54581+static const struct backlight_ops dell_ops = {
54582 .get_brightness = dell_get_intensity,
54583 .update_status = dell_send_intensity,
54584 };
54585diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
54586index c533b1c..5c81f22 100644
54587--- a/drivers/platform/x86/eeepc-laptop.c
54588+++ b/drivers/platform/x86/eeepc-laptop.c
54589@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
54590 */
54591 static int read_brightness(struct backlight_device *bd);
54592 static int update_bl_status(struct backlight_device *bd);
54593-static struct backlight_ops eeepcbl_ops = {
54594+static const struct backlight_ops eeepcbl_ops = {
54595 .get_brightness = read_brightness,
54596 .update_status = update_bl_status,
54597 };
54598diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
54599index bcd4ba8..a249b35 100644
54600--- a/drivers/platform/x86/fujitsu-laptop.c
54601+++ b/drivers/platform/x86/fujitsu-laptop.c
54602@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
54603 return ret;
54604 }
54605
54606-static struct backlight_ops fujitsubl_ops = {
54607+static const struct backlight_ops fujitsubl_ops = {
54608 .get_brightness = bl_get_brightness,
54609 .update_status = bl_update_status,
54610 };
54611diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
54612index 759763d..1093ba2 100644
54613--- a/drivers/platform/x86/msi-laptop.c
54614+++ b/drivers/platform/x86/msi-laptop.c
54615@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
54616 return set_lcd_level(b->props.brightness);
54617 }
54618
54619-static struct backlight_ops msibl_ops = {
54620+static const struct backlight_ops msibl_ops = {
54621 .get_brightness = bl_get_brightness,
54622 .update_status = bl_update_status,
54623 };
54624diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
54625index fe7cf01..9012d8d 100644
54626--- a/drivers/platform/x86/panasonic-laptop.c
54627+++ b/drivers/platform/x86/panasonic-laptop.c
54628@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
54629 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
54630 }
54631
54632-static struct backlight_ops pcc_backlight_ops = {
54633+static const struct backlight_ops pcc_backlight_ops = {
54634 .get_brightness = bl_get,
54635 .update_status = bl_set_status,
54636 };
54637diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
54638index a2a742c..b37e25e 100644
54639--- a/drivers/platform/x86/sony-laptop.c
54640+++ b/drivers/platform/x86/sony-laptop.c
54641@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
54642 }
54643
54644 static struct backlight_device *sony_backlight_device;
54645-static struct backlight_ops sony_backlight_ops = {
54646+static const struct backlight_ops sony_backlight_ops = {
54647 .update_status = sony_backlight_update_status,
54648 .get_brightness = sony_backlight_get_brightness,
54649 };
54650diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
54651index 68271ae..5e8fb10 100644
54652--- a/drivers/platform/x86/thinkpad_acpi.c
54653+++ b/drivers/platform/x86/thinkpad_acpi.c
54654@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
54655 return 0;
54656 }
54657
54658-void static hotkey_mask_warn_incomplete_mask(void)
54659+static void hotkey_mask_warn_incomplete_mask(void)
54660 {
54661 /* log only what the user can fix... */
54662 const u32 wantedmask = hotkey_driver_mask &
54663@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
54664 BACKLIGHT_UPDATE_HOTKEY);
54665 }
54666
54667-static struct backlight_ops ibm_backlight_data = {
54668+static const struct backlight_ops ibm_backlight_data = {
54669 .get_brightness = brightness_get,
54670 .update_status = brightness_update_status,
54671 };
54672diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
54673index 51c0a8b..0786629 100644
54674--- a/drivers/platform/x86/toshiba_acpi.c
54675+++ b/drivers/platform/x86/toshiba_acpi.c
54676@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
54677 return AE_OK;
54678 }
54679
54680-static struct backlight_ops toshiba_backlight_data = {
54681+static const struct backlight_ops toshiba_backlight_data = {
54682 .get_brightness = get_lcd,
54683 .update_status = set_lcd_status,
54684 };
54685diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
54686index fc83783c..cf370d7 100644
54687--- a/drivers/pnp/pnpbios/bioscalls.c
54688+++ b/drivers/pnp/pnpbios/bioscalls.c
54689@@ -60,7 +60,7 @@ do { \
54690 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
54691 } while(0)
54692
54693-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
54694+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
54695 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
54696
54697 /*
54698@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54699
54700 cpu = get_cpu();
54701 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
54702+
54703+ pax_open_kernel();
54704 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
54705+ pax_close_kernel();
54706
54707 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
54708 spin_lock_irqsave(&pnp_bios_lock, flags);
54709@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54710 :"memory");
54711 spin_unlock_irqrestore(&pnp_bios_lock, flags);
54712
54713+ pax_open_kernel();
54714 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
54715+ pax_close_kernel();
54716+
54717 put_cpu();
54718
54719 /* If we get here and this is set then the PnP BIOS faulted on us. */
54720@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
54721 return status;
54722 }
54723
54724-void pnpbios_calls_init(union pnp_bios_install_struct *header)
54725+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
54726 {
54727 int i;
54728
54729@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54730 pnp_bios_callpoint.offset = header->fields.pm16offset;
54731 pnp_bios_callpoint.segment = PNP_CS16;
54732
54733+ pax_open_kernel();
54734+
54735 for_each_possible_cpu(i) {
54736 struct desc_struct *gdt = get_cpu_gdt_table(i);
54737 if (!gdt)
54738@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54739 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
54740 (unsigned long)__va(header->fields.pm16dseg));
54741 }
54742+
54743+ pax_close_kernel();
54744 }
54745diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
54746index ba97654..66b99d4 100644
54747--- a/drivers/pnp/resource.c
54748+++ b/drivers/pnp/resource.c
54749@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
54750 return 1;
54751
54752 /* check if the resource is valid */
54753- if (*irq < 0 || *irq > 15)
54754+ if (*irq > 15)
54755 return 0;
54756
54757 /* check if the resource is reserved */
54758@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
54759 return 1;
54760
54761 /* check if the resource is valid */
54762- if (*dma < 0 || *dma == 4 || *dma > 7)
54763+ if (*dma == 4 || *dma > 7)
54764 return 0;
54765
54766 /* check if the resource is reserved */
54767diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
54768index 62bb981..24a2dc9 100644
54769--- a/drivers/power/bq27x00_battery.c
54770+++ b/drivers/power/bq27x00_battery.c
54771@@ -44,7 +44,7 @@ struct bq27x00_device_info;
54772 struct bq27x00_access_methods {
54773 int (*read)(u8 reg, int *rt_value, int b_single,
54774 struct bq27x00_device_info *di);
54775-};
54776+} __no_const;
54777
54778 struct bq27x00_device_info {
54779 struct device *dev;
54780diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
54781index 62227cd..b5b538b 100644
54782--- a/drivers/rtc/rtc-dev.c
54783+++ b/drivers/rtc/rtc-dev.c
54784@@ -14,6 +14,7 @@
54785 #include <linux/module.h>
54786 #include <linux/rtc.h>
54787 #include <linux/sched.h>
54788+#include <linux/grsecurity.h>
54789 #include "rtc-core.h"
54790
54791 static dev_t rtc_devt;
54792@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
54793 if (copy_from_user(&tm, uarg, sizeof(tm)))
54794 return -EFAULT;
54795
54796+ gr_log_timechange();
54797+
54798 return rtc_set_time(rtc, &tm);
54799
54800 case RTC_PIE_ON:
54801diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
54802index 968e3c7..fbc637a 100644
54803--- a/drivers/s390/cio/qdio_perf.c
54804+++ b/drivers/s390/cio/qdio_perf.c
54805@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
54806 static int qdio_perf_proc_show(struct seq_file *m, void *v)
54807 {
54808 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
54809- (long)atomic_long_read(&perf_stats.qdio_int));
54810+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
54811 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
54812- (long)atomic_long_read(&perf_stats.pci_int));
54813+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
54814 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
54815- (long)atomic_long_read(&perf_stats.thin_int));
54816+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
54817 seq_printf(m, "\n");
54818 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
54819- (long)atomic_long_read(&perf_stats.tasklet_inbound));
54820+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
54821 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
54822- (long)atomic_long_read(&perf_stats.tasklet_outbound));
54823+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
54824 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
54825- (long)atomic_long_read(&perf_stats.tasklet_thinint),
54826- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
54827+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
54828+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
54829 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
54830- (long)atomic_long_read(&perf_stats.thinint_inbound),
54831- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
54832+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
54833+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
54834 seq_printf(m, "\n");
54835 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
54836- (long)atomic_long_read(&perf_stats.siga_in));
54837+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
54838 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54839- (long)atomic_long_read(&perf_stats.siga_out));
54840+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
54841 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
54842- (long)atomic_long_read(&perf_stats.siga_sync));
54843+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
54844 seq_printf(m, "\n");
54845 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
54846- (long)atomic_long_read(&perf_stats.inbound_handler));
54847+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
54848 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
54849- (long)atomic_long_read(&perf_stats.outbound_handler));
54850+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
54851 seq_printf(m, "\n");
54852 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
54853- (long)atomic_long_read(&perf_stats.fast_requeue));
54854+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
54855 seq_printf(m, "Number of outbound target full condition\t: %li\n",
54856- (long)atomic_long_read(&perf_stats.outbound_target_full));
54857+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
54858 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
54859- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
54860+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
54861 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
54862- (long)atomic_long_read(&perf_stats.debug_stop_polling));
54863+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
54864 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
54865- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
54866+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
54867 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
54868- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
54869- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
54870+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
54871+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
54872 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
54873- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
54874- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
54875+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
54876+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
54877 seq_printf(m, "\n");
54878 return 0;
54879 }
54880diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
54881index ff4504c..b3604c3 100644
54882--- a/drivers/s390/cio/qdio_perf.h
54883+++ b/drivers/s390/cio/qdio_perf.h
54884@@ -13,46 +13,46 @@
54885
54886 struct qdio_perf_stats {
54887 /* interrupt handler calls */
54888- atomic_long_t qdio_int;
54889- atomic_long_t pci_int;
54890- atomic_long_t thin_int;
54891+ atomic_long_unchecked_t qdio_int;
54892+ atomic_long_unchecked_t pci_int;
54893+ atomic_long_unchecked_t thin_int;
54894
54895 /* tasklet runs */
54896- atomic_long_t tasklet_inbound;
54897- atomic_long_t tasklet_outbound;
54898- atomic_long_t tasklet_thinint;
54899- atomic_long_t tasklet_thinint_loop;
54900- atomic_long_t thinint_inbound;
54901- atomic_long_t thinint_inbound_loop;
54902- atomic_long_t thinint_inbound_loop2;
54903+ atomic_long_unchecked_t tasklet_inbound;
54904+ atomic_long_unchecked_t tasklet_outbound;
54905+ atomic_long_unchecked_t tasklet_thinint;
54906+ atomic_long_unchecked_t tasklet_thinint_loop;
54907+ atomic_long_unchecked_t thinint_inbound;
54908+ atomic_long_unchecked_t thinint_inbound_loop;
54909+ atomic_long_unchecked_t thinint_inbound_loop2;
54910
54911 /* signal adapter calls */
54912- atomic_long_t siga_out;
54913- atomic_long_t siga_in;
54914- atomic_long_t siga_sync;
54915+ atomic_long_unchecked_t siga_out;
54916+ atomic_long_unchecked_t siga_in;
54917+ atomic_long_unchecked_t siga_sync;
54918
54919 /* misc */
54920- atomic_long_t inbound_handler;
54921- atomic_long_t outbound_handler;
54922- atomic_long_t fast_requeue;
54923- atomic_long_t outbound_target_full;
54924+ atomic_long_unchecked_t inbound_handler;
54925+ atomic_long_unchecked_t outbound_handler;
54926+ atomic_long_unchecked_t fast_requeue;
54927+ atomic_long_unchecked_t outbound_target_full;
54928
54929 /* for debugging */
54930- atomic_long_t debug_tl_out_timer;
54931- atomic_long_t debug_stop_polling;
54932- atomic_long_t debug_eqbs_all;
54933- atomic_long_t debug_eqbs_incomplete;
54934- atomic_long_t debug_sqbs_all;
54935- atomic_long_t debug_sqbs_incomplete;
54936+ atomic_long_unchecked_t debug_tl_out_timer;
54937+ atomic_long_unchecked_t debug_stop_polling;
54938+ atomic_long_unchecked_t debug_eqbs_all;
54939+ atomic_long_unchecked_t debug_eqbs_incomplete;
54940+ atomic_long_unchecked_t debug_sqbs_all;
54941+ atomic_long_unchecked_t debug_sqbs_incomplete;
54942 };
54943
54944 extern struct qdio_perf_stats perf_stats;
54945 extern int qdio_performance_stats;
54946
54947-static inline void qdio_perf_stat_inc(atomic_long_t *count)
54948+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
54949 {
54950 if (qdio_performance_stats)
54951- atomic_long_inc(count);
54952+ atomic_long_inc_unchecked(count);
54953 }
54954
54955 int qdio_setup_perf_stats(void);
54956diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
54957new file mode 100644
54958index 0000000..7d18a18
54959--- /dev/null
54960+++ b/drivers/scsi/3w-sas.c
54961@@ -0,0 +1,1933 @@
54962+/*
54963+ 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
54964+
54965+ Written By: Adam Radford <linuxraid@lsi.com>
54966+
54967+ Copyright (C) 2009 LSI Corporation.
54968+
54969+ This program is free software; you can redistribute it and/or modify
54970+ it under the terms of the GNU General Public License as published by
54971+ the Free Software Foundation; version 2 of the License.
54972+
54973+ This program is distributed in the hope that it will be useful,
54974+ but WITHOUT ANY WARRANTY; without even the implied warranty of
54975+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
54976+ GNU General Public License for more details.
54977+
54978+ NO WARRANTY
54979+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
54980+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
54981+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
54982+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
54983+ solely responsible for determining the appropriateness of using and
54984+ distributing the Program and assumes all risks associated with its
54985+ exercise of rights under this Agreement, including but not limited to
54986+ the risks and costs of program errors, damage to or loss of data,
54987+ programs or equipment, and unavailability or interruption of operations.
54988+
54989+ DISCLAIMER OF LIABILITY
54990+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
54991+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54992+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
54993+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
54994+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
54995+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
54996+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
54997+
54998+ You should have received a copy of the GNU General Public License
54999+ along with this program; if not, write to the Free Software
55000+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
55001+
55002+ Controllers supported by this driver:
55003+
55004+ LSI 3ware 9750 6Gb/s SAS/SATA-RAID
55005+
55006+ Bugs/Comments/Suggestions should be mailed to:
55007+ linuxraid@lsi.com
55008+
55009+ For more information, goto:
55010+ http://www.lsi.com
55011+
55012+ History
55013+ -------
55014+ 3.26.00.000 - Initial driver release.
55015+*/
55016+
55017+#include <linux/module.h>
55018+#include <linux/reboot.h>
55019+#include <linux/spinlock.h>
55020+#include <linux/interrupt.h>
55021+#include <linux/moduleparam.h>
55022+#include <linux/errno.h>
55023+#include <linux/types.h>
55024+#include <linux/delay.h>
55025+#include <linux/pci.h>
55026+#include <linux/time.h>
55027+#include <linux/mutex.h>
55028+#include <linux/smp_lock.h>
55029+#include <asm/io.h>
55030+#include <asm/irq.h>
55031+#include <asm/uaccess.h>
55032+#include <scsi/scsi.h>
55033+#include <scsi/scsi_host.h>
55034+#include <scsi/scsi_tcq.h>
55035+#include <scsi/scsi_cmnd.h>
55036+#include "3w-sas.h"
55037+
55038+/* Globals */
55039+#define TW_DRIVER_VERSION "3.26.00.028-2.6.32RH"
55040+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
55041+static unsigned int twl_device_extension_count;
55042+static int twl_major = -1;
55043+extern struct timezone sys_tz;
55044+
55045+/* Module parameters */
55046+MODULE_AUTHOR ("LSI");
55047+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
55048+MODULE_LICENSE("GPL");
55049+MODULE_VERSION(TW_DRIVER_VERSION);
55050+
55051+static int use_msi = 0;
55052+module_param(use_msi, int, S_IRUGO);
55053+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
55054+
55055+/* Function prototypes */
55056+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
55057+
55058+/* Functions */
55059+
55060+/* This function returns AENs through sysfs */
55061+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
55062+ struct bin_attribute *bin_attr,
55063+ char *outbuf, loff_t offset, size_t count)
55064+{
55065+ struct device *dev = container_of(kobj, struct device, kobj);
55066+ struct Scsi_Host *shost = class_to_shost(dev);
55067+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55068+ unsigned long flags = 0;
55069+ ssize_t ret;
55070+
55071+ if (!capable(CAP_SYS_ADMIN))
55072+ return -EACCES;
55073+
55074+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55075+ ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
55076+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55077+
55078+ return ret;
55079+} /* End twl_sysfs_aen_read() */
55080+
55081+/* aen_read sysfs attribute initializer */
55082+static struct bin_attribute twl_sysfs_aen_read_attr = {
55083+ .attr = {
55084+ .name = "3ware_aen_read",
55085+ .mode = S_IRUSR,
55086+ },
55087+ .size = 0,
55088+ .read = twl_sysfs_aen_read
55089+};
55090+
55091+/* This function returns driver compatibility info through sysfs */
55092+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
55093+ struct bin_attribute *bin_attr,
55094+ char *outbuf, loff_t offset, size_t count)
55095+{
55096+ struct device *dev = container_of(kobj, struct device, kobj);
55097+ struct Scsi_Host *shost = class_to_shost(dev);
55098+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
55099+ unsigned long flags = 0;
55100+ ssize_t ret;
55101+
55102+ if (!capable(CAP_SYS_ADMIN))
55103+ return -EACCES;
55104+
55105+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55106+ ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
55107+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55108+
55109+ return ret;
55110+} /* End twl_sysfs_compat_info() */
55111+
55112+/* compat_info sysfs attribute initializer */
55113+static struct bin_attribute twl_sysfs_compat_info_attr = {
55114+ .attr = {
55115+ .name = "3ware_compat_info",
55116+ .mode = S_IRUSR,
55117+ },
55118+ .size = 0,
55119+ .read = twl_sysfs_compat_info
55120+};
55121+
55122+/* Show some statistics about the card */
55123+static ssize_t twl_show_stats(struct device *dev,
55124+ struct device_attribute *attr, char *buf)
55125+{
55126+ struct Scsi_Host *host = class_to_shost(dev);
55127+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
55128+ unsigned long flags = 0;
55129+ ssize_t len;
55130+
55131+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55132+ len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
55133+ "Current commands posted: %4d\n"
55134+ "Max commands posted: %4d\n"
55135+ "Last sgl length: %4d\n"
55136+ "Max sgl length: %4d\n"
55137+ "Last sector count: %4d\n"
55138+ "Max sector count: %4d\n"
55139+ "SCSI Host Resets: %4d\n"
55140+ "AEN's: %4d\n",
55141+ TW_DRIVER_VERSION,
55142+ tw_dev->posted_request_count,
55143+ tw_dev->max_posted_request_count,
55144+ tw_dev->sgl_entries,
55145+ tw_dev->max_sgl_entries,
55146+ tw_dev->sector_count,
55147+ tw_dev->max_sector_count,
55148+ tw_dev->num_resets,
55149+ tw_dev->aen_count);
55150+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55151+ return len;
55152+} /* End twl_show_stats() */
55153+
55154+/* This function will set a devices queue depth */
55155+static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
55156+ int reason)
55157+{
55158+ if (reason != SCSI_QDEPTH_DEFAULT)
55159+ return -EOPNOTSUPP;
55160+
55161+ if (queue_depth > TW_Q_LENGTH-2)
55162+ queue_depth = TW_Q_LENGTH-2;
55163+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
55164+ return queue_depth;
55165+} /* End twl_change_queue_depth() */
55166+
55167+/* stats sysfs attribute initializer */
55168+static struct device_attribute twl_host_stats_attr = {
55169+ .attr = {
55170+ .name = "3ware_stats",
55171+ .mode = S_IRUGO,
55172+ },
55173+ .show = twl_show_stats
55174+};
55175+
55176+/* Host attributes initializer */
55177+static struct device_attribute *twl_host_attrs[] = {
55178+ &twl_host_stats_attr,
55179+ NULL,
55180+};
55181+
55182+/* This function will look up an AEN severity string */
55183+static char *twl_aen_severity_lookup(unsigned char severity_code)
55184+{
55185+ char *retval = NULL;
55186+
55187+ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
55188+ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
55189+ goto out;
55190+
55191+ retval = twl_aen_severity_table[severity_code];
55192+out:
55193+ return retval;
55194+} /* End twl_aen_severity_lookup() */
55195+
55196+/* This function will queue an event */
55197+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
55198+{
55199+ u32 local_time;
55200+ struct timeval time;
55201+ TW_Event *event;
55202+ unsigned short aen;
55203+ char host[16];
55204+ char *error_str;
55205+
55206+ tw_dev->aen_count++;
55207+
55208+ /* Fill out event info */
55209+ event = tw_dev->event_queue[tw_dev->error_index];
55210+
55211+ host[0] = '\0';
55212+ if (tw_dev->host)
55213+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
55214+
55215+ aen = le16_to_cpu(header->status_block.error);
55216+ memset(event, 0, sizeof(TW_Event));
55217+
55218+ event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
55219+ do_gettimeofday(&time);
55220+ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
55221+ event->time_stamp_sec = local_time;
55222+ event->aen_code = aen;
55223+ event->retrieved = TW_AEN_NOT_RETRIEVED;
55224+ event->sequence_id = tw_dev->error_sequence_id;
55225+ tw_dev->error_sequence_id++;
55226+
55227+ /* Check for embedded error string */
55228+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
55229+
55230+ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
55231+ event->parameter_len = strlen(header->err_specific_desc);
55232+ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
55233+ if (event->severity != TW_AEN_SEVERITY_DEBUG)
55234+ printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
55235+ host,
55236+ twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
55237+ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
55238+ header->err_specific_desc);
55239+ else
55240+ tw_dev->aen_count--;
55241+
55242+ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
55243+} /* End twl_aen_queue_event() */
55244+
55245+/* This function will attempt to post a command packet to the board */
55246+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
55247+{
55248+ dma_addr_t command_que_value;
55249+
55250+ command_que_value = tw_dev->command_packet_phys[request_id];
55251+ command_que_value += TW_COMMAND_OFFSET;
55252+
55253+ /* First write upper 4 bytes */
55254+ writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
55255+ /* Then the lower 4 bytes */
55256+ writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
55257+
55258+ tw_dev->state[request_id] = TW_S_POSTED;
55259+ tw_dev->posted_request_count++;
55260+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
55261+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
55262+
55263+ return 0;
55264+} /* End twl_post_command_packet() */
55265+
55266+/* This function will perform a pci-dma mapping for a scatter gather list */
55267+static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
55268+{
55269+ int use_sg;
55270+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
55271+
55272+ use_sg = scsi_dma_map(cmd);
55273+ if (!use_sg)
55274+ return 0;
55275+ else if (use_sg < 0) {
55276+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
55277+ return 0;
55278+ }
55279+
55280+ cmd->SCp.phase = TW_PHASE_SGLIST;
55281+ cmd->SCp.have_data_in = use_sg;
55282+
55283+ return use_sg;
55284+} /* End twl_map_scsi_sg_data() */
55285+
55286+/* This function hands scsi cdb's to the firmware */
55287+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
55288+{
55289+ TW_Command_Full *full_command_packet;
55290+ TW_Command_Apache *command_packet;
55291+ int i, sg_count;
55292+ struct scsi_cmnd *srb = NULL;
55293+ struct scatterlist *sglist = NULL, *sg;
55294+ int retval = 1;
55295+
55296+ if (tw_dev->srb[request_id]) {
55297+ srb = tw_dev->srb[request_id];
55298+ if (scsi_sglist(srb))
55299+ sglist = scsi_sglist(srb);
55300+ }
55301+
55302+ /* Initialize command packet */
55303+ full_command_packet = tw_dev->command_packet_virt[request_id];
55304+ full_command_packet->header.header_desc.size_header = 128;
55305+ full_command_packet->header.status_block.error = 0;
55306+ full_command_packet->header.status_block.severity__reserved = 0;
55307+
55308+ command_packet = &full_command_packet->command.newcommand;
55309+ command_packet->status = 0;
55310+ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
55311+
55312+ /* We forced 16 byte cdb use earlier */
55313+ if (!cdb)
55314+ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
55315+ else
55316+ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
55317+
55318+ if (srb) {
55319+ command_packet->unit = srb->device->id;
55320+ command_packet->request_id__lunl =
55321+ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
55322+ } else {
55323+ command_packet->request_id__lunl =
55324+ cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
55325+ command_packet->unit = 0;
55326+ }
55327+
55328+ command_packet->sgl_offset = 16;
55329+
55330+ if (!sglistarg) {
55331+ /* Map sglist from scsi layer to cmd packet */
55332+ if (scsi_sg_count(srb)) {
55333+ sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
55334+ if (sg_count == 0)
55335+ goto out;
55336+
55337+ scsi_for_each_sg(srb, sg, sg_count, i) {
55338+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
55339+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
55340+ }
55341+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
55342+ }
55343+ } else {
55344+ /* Internal cdb post */
55345+ for (i = 0; i < use_sg; i++) {
55346+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
55347+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
55348+ }
55349+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
55350+ }
55351+
55352+ /* Update some stats */
55353+ if (srb) {
55354+ tw_dev->sector_count = scsi_bufflen(srb) / 512;
55355+ if (tw_dev->sector_count > tw_dev->max_sector_count)
55356+ tw_dev->max_sector_count = tw_dev->sector_count;
55357+ tw_dev->sgl_entries = scsi_sg_count(srb);
55358+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
55359+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
55360+ }
55361+
55362+ /* Now post the command to the board */
55363+ retval = twl_post_command_packet(tw_dev, request_id);
55364+
55365+out:
55366+ return retval;
55367+} /* End twl_scsiop_execute_scsi() */
55368+
55369+/* This function will read the aen queue from the isr */
55370+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
55371+{
55372+ char cdb[TW_MAX_CDB_LEN];
55373+ TW_SG_Entry_ISO sglist[1];
55374+ TW_Command_Full *full_command_packet;
55375+ int retval = 1;
55376+
55377+ full_command_packet = tw_dev->command_packet_virt[request_id];
55378+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55379+
55380+ /* Initialize cdb */
55381+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55382+ cdb[0] = REQUEST_SENSE; /* opcode */
55383+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55384+
55385+ /* Initialize sglist */
55386+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55387+ sglist[0].length = TW_SECTOR_SIZE;
55388+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55389+
55390+ /* Mark internal command */
55391+ tw_dev->srb[request_id] = NULL;
55392+
55393+ /* Now post the command packet */
55394+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55395+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
55396+ goto out;
55397+ }
55398+ retval = 0;
55399+out:
55400+ return retval;
55401+} /* End twl_aen_read_queue() */
55402+
55403+/* This function will sync firmware time with the host time */
55404+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
55405+{
55406+ u32 schedulertime;
55407+ struct timeval utc;
55408+ TW_Command_Full *full_command_packet;
55409+ TW_Command *command_packet;
55410+ TW_Param_Apache *param;
55411+ u32 local_time;
55412+
55413+ /* Fill out the command packet */
55414+ full_command_packet = tw_dev->command_packet_virt[request_id];
55415+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55416+ command_packet = &full_command_packet->command.oldcommand;
55417+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
55418+ command_packet->request_id = request_id;
55419+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
55420+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
55421+ command_packet->size = TW_COMMAND_SIZE;
55422+ command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
55423+
55424+ /* Setup the param */
55425+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55426+ memset(param, 0, TW_SECTOR_SIZE);
55427+ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
55428+ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
55429+ param->parameter_size_bytes = cpu_to_le16(4);
55430+
55431+ /* Convert system time in UTC to local time seconds since last
55432+ Sunday 12:00AM */
55433+ do_gettimeofday(&utc);
55434+ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
55435+ schedulertime = local_time - (3 * 86400);
55436+ schedulertime = cpu_to_le32(schedulertime % 604800);
55437+
55438+ memcpy(param->data, &schedulertime, sizeof(u32));
55439+
55440+ /* Mark internal command */
55441+ tw_dev->srb[request_id] = NULL;
55442+
55443+ /* Now post the command */
55444+ twl_post_command_packet(tw_dev, request_id);
55445+} /* End twl_aen_sync_time() */
55446+
55447+/* This function will assign an available request id */
55448+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
55449+{
55450+ *request_id = tw_dev->free_queue[tw_dev->free_head];
55451+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
55452+ tw_dev->state[*request_id] = TW_S_STARTED;
55453+} /* End twl_get_request_id() */
55454+
55455+/* This function will free a request id */
55456+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
55457+{
55458+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
55459+ tw_dev->state[request_id] = TW_S_FINISHED;
55460+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
55461+} /* End twl_free_request_id() */
55462+
55463+/* This function will complete an aen request from the isr */
55464+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
55465+{
55466+ TW_Command_Full *full_command_packet;
55467+ TW_Command *command_packet;
55468+ TW_Command_Apache_Header *header;
55469+ unsigned short aen;
55470+ int retval = 1;
55471+
55472+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55473+ tw_dev->posted_request_count--;
55474+ aen = le16_to_cpu(header->status_block.error);
55475+ full_command_packet = tw_dev->command_packet_virt[request_id];
55476+ command_packet = &full_command_packet->command.oldcommand;
55477+
55478+ /* First check for internal completion of set param for time sync */
55479+ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
55480+ /* Keep reading the queue in case there are more aen's */
55481+ if (twl_aen_read_queue(tw_dev, request_id))
55482+ goto out2;
55483+ else {
55484+ retval = 0;
55485+ goto out;
55486+ }
55487+ }
55488+
55489+ switch (aen) {
55490+ case TW_AEN_QUEUE_EMPTY:
55491+ /* Quit reading the queue if this is the last one */
55492+ break;
55493+ case TW_AEN_SYNC_TIME_WITH_HOST:
55494+ twl_aen_sync_time(tw_dev, request_id);
55495+ retval = 0;
55496+ goto out;
55497+ default:
55498+ twl_aen_queue_event(tw_dev, header);
55499+
55500+ /* If there are more aen's, keep reading the queue */
55501+ if (twl_aen_read_queue(tw_dev, request_id))
55502+ goto out2;
55503+ else {
55504+ retval = 0;
55505+ goto out;
55506+ }
55507+ }
55508+ retval = 0;
55509+out2:
55510+ tw_dev->state[request_id] = TW_S_COMPLETED;
55511+ twl_free_request_id(tw_dev, request_id);
55512+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
55513+out:
55514+ return retval;
55515+} /* End twl_aen_complete() */
55516+
55517+/* This function will poll for a response */
55518+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
55519+{
55520+ unsigned long before;
55521+ dma_addr_t mfa;
55522+ u32 regh, regl;
55523+ u32 response;
55524+ int retval = 1;
55525+ int found = 0;
55526+
55527+ before = jiffies;
55528+
55529+ while (!found) {
55530+ if (sizeof(dma_addr_t) > 4) {
55531+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
55532+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55533+ mfa = ((u64)regh << 32) | regl;
55534+ } else
55535+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55536+
55537+ response = (u32)mfa;
55538+
55539+ if (TW_RESID_OUT(response) == request_id)
55540+ found = 1;
55541+
55542+ if (time_after(jiffies, before + HZ * seconds))
55543+ goto out;
55544+
55545+ msleep(50);
55546+ }
55547+ retval = 0;
55548+out:
55549+ return retval;
55550+} /* End twl_poll_response() */
55551+
55552+/* This function will drain the aen queue */
55553+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
55554+{
55555+ int request_id = 0;
55556+ char cdb[TW_MAX_CDB_LEN];
55557+ TW_SG_Entry_ISO sglist[1];
55558+ int finished = 0, count = 0;
55559+ TW_Command_Full *full_command_packet;
55560+ TW_Command_Apache_Header *header;
55561+ unsigned short aen;
55562+ int first_reset = 0, queue = 0, retval = 1;
55563+
55564+ if (no_check_reset)
55565+ first_reset = 0;
55566+ else
55567+ first_reset = 1;
55568+
55569+ full_command_packet = tw_dev->command_packet_virt[request_id];
55570+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55571+
55572+ /* Initialize cdb */
55573+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55574+ cdb[0] = REQUEST_SENSE; /* opcode */
55575+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55576+
55577+ /* Initialize sglist */
55578+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55579+ sglist[0].length = TW_SECTOR_SIZE;
55580+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55581+
55582+ /* Mark internal command */
55583+ tw_dev->srb[request_id] = NULL;
55584+
55585+ do {
55586+ /* Send command to the board */
55587+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55588+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
55589+ goto out;
55590+ }
55591+
55592+ /* Now poll for completion */
55593+ if (twl_poll_response(tw_dev, request_id, 30)) {
55594+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
55595+ tw_dev->posted_request_count--;
55596+ goto out;
55597+ }
55598+
55599+ tw_dev->posted_request_count--;
55600+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55601+ aen = le16_to_cpu(header->status_block.error);
55602+ queue = 0;
55603+ count++;
55604+
55605+ switch (aen) {
55606+ case TW_AEN_QUEUE_EMPTY:
55607+ if (first_reset != 1)
55608+ goto out;
55609+ else
55610+ finished = 1;
55611+ break;
55612+ case TW_AEN_SOFT_RESET:
55613+ if (first_reset == 0)
55614+ first_reset = 1;
55615+ else
55616+ queue = 1;
55617+ break;
55618+ case TW_AEN_SYNC_TIME_WITH_HOST:
55619+ break;
55620+ default:
55621+ queue = 1;
55622+ }
55623+
55624+ /* Now queue an event info */
55625+ if (queue)
55626+ twl_aen_queue_event(tw_dev, header);
55627+ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
55628+
55629+ if (count == TW_MAX_AEN_DRAIN)
55630+ goto out;
55631+
55632+ retval = 0;
55633+out:
55634+ tw_dev->state[request_id] = TW_S_INITIAL;
55635+ return retval;
55636+} /* End twl_aen_drain_queue() */
55637+
55638+/* This function will allocate memory and check if it is correctly aligned */
55639+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
55640+{
55641+ int i;
55642+ dma_addr_t dma_handle;
55643+ unsigned long *cpu_addr;
55644+ int retval = 1;
55645+
55646+ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
55647+ if (!cpu_addr) {
55648+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
55649+ goto out;
55650+ }
55651+
55652+ memset(cpu_addr, 0, size*TW_Q_LENGTH);
55653+
55654+ for (i = 0; i < TW_Q_LENGTH; i++) {
55655+ switch(which) {
55656+ case 0:
55657+ tw_dev->command_packet_phys[i] = dma_handle+(i*size);
55658+ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
55659+ break;
55660+ case 1:
55661+ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
55662+ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
55663+ break;
55664+ case 2:
55665+ tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
55666+ tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
55667+ break;
55668+ }
55669+ }
55670+ retval = 0;
55671+out:
55672+ return retval;
55673+} /* End twl_allocate_memory() */
55674+
55675+/* This function will load the request id and various sgls for ioctls */
55676+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
55677+{
55678+ TW_Command *oldcommand;
55679+ TW_Command_Apache *newcommand;
55680+ TW_SG_Entry_ISO *sgl;
55681+ unsigned int pae = 0;
55682+
55683+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
55684+ pae = 1;
55685+
55686+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
55687+ newcommand = &full_command_packet->command.newcommand;
55688+ newcommand->request_id__lunl =
55689+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
55690+ if (length) {
55691+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55692+ newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
55693+ }
55694+ newcommand->sgl_entries__lunh =
55695+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
55696+ } else {
55697+ oldcommand = &full_command_packet->command.oldcommand;
55698+ oldcommand->request_id = request_id;
55699+
55700+ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
55701+ /* Load the sg list */
55702+ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
55703+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55704+ sgl->length = TW_CPU_TO_SGL(length);
55705+ oldcommand->size += pae;
55706+ oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
55707+ }
55708+ }
55709+} /* End twl_load_sgl() */
55710+
55711+/* This function handles ioctl for the character device
55712+ This interface is used by smartmontools open source software */
55713+static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
55714+{
55715+ long timeout;
55716+ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
55717+ dma_addr_t dma_handle;
55718+ int request_id = 0;
55719+ TW_Ioctl_Driver_Command driver_command;
55720+ TW_Ioctl_Buf_Apache *tw_ioctl;
55721+ TW_Command_Full *full_command_packet;
55722+ TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
55723+ int retval = -EFAULT;
55724+ void __user *argp = (void __user *)arg;
55725+
55726+ /* Only let one of these through at a time */
55727+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
55728+ retval = -EINTR;
55729+ goto out;
55730+ }
55731+
55732+ /* First copy down the driver command */
55733+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
55734+ goto out2;
55735+
55736+ /* Check data buffer size */
55737+ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
55738+ retval = -EINVAL;
55739+ goto out2;
55740+ }
55741+
55742+ /* Hardware can only do multiple of 512 byte transfers */
55743+ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
55744+
55745+ /* Now allocate ioctl buf memory */
55746+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
55747+ if (!cpu_addr) {
55748+ retval = -ENOMEM;
55749+ goto out2;
55750+ }
55751+
55752+ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
55753+
55754+ /* Now copy down the entire ioctl */
55755+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
55756+ goto out3;
55757+
55758+ /* See which ioctl we are doing */
55759+ switch (cmd) {
55760+ case TW_IOCTL_FIRMWARE_PASS_THROUGH:
55761+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55762+ twl_get_request_id(tw_dev, &request_id);
55763+
55764+ /* Flag internal command */
55765+ tw_dev->srb[request_id] = NULL;
55766+
55767+ /* Flag chrdev ioctl */
55768+ tw_dev->chrdev_request_id = request_id;
55769+
55770+ full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
55771+
55772+ /* Load request id and sglist for both command types */
55773+ twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
55774+
55775+ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
55776+
55777+ /* Now post the command packet to the controller */
55778+ twl_post_command_packet(tw_dev, request_id);
55779+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55780+
55781+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
55782+
55783+ /* Now wait for command to complete */
55784+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
55785+
55786+ /* We timed out, and didn't get an interrupt */
55787+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
55788+ /* Now we need to reset the board */
55789+ printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
55790+ tw_dev->host->host_no, TW_DRIVER, 0x6,
55791+ cmd);
55792+ retval = -EIO;
55793+ twl_reset_device_extension(tw_dev, 1);
55794+ goto out3;
55795+ }
55796+
55797+ /* Now copy in the command packet response */
55798+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
55799+
55800+ /* Now complete the io */
55801+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55802+ tw_dev->posted_request_count--;
55803+ tw_dev->state[request_id] = TW_S_COMPLETED;
55804+ twl_free_request_id(tw_dev, request_id);
55805+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55806+ break;
55807+ default:
55808+ retval = -ENOTTY;
55809+ goto out3;
55810+ }
55811+
55812+ /* Now copy the entire response to userspace */
55813+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
55814+ retval = 0;
55815+out3:
55816+ /* Now free ioctl buf memory */
55817+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
55818+out2:
55819+ mutex_unlock(&tw_dev->ioctl_lock);
55820+out:
55821+ return retval;
55822+} /* End twl_chrdev_ioctl() */
55823+
55824+/* This function handles open for the character device */
55825+static int twl_chrdev_open(struct inode *inode, struct file *file)
55826+{
55827+ unsigned int minor_number;
55828+ int retval = -ENODEV;
55829+
55830+ if (!capable(CAP_SYS_ADMIN)) {
55831+ retval = -EACCES;
55832+ goto out;
55833+ }
55834+
55835+ cycle_kernel_lock();
55836+ minor_number = iminor(inode);
55837+ if (minor_number >= twl_device_extension_count)
55838+ goto out;
55839+ retval = 0;
55840+out:
55841+ return retval;
55842+} /* End twl_chrdev_open() */
55843+
55844+/* File operations struct for character device */
55845+static const struct file_operations twl_fops = {
55846+ .owner = THIS_MODULE,
55847+ .ioctl = twl_chrdev_ioctl,
55848+ .open = twl_chrdev_open,
55849+ .release = NULL
55850+};
55851+
55852+/* This function passes sense data from firmware to scsi layer */
55853+static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
55854+{
55855+ TW_Command_Apache_Header *header;
55856+ TW_Command_Full *full_command_packet;
55857+ unsigned short error;
55858+ char *error_str;
55859+ int retval = 1;
55860+
55861+ header = tw_dev->sense_buffer_virt[i];
55862+ full_command_packet = tw_dev->command_packet_virt[request_id];
55863+
55864+ /* Get embedded firmware error string */
55865+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
55866+
55867+ /* Don't print error for Logical unit not supported during rollcall */
55868+ error = le16_to_cpu(header->status_block.error);
55869+ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
55870+ if (print_host)
55871+ printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55872+ tw_dev->host->host_no,
55873+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55874+ header->status_block.error,
55875+ error_str,
55876+ header->err_specific_desc);
55877+ else
55878+ printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55879+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55880+ header->status_block.error,
55881+ error_str,
55882+ header->err_specific_desc);
55883+ }
55884+
55885+ if (copy_sense) {
55886+ memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
55887+ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
55888+ goto out;
55889+ }
55890+out:
55891+ return retval;
55892+} /* End twl_fill_sense() */
55893+
55894+/* This function will free up device extension resources */
55895+static void twl_free_device_extension(TW_Device_Extension *tw_dev)
55896+{
55897+ if (tw_dev->command_packet_virt[0])
55898+ pci_free_consistent(tw_dev->tw_pci_dev,
55899+ sizeof(TW_Command_Full)*TW_Q_LENGTH,
55900+ tw_dev->command_packet_virt[0],
55901+ tw_dev->command_packet_phys[0]);
55902+
55903+ if (tw_dev->generic_buffer_virt[0])
55904+ pci_free_consistent(tw_dev->tw_pci_dev,
55905+ TW_SECTOR_SIZE*TW_Q_LENGTH,
55906+ tw_dev->generic_buffer_virt[0],
55907+ tw_dev->generic_buffer_phys[0]);
55908+
55909+ if (tw_dev->sense_buffer_virt[0])
55910+ pci_free_consistent(tw_dev->tw_pci_dev,
55911+ sizeof(TW_Command_Apache_Header)*
55912+ TW_Q_LENGTH,
55913+ tw_dev->sense_buffer_virt[0],
55914+ tw_dev->sense_buffer_phys[0]);
55915+
55916+ kfree(tw_dev->event_queue[0]);
55917+} /* End twl_free_device_extension() */
55918+
55919+/* This function will get parameter table entries from the firmware */
55920+static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
55921+{
55922+ TW_Command_Full *full_command_packet;
55923+ TW_Command *command_packet;
55924+ TW_Param_Apache *param;
55925+ void *retval = NULL;
55926+
55927+ /* Setup the command packet */
55928+ full_command_packet = tw_dev->command_packet_virt[request_id];
55929+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55930+ command_packet = &full_command_packet->command.oldcommand;
55931+
55932+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
55933+ command_packet->size = TW_COMMAND_SIZE;
55934+ command_packet->request_id = request_id;
55935+ command_packet->byte6_offset.block_count = cpu_to_le16(1);
55936+
55937+ /* Now setup the param */
55938+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55939+ memset(param, 0, TW_SECTOR_SIZE);
55940+ param->table_id = cpu_to_le16(table_id | 0x8000);
55941+ param->parameter_id = cpu_to_le16(parameter_id);
55942+ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
55943+
55944+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
55945+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
55946+
55947+ /* Post the command packet to the board */
55948+ twl_post_command_packet(tw_dev, request_id);
55949+
55950+ /* Poll for completion */
55951+ if (twl_poll_response(tw_dev, request_id, 30))
55952+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
55953+ else
55954+ retval = (void *)&(param->data[0]);
55955+
55956+ tw_dev->posted_request_count--;
55957+ tw_dev->state[request_id] = TW_S_INITIAL;
55958+
55959+ return retval;
55960+} /* End twl_get_param() */
55961+
55962+/* This function will send an initconnection command to controller */
55963+static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
55964+ u32 set_features, unsigned short current_fw_srl,
55965+ unsigned short current_fw_arch_id,
55966+ unsigned short current_fw_branch,
55967+ unsigned short current_fw_build,
55968+ unsigned short *fw_on_ctlr_srl,
55969+ unsigned short *fw_on_ctlr_arch_id,
55970+ unsigned short *fw_on_ctlr_branch,
55971+ unsigned short *fw_on_ctlr_build,
55972+ u32 *init_connect_result)
55973+{
55974+ TW_Command_Full *full_command_packet;
55975+ TW_Initconnect *tw_initconnect;
55976+ int request_id = 0, retval = 1;
55977+
55978+ /* Initialize InitConnection command packet */
55979+ full_command_packet = tw_dev->command_packet_virt[request_id];
55980+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55981+ full_command_packet->header.header_desc.size_header = 128;
55982+
55983+ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
55984+ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
55985+ tw_initconnect->request_id = request_id;
55986+ tw_initconnect->message_credits = cpu_to_le16(message_credits);
55987+ tw_initconnect->features = set_features;
55988+
55989+ /* Turn on 64-bit sgl support if we need to */
55990+ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
55991+
55992+ tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
55993+
55994+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
55995+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
55996+ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
55997+ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
55998+ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
55999+ tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
56000+ } else
56001+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
56002+
56003+ /* Send command packet to the board */
56004+ twl_post_command_packet(tw_dev, request_id);
56005+
56006+ /* Poll for completion */
56007+ if (twl_poll_response(tw_dev, request_id, 30)) {
56008+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
56009+ } else {
56010+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
56011+ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
56012+ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
56013+ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
56014+ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
56015+ *init_connect_result = le32_to_cpu(tw_initconnect->result);
56016+ }
56017+ retval = 0;
56018+ }
56019+
56020+ tw_dev->posted_request_count--;
56021+ tw_dev->state[request_id] = TW_S_INITIAL;
56022+
56023+ return retval;
56024+} /* End twl_initconnection() */
56025+
56026+/* This function will initialize the fields of a device extension */
56027+static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
56028+{
56029+ int i, retval = 1;
56030+
56031+ /* Initialize command packet buffers */
56032+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
56033+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
56034+ goto out;
56035+ }
56036+
56037+ /* Initialize generic buffer */
56038+ if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
56039+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
56040+ goto out;
56041+ }
56042+
56043+ /* Allocate sense buffers */
56044+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
56045+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
56046+ goto out;
56047+ }
56048+
56049+ /* Allocate event info space */
56050+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
56051+ if (!tw_dev->event_queue[0]) {
56052+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
56053+ goto out;
56054+ }
56055+
56056+ for (i = 0; i < TW_Q_LENGTH; i++) {
56057+ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
56058+ tw_dev->free_queue[i] = i;
56059+ tw_dev->state[i] = TW_S_INITIAL;
56060+ }
56061+
56062+ tw_dev->free_head = TW_Q_START;
56063+ tw_dev->free_tail = TW_Q_START;
56064+ tw_dev->error_sequence_id = 1;
56065+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56066+
56067+ mutex_init(&tw_dev->ioctl_lock);
56068+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
56069+
56070+ retval = 0;
56071+out:
56072+ return retval;
56073+} /* End twl_initialize_device_extension() */
56074+
56075+/* This function will perform a pci-dma unmap */
56076+static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
56077+{
56078+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
56079+
56080+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
56081+ scsi_dma_unmap(cmd);
56082+} /* End twl_unmap_scsi_data() */
56083+
56084+/* This function will handle attention interrupts */
56085+static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
56086+{
56087+ int retval = 1;
56088+ u32 request_id, doorbell;
56089+
56090+ /* Read doorbell status */
56091+ doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
56092+
56093+ /* Check for controller errors */
56094+ if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
56095+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
56096+ goto out;
56097+ }
56098+
56099+ /* Check if we need to perform an AEN drain */
56100+ if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
56101+ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
56102+ twl_get_request_id(tw_dev, &request_id);
56103+ if (twl_aen_read_queue(tw_dev, request_id)) {
56104+ tw_dev->state[request_id] = TW_S_COMPLETED;
56105+ twl_free_request_id(tw_dev, request_id);
56106+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
56107+ }
56108+ }
56109+ }
56110+
56111+ retval = 0;
56112+out:
56113+ /* Clear doorbell interrupt */
56114+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56115+
56116+ /* Make sure the clear was flushed by reading it back */
56117+ readl(TWL_HOBDBC_REG_ADDR(tw_dev));
56118+
56119+ return retval;
56120+} /* End twl_handle_attention_interrupt() */
56121+
56122+/* Interrupt service routine */
56123+static irqreturn_t twl_interrupt(int irq, void *dev_instance)
56124+{
56125+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
56126+ int i, handled = 0, error = 0;
56127+ dma_addr_t mfa = 0;
56128+ u32 reg, regl, regh, response, request_id = 0;
56129+ struct scsi_cmnd *cmd;
56130+ TW_Command_Full *full_command_packet;
56131+
56132+ spin_lock(tw_dev->host->host_lock);
56133+
56134+ /* Read host interrupt status */
56135+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56136+
56137+ /* Check if this is our interrupt, otherwise bail */
56138+ if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
56139+ goto twl_interrupt_bail;
56140+
56141+ handled = 1;
56142+
56143+ /* If we are resetting, bail */
56144+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
56145+ goto twl_interrupt_bail;
56146+
56147+ /* Attention interrupt */
56148+ if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
56149+ if (twl_handle_attention_interrupt(tw_dev)) {
56150+ TWL_MASK_INTERRUPTS(tw_dev);
56151+ goto twl_interrupt_bail;
56152+ }
56153+ }
56154+
56155+ /* Response interrupt */
56156+ while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
56157+ if (sizeof(dma_addr_t) > 4) {
56158+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
56159+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56160+ mfa = ((u64)regh << 32) | regl;
56161+ } else
56162+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
56163+
56164+ error = 0;
56165+ response = (u32)mfa;
56166+
56167+ /* Check for command packet error */
56168+ if (!TW_NOTMFA_OUT(response)) {
56169+ for (i=0;i<TW_Q_LENGTH;i++) {
56170+ if (tw_dev->sense_buffer_phys[i] == mfa) {
56171+ request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
56172+ if (tw_dev->srb[request_id] != NULL)
56173+ error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
56174+ else {
56175+ /* Skip ioctl error prints */
56176+ if (request_id != tw_dev->chrdev_request_id)
56177+ error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
56178+ else
56179+ memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
56180+ }
56181+
56182+ /* Now re-post the sense buffer */
56183+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56184+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56185+ break;
56186+ }
56187+ }
56188+ } else
56189+ request_id = TW_RESID_OUT(response);
56190+
56191+ full_command_packet = tw_dev->command_packet_virt[request_id];
56192+
56193+ /* Check for correct state */
56194+ if (tw_dev->state[request_id] != TW_S_POSTED) {
56195+ if (tw_dev->srb[request_id] != NULL) {
56196+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
56197+ TWL_MASK_INTERRUPTS(tw_dev);
56198+ goto twl_interrupt_bail;
56199+ }
56200+ }
56201+
56202+ /* Check for internal command completion */
56203+ if (tw_dev->srb[request_id] == NULL) {
56204+ if (request_id != tw_dev->chrdev_request_id) {
56205+ if (twl_aen_complete(tw_dev, request_id))
56206+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
56207+ } else {
56208+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56209+ wake_up(&tw_dev->ioctl_wqueue);
56210+ }
56211+ } else {
56212+ cmd = tw_dev->srb[request_id];
56213+
56214+ if (!error)
56215+ cmd->result = (DID_OK << 16);
56216+
56217+ /* Report residual bytes for single sgl */
56218+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
56219+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
56220+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
56221+ }
56222+
56223+ /* Now complete the io */
56224+ tw_dev->state[request_id] = TW_S_COMPLETED;
56225+ twl_free_request_id(tw_dev, request_id);
56226+ tw_dev->posted_request_count--;
56227+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
56228+ twl_unmap_scsi_data(tw_dev, request_id);
56229+ }
56230+
56231+ /* Check for another response interrupt */
56232+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
56233+ }
56234+
56235+twl_interrupt_bail:
56236+ spin_unlock(tw_dev->host->host_lock);
56237+ return IRQ_RETVAL(handled);
56238+} /* End twl_interrupt() */
56239+
56240+/* This function will poll for a register change */
56241+static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
56242+{
56243+ unsigned long before;
56244+ int retval = 1;
56245+ u32 reg_value;
56246+
56247+ reg_value = readl(reg);
56248+ before = jiffies;
56249+
56250+ while ((reg_value & value) != result) {
56251+ reg_value = readl(reg);
56252+ if (time_after(jiffies, before + HZ * seconds))
56253+ goto out;
56254+ msleep(50);
56255+ }
56256+ retval = 0;
56257+out:
56258+ return retval;
56259+} /* End twl_poll_register() */
56260+
56261+/* This function will reset a controller */
56262+static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
56263+{
56264+ int retval = 1;
56265+ int i = 0;
56266+ u32 status = 0;
56267+ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
56268+ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
56269+ u32 init_connect_result = 0;
56270+ int tries = 0;
56271+ int do_soft_reset = soft_reset;
56272+
56273+ while (tries < TW_MAX_RESET_TRIES) {
56274+ /* Do a soft reset if one is needed */
56275+ if (do_soft_reset) {
56276+ TWL_SOFT_RESET(tw_dev);
56277+
56278+ /* Make sure controller is in a good state */
56279+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
56280+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
56281+ tries++;
56282+ continue;
56283+ }
56284+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
56285+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
56286+ tries++;
56287+ continue;
56288+ }
56289+ }
56290+
56291+ /* Initconnect */
56292+ if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
56293+ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
56294+ TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
56295+ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
56296+ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
56297+ &fw_on_ctlr_build, &init_connect_result)) {
56298+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
56299+ do_soft_reset = 1;
56300+ tries++;
56301+ continue;
56302+ }
56303+
56304+ /* Load sense buffers */
56305+ while (i < TW_Q_LENGTH) {
56306+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
56307+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
56308+
56309+ /* Check status for over-run after each write */
56310+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56311+ if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
56312+ i++;
56313+ }
56314+
56315+ /* Now check status */
56316+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
56317+ if (status) {
56318+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
56319+ do_soft_reset = 1;
56320+ tries++;
56321+ continue;
56322+ }
56323+
56324+ /* Drain the AEN queue */
56325+ if (twl_aen_drain_queue(tw_dev, soft_reset)) {
56326+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
56327+ do_soft_reset = 1;
56328+ tries++;
56329+ continue;
56330+ }
56331+
56332+ /* Load rest of compatibility struct */
56333+ strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
56334+ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
56335+ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
56336+ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
56337+ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
56338+ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
56339+ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
56340+ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
56341+ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
56342+ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
56343+
56344+ /* If we got here, controller is in a good state */
56345+ retval = 0;
56346+ goto out;
56347+ }
56348+out:
56349+ return retval;
56350+} /* End twl_reset_sequence() */
56351+
56352+/* This function will reset a device extension */
56353+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
56354+{
56355+ int i = 0, retval = 1;
56356+ unsigned long flags = 0;
56357+
56358+ /* Block SCSI requests while we are resetting */
56359+ if (ioctl_reset)
56360+ scsi_block_requests(tw_dev->host);
56361+
56362+ set_bit(TW_IN_RESET, &tw_dev->flags);
56363+ TWL_MASK_INTERRUPTS(tw_dev);
56364+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56365+
56366+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
56367+
56368+ /* Abort all requests that are in progress */
56369+ for (i = 0; i < TW_Q_LENGTH; i++) {
56370+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
56371+ (tw_dev->state[i] != TW_S_INITIAL) &&
56372+ (tw_dev->state[i] != TW_S_COMPLETED)) {
56373+ if (tw_dev->srb[i]) {
56374+ tw_dev->srb[i]->result = (DID_RESET << 16);
56375+ tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
56376+ twl_unmap_scsi_data(tw_dev, i);
56377+ }
56378+ }
56379+ }
56380+
56381+ /* Reset queues and counts */
56382+ for (i = 0; i < TW_Q_LENGTH; i++) {
56383+ tw_dev->free_queue[i] = i;
56384+ tw_dev->state[i] = TW_S_INITIAL;
56385+ }
56386+ tw_dev->free_head = TW_Q_START;
56387+ tw_dev->free_tail = TW_Q_START;
56388+ tw_dev->posted_request_count = 0;
56389+
56390+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
56391+
56392+ if (twl_reset_sequence(tw_dev, 1))
56393+ goto out;
56394+
56395+ TWL_UNMASK_INTERRUPTS(tw_dev);
56396+
56397+ clear_bit(TW_IN_RESET, &tw_dev->flags);
56398+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
56399+
56400+ retval = 0;
56401+out:
56402+ if (ioctl_reset)
56403+ scsi_unblock_requests(tw_dev->host);
56404+ return retval;
56405+} /* End twl_reset_device_extension() */
56406+
56407+/* This funciton returns unit geometry in cylinders/heads/sectors */
56408+static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
56409+{
56410+ int heads, sectors, cylinders;
56411+ TW_Device_Extension *tw_dev;
56412+
56413+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
56414+
56415+ if (capacity >= 0x200000) {
56416+ heads = 255;
56417+ sectors = 63;
56418+ cylinders = sector_div(capacity, heads * sectors);
56419+ } else {
56420+ heads = 64;
56421+ sectors = 32;
56422+ cylinders = sector_div(capacity, heads * sectors);
56423+ }
56424+
56425+ geom[0] = heads;
56426+ geom[1] = sectors;
56427+ geom[2] = cylinders;
56428+
56429+ return 0;
56430+} /* End twl_scsi_biosparam() */
56431+
56432+/* This is the new scsi eh reset function */
56433+static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
56434+{
56435+ TW_Device_Extension *tw_dev = NULL;
56436+ int retval = FAILED;
56437+
56438+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56439+
56440+ tw_dev->num_resets++;
56441+
56442+ sdev_printk(KERN_WARNING, SCpnt->device,
56443+ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
56444+ TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
56445+
56446+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
56447+ mutex_lock(&tw_dev->ioctl_lock);
56448+
56449+ /* Now reset the card and some of the device extension data */
56450+ if (twl_reset_device_extension(tw_dev, 0)) {
56451+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
56452+ goto out;
56453+ }
56454+
56455+ retval = SUCCESS;
56456+out:
56457+ mutex_unlock(&tw_dev->ioctl_lock);
56458+ return retval;
56459+} /* End twl_scsi_eh_reset() */
56460+
56461+/* This is the main scsi queue function to handle scsi opcodes */
56462+static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
56463+{
56464+ int request_id, retval;
56465+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
56466+
56467+ /* If we are resetting due to timed out ioctl, report as busy */
56468+ if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
56469+ retval = SCSI_MLQUEUE_HOST_BUSY;
56470+ goto out;
56471+ }
56472+
56473+ /* Save done function into scsi_cmnd struct */
56474+ SCpnt->scsi_done = done;
56475+
56476+ /* Get a free request id */
56477+ twl_get_request_id(tw_dev, &request_id);
56478+
56479+ /* Save the scsi command for use by the ISR */
56480+ tw_dev->srb[request_id] = SCpnt;
56481+
56482+ /* Initialize phase to zero */
56483+ SCpnt->SCp.phase = TW_PHASE_INITIAL;
56484+
56485+ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
56486+ if (retval) {
56487+ tw_dev->state[request_id] = TW_S_COMPLETED;
56488+ twl_free_request_id(tw_dev, request_id);
56489+ SCpnt->result = (DID_ERROR << 16);
56490+ done(SCpnt);
56491+ retval = 0;
56492+ }
56493+out:
56494+ return retval;
56495+} /* End twl_scsi_queue() */
56496+
56497+/* This function tells the controller to shut down */
56498+static void __twl_shutdown(TW_Device_Extension *tw_dev)
56499+{
56500+ /* Disable interrupts */
56501+ TWL_MASK_INTERRUPTS(tw_dev);
56502+
56503+ /* Free up the IRQ */
56504+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56505+
56506+ printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
56507+
56508+ /* Tell the card we are shutting down */
56509+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56510+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
56511+ } else {
56512+ printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
56513+ }
56514+
56515+ /* Clear doorbell interrupt just before exit */
56516+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56517+} /* End __twl_shutdown() */
56518+
56519+/* Wrapper for __twl_shutdown */
56520+static void twl_shutdown(struct pci_dev *pdev)
56521+{
56522+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56523+ TW_Device_Extension *tw_dev;
56524+
56525+ if (!host)
56526+ return;
56527+
56528+ tw_dev = (TW_Device_Extension *)host->hostdata;
56529+
56530+ if (tw_dev->online)
56531+ __twl_shutdown(tw_dev);
56532+} /* End twl_shutdown() */
56533+
56534+/* This function configures unit settings when a unit is coming on-line */
56535+static int twl_slave_configure(struct scsi_device *sdev)
56536+{
56537+ /* Force 60 second timeout */
56538+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
56539+
56540+ return 0;
56541+} /* End twl_slave_configure() */
56542+
56543+/* scsi_host_template initializer */
56544+static struct scsi_host_template driver_template = {
56545+ .module = THIS_MODULE,
56546+ .name = "3w-sas",
56547+ .queuecommand = twl_scsi_queue,
56548+ .eh_host_reset_handler = twl_scsi_eh_reset,
56549+ .bios_param = twl_scsi_biosparam,
56550+ .change_queue_depth = twl_change_queue_depth,
56551+ .can_queue = TW_Q_LENGTH-2,
56552+ .slave_configure = twl_slave_configure,
56553+ .this_id = -1,
56554+ .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
56555+ .max_sectors = TW_MAX_SECTORS,
56556+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
56557+ .use_clustering = ENABLE_CLUSTERING,
56558+ .shost_attrs = twl_host_attrs,
56559+ .emulated = 1
56560+};
56561+
56562+/* This function will probe and initialize a card */
56563+static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
56564+{
56565+ struct Scsi_Host *host = NULL;
56566+ TW_Device_Extension *tw_dev;
56567+ resource_size_t mem_addr, mem_len;
56568+ int retval = -ENODEV;
56569+ int *ptr_phycount, phycount=0;
56570+
56571+ retval = pci_enable_device(pdev);
56572+ if (retval) {
56573+ TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
56574+ goto out_disable_device;
56575+ }
56576+
56577+ pci_set_master(pdev);
56578+ pci_try_set_mwi(pdev);
56579+
56580+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56581+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56582+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56583+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56584+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
56585+ retval = -ENODEV;
56586+ goto out_disable_device;
56587+ }
56588+
56589+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
56590+ if (!host) {
56591+ TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
56592+ retval = -ENOMEM;
56593+ goto out_disable_device;
56594+ }
56595+ tw_dev = (TW_Device_Extension *)host->hostdata;
56596+
56597+ /* Save values to device extension */
56598+ tw_dev->host = host;
56599+ tw_dev->tw_pci_dev = pdev;
56600+
56601+ if (twl_initialize_device_extension(tw_dev)) {
56602+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
56603+ goto out_free_device_extension;
56604+ }
56605+
56606+ /* Request IO regions */
56607+ retval = pci_request_regions(pdev, "3w-sas");
56608+ if (retval) {
56609+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
56610+ goto out_free_device_extension;
56611+ }
56612+
56613+ /* Use region 1 */
56614+ mem_addr = pci_resource_start(pdev, 1);
56615+ mem_len = pci_resource_len(pdev, 1);
56616+
56617+ /* Save base address */
56618+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
56619+
56620+ if (!tw_dev->base_addr) {
56621+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
56622+ goto out_release_mem_region;
56623+ }
56624+
56625+ /* Disable interrupts on the card */
56626+ TWL_MASK_INTERRUPTS(tw_dev);
56627+
56628+ /* Initialize the card */
56629+ if (twl_reset_sequence(tw_dev, 0)) {
56630+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
56631+ goto out_iounmap;
56632+ }
56633+
56634+ /* Set host specific parameters */
56635+ host->max_id = TW_MAX_UNITS;
56636+ host->max_cmd_len = TW_MAX_CDB_LEN;
56637+ host->max_lun = TW_MAX_LUNS;
56638+ host->max_channel = 0;
56639+
56640+ /* Register the card with the kernel SCSI layer */
56641+ retval = scsi_add_host(host, &pdev->dev);
56642+ if (retval) {
56643+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
56644+ goto out_iounmap;
56645+ }
56646+
56647+ pci_set_drvdata(pdev, host);
56648+
56649+ printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
56650+ host->host_no,
56651+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56652+ TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
56653+ (u64)mem_addr, pdev->irq);
56654+
56655+ ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
56656+ TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
56657+ if (ptr_phycount)
56658+ phycount = le32_to_cpu(*(int *)ptr_phycount);
56659+
56660+ printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
56661+ host->host_no,
56662+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56663+ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
56664+ (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
56665+ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
56666+ phycount);
56667+
56668+ /* Try to enable MSI */
56669+ if (use_msi && !pci_enable_msi(pdev))
56670+ set_bit(TW_USING_MSI, &tw_dev->flags);
56671+
56672+ /* Now setup the interrupt handler */
56673+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56674+ if (retval) {
56675+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
56676+ goto out_remove_host;
56677+ }
56678+
56679+ twl_device_extension_list[twl_device_extension_count] = tw_dev;
56680+ twl_device_extension_count++;
56681+
56682+ /* Re-enable interrupts on the card */
56683+ TWL_UNMASK_INTERRUPTS(tw_dev);
56684+
56685+ /* Finally, scan the host */
56686+ scsi_scan_host(host);
56687+
56688+ /* Add sysfs binary files */
56689+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
56690+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
56691+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
56692+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
56693+
56694+ if (twl_major == -1) {
56695+ if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
56696+ TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
56697+ }
56698+ tw_dev->online = 1;
56699+ return 0;
56700+
56701+out_remove_host:
56702+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56703+ pci_disable_msi(pdev);
56704+ scsi_remove_host(host);
56705+out_iounmap:
56706+ iounmap(tw_dev->base_addr);
56707+out_release_mem_region:
56708+ pci_release_regions(pdev);
56709+out_free_device_extension:
56710+ twl_free_device_extension(tw_dev);
56711+ scsi_host_put(host);
56712+out_disable_device:
56713+ pci_disable_device(pdev);
56714+
56715+ return retval;
56716+} /* End twl_probe() */
56717+
56718+/* This function is called to remove a device */
56719+static void twl_remove(struct pci_dev *pdev)
56720+{
56721+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56722+ TW_Device_Extension *tw_dev;
56723+
56724+ if (!host)
56725+ return;
56726+
56727+ tw_dev = (TW_Device_Extension *)host->hostdata;
56728+
56729+ if (!tw_dev->online)
56730+ return;
56731+
56732+ /* Remove sysfs binary files */
56733+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
56734+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
56735+
56736+ scsi_remove_host(tw_dev->host);
56737+
56738+ /* Unregister character device */
56739+ if (twl_major >= 0) {
56740+ unregister_chrdev(twl_major, "twl");
56741+ twl_major = -1;
56742+ }
56743+
56744+ /* Shutdown the card */
56745+ __twl_shutdown(tw_dev);
56746+
56747+ /* Disable MSI if enabled */
56748+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56749+ pci_disable_msi(pdev);
56750+
56751+ /* Free IO remapping */
56752+ iounmap(tw_dev->base_addr);
56753+
56754+ /* Free up the mem region */
56755+ pci_release_regions(pdev);
56756+
56757+ /* Free up device extension resources */
56758+ twl_free_device_extension(tw_dev);
56759+
56760+ scsi_host_put(tw_dev->host);
56761+ pci_disable_device(pdev);
56762+ twl_device_extension_count--;
56763+} /* End twl_remove() */
56764+
56765+#ifdef CONFIG_PM
56766+/* This function is called on PCI suspend */
56767+static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
56768+{
56769+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56770+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56771+
56772+ printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
56773+ /* Disable interrupts */
56774+ TWL_MASK_INTERRUPTS(tw_dev);
56775+
56776+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56777+
56778+ /* Tell the card we are shutting down */
56779+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56780+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
56781+ } else {
56782+ printk(KERN_WARNING "3w-sas: Suspend complete.\n");
56783+ }
56784+
56785+ /* Clear doorbell interrupt */
56786+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56787+
56788+ pci_save_state(pdev);
56789+ pci_disable_device(pdev);
56790+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
56791+
56792+ return 0;
56793+} /* End twl_suspend() */
56794+
56795+/* This function is called on PCI resume */
56796+static int twl_resume(struct pci_dev *pdev)
56797+{
56798+ int retval = 0;
56799+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56800+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56801+
56802+ printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
56803+ pci_set_power_state(pdev, PCI_D0);
56804+ pci_enable_wake(pdev, PCI_D0, 0);
56805+ pci_restore_state(pdev);
56806+
56807+ retval = pci_enable_device(pdev);
56808+ if (retval) {
56809+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
56810+ return retval;
56811+ }
56812+
56813+ pci_set_master(pdev);
56814+ pci_try_set_mwi(pdev);
56815+
56816+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56817+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56818+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56819+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56820+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
56821+ retval = -ENODEV;
56822+ goto out_disable_device;
56823+ }
56824+
56825+ /* Initialize the card */
56826+ if (twl_reset_sequence(tw_dev, 0)) {
56827+ retval = -ENODEV;
56828+ goto out_disable_device;
56829+ }
56830+
56831+ /* Now setup the interrupt handler */
56832+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56833+ if (retval) {
56834+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
56835+ retval = -ENODEV;
56836+ goto out_disable_device;
56837+ }
56838+
56839+ /* Now enable MSI if enabled */
56840+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56841+ pci_enable_msi(pdev);
56842+
56843+ /* Re-enable interrupts on the card */
56844+ TWL_UNMASK_INTERRUPTS(tw_dev);
56845+
56846+ printk(KERN_WARNING "3w-sas: Resume complete.\n");
56847+ return 0;
56848+
56849+out_disable_device:
56850+ scsi_remove_host(host);
56851+ pci_disable_device(pdev);
56852+
56853+ return retval;
56854+} /* End twl_resume() */
56855+#endif
56856+
56857+/* PCI Devices supported by this driver */
56858+static struct pci_device_id twl_pci_tbl[] __devinitdata = {
56859+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9750,
56860+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56861+ { }
56862+};
56863+MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
56864+
56865+/* pci_driver initializer */
56866+static struct pci_driver twl_driver = {
56867+ .name = "3w-sas",
56868+ .id_table = twl_pci_tbl,
56869+ .probe = twl_probe,
56870+ .remove = twl_remove,
56871+#ifdef CONFIG_PM
56872+ .suspend = twl_suspend,
56873+ .resume = twl_resume,
56874+#endif
56875+ .shutdown = twl_shutdown
56876+};
56877+
56878+/* This function is called on driver initialization */
56879+static int __init twl_init(void)
56880+{
56881+ printk(KERN_WARNING "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
56882+
56883+ return pci_register_driver(&twl_driver);
56884+} /* End twl_init() */
56885+
56886+/* This function is called on driver exit */
56887+static void __exit twl_exit(void)
56888+{
56889+ pci_unregister_driver(&twl_driver);
56890+} /* End twl_exit() */
56891+
56892+module_init(twl_init);
56893+module_exit(twl_exit);
56894+
56895diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
56896new file mode 100644
56897index 0000000..e620505
56898--- /dev/null
56899+++ b/drivers/scsi/3w-sas.h
56900@@ -0,0 +1,396 @@
56901+/*
56902+ 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
56903+
56904+ Written By: Adam Radford <linuxraid@lsi.com>
56905+
56906+ Copyright (C) 2009 LSI Corporation.
56907+
56908+ This program is free software; you can redistribute it and/or modify
56909+ it under the terms of the GNU General Public License as published by
56910+ the Free Software Foundation; version 2 of the License.
56911+
56912+ This program is distributed in the hope that it will be useful,
56913+ but WITHOUT ANY WARRANTY; without even the implied warranty of
56914+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
56915+ GNU General Public License for more details.
56916+
56917+ NO WARRANTY
56918+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
56919+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
56920+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
56921+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
56922+ solely responsible for determining the appropriateness of using and
56923+ distributing the Program and assumes all risks associated with its
56924+ exercise of rights under this Agreement, including but not limited to
56925+ the risks and costs of program errors, damage to or loss of data,
56926+ programs or equipment, and unavailability or interruption of operations.
56927+
56928+ DISCLAIMER OF LIABILITY
56929+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56930+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56931+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
56932+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
56933+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
56934+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
56935+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
56936+
56937+ You should have received a copy of the GNU General Public License
56938+ along with this program; if not, write to the Free Software
56939+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
56940+
56941+ Bugs/Comments/Suggestions should be mailed to:
56942+ linuxraid@lsi.com
56943+
56944+ For more information, goto:
56945+ http://www.lsi.com
56946+*/
56947+
56948+#ifndef _3W_SAS_H
56949+#define _3W_SAS_H
56950+
56951+/* AEN severity table */
56952+static char *twl_aen_severity_table[] =
56953+{
56954+ "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0
56955+};
56956+
56957+/* Liberator register offsets */
56958+#define TWL_STATUS 0x0 /* Status */
56959+#define TWL_HIBDB 0x20 /* Inbound doorbell */
56960+#define TWL_HISTAT 0x30 /* Host interrupt status */
56961+#define TWL_HIMASK 0x34 /* Host interrupt mask */
56962+#define TWL_HOBDB 0x9C /* Outbound doorbell */
56963+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
56964+#define TWL_SCRPD3 0xBC /* Scratchpad */
56965+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
56966+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
56967+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
56968+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
56969+#define TWL_HISTATUS_VALID_INTERRUPT 0xC
56970+#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
56971+#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
56972+#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
56973+#define TWL_ISSUE_SOFT_RESET 0x100
56974+#define TWL_CONTROLLER_READY 0x2000
56975+#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
56976+#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
56977+#define TWL_PULL_MODE 0x1
56978+
56979+/* Command packet opcodes used by the driver */
56980+#define TW_OP_INIT_CONNECTION 0x1
56981+#define TW_OP_GET_PARAM 0x12
56982+#define TW_OP_SET_PARAM 0x13
56983+#define TW_OP_EXECUTE_SCSI 0x10
56984+
56985+/* Asynchronous Event Notification (AEN) codes used by the driver */
56986+#define TW_AEN_QUEUE_EMPTY 0x0000
56987+#define TW_AEN_SOFT_RESET 0x0001
56988+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
56989+#define TW_AEN_SEVERITY_ERROR 0x1
56990+#define TW_AEN_SEVERITY_DEBUG 0x4
56991+#define TW_AEN_NOT_RETRIEVED 0x1
56992+
56993+/* Command state defines */
56994+#define TW_S_INITIAL 0x1 /* Initial state */
56995+#define TW_S_STARTED 0x2 /* Id in use */
56996+#define TW_S_POSTED 0x4 /* Posted to the controller */
56997+#define TW_S_COMPLETED 0x8 /* Completed by isr */
56998+#define TW_S_FINISHED 0x10 /* I/O completely done */
56999+
57000+/* Compatibility defines */
57001+#define TW_9750_ARCH_ID 10
57002+#define TW_CURRENT_DRIVER_SRL 40
57003+#define TW_CURRENT_DRIVER_BUILD 0
57004+#define TW_CURRENT_DRIVER_BRANCH 0
57005+
57006+/* Phase defines */
57007+#define TW_PHASE_INITIAL 0
57008+#define TW_PHASE_SGLIST 2
57009+
57010+/* Misc defines */
57011+#define TW_SECTOR_SIZE 512
57012+#define TW_MAX_UNITS 32
57013+#define TW_INIT_MESSAGE_CREDITS 0x100
57014+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
57015+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
57016+#define TW_EXTENDED_INIT_CONNECT 0x2
57017+#define TW_BASE_FW_SRL 24
57018+#define TW_BASE_FW_BRANCH 0
57019+#define TW_BASE_FW_BUILD 1
57020+#define TW_Q_LENGTH 256
57021+#define TW_Q_START 0
57022+#define TW_MAX_SLOT 32
57023+#define TW_MAX_RESET_TRIES 2
57024+#define TW_MAX_CMDS_PER_LUN 254
57025+#define TW_MAX_AEN_DRAIN 255
57026+#define TW_IN_RESET 2
57027+#define TW_USING_MSI 3
57028+#define TW_IN_ATTENTION_LOOP 4
57029+#define TW_MAX_SECTORS 256
57030+#define TW_MAX_CDB_LEN 16
57031+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
57032+#define TW_IOCTL_CHRDEV_FREE -1
57033+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
57034+#define TW_VERSION_TABLE 0x0402
57035+#define TW_TIMEKEEP_TABLE 0x040A
57036+#define TW_INFORMATION_TABLE 0x0403
57037+#define TW_PARAM_FWVER 3
57038+#define TW_PARAM_FWVER_LENGTH 16
57039+#define TW_PARAM_BIOSVER 4
57040+#define TW_PARAM_BIOSVER_LENGTH 16
57041+#define TW_PARAM_MODEL 8
57042+#define TW_PARAM_MODEL_LENGTH 16
57043+#define TW_PARAM_PHY_SUMMARY_TABLE 1
57044+#define TW_PARAM_PHYCOUNT 2
57045+#define TW_PARAM_PHYCOUNT_LENGTH 1
57046+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
57047+#define TW_ALLOCATION_LENGTH 128
57048+#define TW_SENSE_DATA_LENGTH 18
57049+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
57050+#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
57051+#define TW_ERROR_UNIT_OFFLINE 0x128
57052+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
57053+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
57054+#define TW_DRIVER 6
57055+#ifndef PCI_DEVICE_ID_3WARE_9750
57056+#define PCI_DEVICE_ID_3WARE_9750 0x1010
57057+#endif
57058+
57059+/* Bitmask macros to eliminate bitfields */
57060+
57061+/* opcode: 5, reserved: 3 */
57062+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
57063+#define TW_OP_OUT(x) (x & 0x1f)
57064+
57065+/* opcode: 5, sgloffset: 3 */
57066+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
57067+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
57068+
57069+/* severity: 3, reserved: 5 */
57070+#define TW_SEV_OUT(x) (x & 0x7)
57071+
57072+/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
57073+#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
57074+#define TW_NOTMFA_OUT(x) (x & 0x1)
57075+
57076+/* request_id: 12, lun: 4 */
57077+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
57078+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
57079+
57080+/* Register access macros */
57081+#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
57082+#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
57083+#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
57084+#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
57085+#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
57086+#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
57087+#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
57088+#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
57089+#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
57090+#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
57091+#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
57092+#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
57093+#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
57094+#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
57095+#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
57096+
57097+/* Macros */
57098+#define TW_PRINTK(h,a,b,c) { \
57099+if (h) \
57100+printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
57101+else \
57102+printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
57103+}
57104+#define TW_MAX_LUNS 16
57105+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
57106+#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
57107+#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
57108+#define TW_PADDING_LENGTH_LIBERATOR 136
57109+#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
57110+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
57111+
57112+#pragma pack(1)
57113+
57114+/* SGL entry */
57115+typedef struct TAG_TW_SG_Entry_ISO {
57116+ dma_addr_t address;
57117+ dma_addr_t length;
57118+} TW_SG_Entry_ISO;
57119+
57120+/* Old Command Packet with ISO SGL */
57121+typedef struct TW_Command {
57122+ unsigned char opcode__sgloffset;
57123+ unsigned char size;
57124+ unsigned char request_id;
57125+ unsigned char unit__hostid;
57126+ /* Second DWORD */
57127+ unsigned char status;
57128+ unsigned char flags;
57129+ union {
57130+ unsigned short block_count;
57131+ unsigned short parameter_count;
57132+ } byte6_offset;
57133+ union {
57134+ struct {
57135+ u32 lba;
57136+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57137+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
57138+ } io;
57139+ struct {
57140+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
57141+ u32 padding;
57142+ unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
57143+ } param;
57144+ } byte8_offset;
57145+} TW_Command;
57146+
57147+/* New Command Packet with ISO SGL */
57148+typedef struct TAG_TW_Command_Apache {
57149+ unsigned char opcode__reserved;
57150+ unsigned char unit;
57151+ unsigned short request_id__lunl;
57152+ unsigned char status;
57153+ unsigned char sgl_offset;
57154+ unsigned short sgl_entries__lunh;
57155+ unsigned char cdb[16];
57156+ TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
57157+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
57158+} TW_Command_Apache;
57159+
57160+/* New command packet header */
57161+typedef struct TAG_TW_Command_Apache_Header {
57162+ unsigned char sense_data[TW_SENSE_DATA_LENGTH];
57163+ struct {
57164+ char reserved[4];
57165+ unsigned short error;
57166+ unsigned char padding;
57167+ unsigned char severity__reserved;
57168+ } status_block;
57169+ unsigned char err_specific_desc[98];
57170+ struct {
57171+ unsigned char size_header;
57172+ unsigned short request_id;
57173+ unsigned char size_sense;
57174+ } header_desc;
57175+} TW_Command_Apache_Header;
57176+
57177+/* This struct is a union of the 2 command packets */
57178+typedef struct TAG_TW_Command_Full {
57179+ TW_Command_Apache_Header header;
57180+ union {
57181+ TW_Command oldcommand;
57182+ TW_Command_Apache newcommand;
57183+ } command;
57184+} TW_Command_Full;
57185+
57186+/* Initconnection structure */
57187+typedef struct TAG_TW_Initconnect {
57188+ unsigned char opcode__reserved;
57189+ unsigned char size;
57190+ unsigned char request_id;
57191+ unsigned char res2;
57192+ unsigned char status;
57193+ unsigned char flags;
57194+ unsigned short message_credits;
57195+ u32 features;
57196+ unsigned short fw_srl;
57197+ unsigned short fw_arch_id;
57198+ unsigned short fw_branch;
57199+ unsigned short fw_build;
57200+ u32 result;
57201+} TW_Initconnect;
57202+
57203+/* Event info structure */
57204+typedef struct TAG_TW_Event
57205+{
57206+ unsigned int sequence_id;
57207+ unsigned int time_stamp_sec;
57208+ unsigned short aen_code;
57209+ unsigned char severity;
57210+ unsigned char retrieved;
57211+ unsigned char repeat_count;
57212+ unsigned char parameter_len;
57213+ unsigned char parameter_data[98];
57214+} TW_Event;
57215+
57216+typedef struct TAG_TW_Ioctl_Driver_Command {
57217+ unsigned int control_code;
57218+ unsigned int status;
57219+ unsigned int unique_id;
57220+ unsigned int sequence_id;
57221+ unsigned int os_specific;
57222+ unsigned int buffer_length;
57223+} TW_Ioctl_Driver_Command;
57224+
57225+typedef struct TAG_TW_Ioctl_Apache {
57226+ TW_Ioctl_Driver_Command driver_command;
57227+ char padding[488];
57228+ TW_Command_Full firmware_command;
57229+ char data_buffer[1];
57230+} TW_Ioctl_Buf_Apache;
57231+
57232+/* GetParam descriptor */
57233+typedef struct {
57234+ unsigned short table_id;
57235+ unsigned short parameter_id;
57236+ unsigned short parameter_size_bytes;
57237+ unsigned short actual_parameter_size_bytes;
57238+ unsigned char data[1];
57239+} TW_Param_Apache;
57240+
57241+/* Compatibility information structure */
57242+typedef struct TAG_TW_Compatibility_Info
57243+{
57244+ char driver_version[32];
57245+ unsigned short working_srl;
57246+ unsigned short working_branch;
57247+ unsigned short working_build;
57248+ unsigned short driver_srl_high;
57249+ unsigned short driver_branch_high;
57250+ unsigned short driver_build_high;
57251+ unsigned short driver_srl_low;
57252+ unsigned short driver_branch_low;
57253+ unsigned short driver_build_low;
57254+ unsigned short fw_on_ctlr_srl;
57255+ unsigned short fw_on_ctlr_branch;
57256+ unsigned short fw_on_ctlr_build;
57257+} TW_Compatibility_Info;
57258+
57259+#pragma pack()
57260+
57261+typedef struct TAG_TW_Device_Extension {
57262+ void __iomem *base_addr;
57263+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
57264+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
57265+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
57266+ dma_addr_t command_packet_phys[TW_Q_LENGTH];
57267+ TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
57268+ dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
57269+ struct pci_dev *tw_pci_dev;
57270+ struct scsi_cmnd *srb[TW_Q_LENGTH];
57271+ unsigned char free_queue[TW_Q_LENGTH];
57272+ unsigned char free_head;
57273+ unsigned char free_tail;
57274+ int state[TW_Q_LENGTH];
57275+ unsigned int posted_request_count;
57276+ unsigned int max_posted_request_count;
57277+ unsigned int max_sgl_entries;
57278+ unsigned int sgl_entries;
57279+ unsigned int num_resets;
57280+ unsigned int sector_count;
57281+ unsigned int max_sector_count;
57282+ unsigned int aen_count;
57283+ struct Scsi_Host *host;
57284+ long flags;
57285+ TW_Event *event_queue[TW_Q_LENGTH];
57286+ unsigned char error_index;
57287+ unsigned int error_sequence_id;
57288+ int chrdev_request_id;
57289+ wait_queue_head_t ioctl_wqueue;
57290+ struct mutex ioctl_lock;
57291+ TW_Compatibility_Info tw_compat_info;
57292+ char online;
57293+} TW_Device_Extension;
57294+
57295+#endif /* _3W_SAS_H */
57296+
57297diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
57298index 1ddcf40..a85f062 100644
57299--- a/drivers/scsi/BusLogic.c
57300+++ b/drivers/scsi/BusLogic.c
57301@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
57302 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
57303 *PrototypeHostAdapter)
57304 {
57305+ pax_track_stack();
57306+
57307 /*
57308 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
57309 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
57310diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
57311index e11cca4..4295679 100644
57312--- a/drivers/scsi/Kconfig
57313+++ b/drivers/scsi/Kconfig
57314@@ -399,6 +399,17 @@ config SCSI_3W_9XXX
57315 Please read the comments at the top of
57316 <file:drivers/scsi/3w-9xxx.c>.
57317
57318+config SCSI_3W_SAS
57319+ tristate "3ware 97xx SAS/SATA-RAID support"
57320+ depends on PCI && SCSI
57321+ help
57322+ This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
57323+
57324+ <http://www.lsi.com>
57325+
57326+ Please read the comments at the top of
57327+ <file:drivers/scsi/3w-sas.c>.
57328+
57329 config SCSI_7000FASST
57330 tristate "7000FASST SCSI support"
57331 depends on ISA && SCSI && ISA_DMA_API
57332@@ -621,6 +632,14 @@ config SCSI_FLASHPOINT
57333 substantial, so users of MultiMaster Host Adapters may not
57334 wish to include it.
57335
57336+config VMWARE_PVSCSI
57337+ tristate "VMware PVSCSI driver support"
57338+ depends on PCI && SCSI && X86
57339+ help
57340+ This driver supports VMware's para virtualized SCSI HBA.
57341+ To compile this driver as a module, choose M here: the
57342+ module will be called vmw_pvscsi.
57343+
57344 config LIBFC
57345 tristate "LibFC module"
57346 select SCSI_FC_ATTRS
57347diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
57348index 3ad61db..c938975 100644
57349--- a/drivers/scsi/Makefile
57350+++ b/drivers/scsi/Makefile
57351@@ -113,6 +113,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o
57352 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
57353 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
57354 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
57355+obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
57356 obj-$(CONFIG_SCSI_PPA) += ppa.o
57357 obj-$(CONFIG_SCSI_IMM) += imm.o
57358 obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
57359@@ -133,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
57360 obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
57361 obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
57362 obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
57363+obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
57364
57365 obj-$(CONFIG_ARM) += arm/
57366
57367diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
57368index cdbdec9..b7d560b 100644
57369--- a/drivers/scsi/aacraid/aacraid.h
57370+++ b/drivers/scsi/aacraid/aacraid.h
57371@@ -471,7 +471,7 @@ struct adapter_ops
57372 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
57373 /* Administrative operations */
57374 int (*adapter_comm)(struct aac_dev * dev, int comm);
57375-};
57376+} __no_const;
57377
57378 /*
57379 * Define which interrupt handler needs to be installed
57380diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
57381index a5b8e7b..a6a0e43 100644
57382--- a/drivers/scsi/aacraid/commctrl.c
57383+++ b/drivers/scsi/aacraid/commctrl.c
57384@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
57385 u32 actual_fibsize64, actual_fibsize = 0;
57386 int i;
57387
57388+ pax_track_stack();
57389
57390 if (dev->in_reset) {
57391 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
57392diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
57393index 9b97c3e..f099725 100644
57394--- a/drivers/scsi/aacraid/linit.c
57395+++ b/drivers/scsi/aacraid/linit.c
57396@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
57397 #elif defined(__devinitconst)
57398 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57399 #else
57400-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
57401+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
57402 #endif
57403 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
57404 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
57405diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
57406index 996f722..9127845 100644
57407--- a/drivers/scsi/aic94xx/aic94xx_init.c
57408+++ b/drivers/scsi/aic94xx/aic94xx_init.c
57409@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
57410 flash_error_table[i].reason);
57411 }
57412
57413-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
57414+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
57415 asd_show_update_bios, asd_store_update_bios);
57416
57417 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
57418@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
57419 .lldd_control_phy = asd_control_phy,
57420 };
57421
57422-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
57423+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
57424 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
57425 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
57426 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
57427diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
57428index 58efd4b..cb48dc7 100644
57429--- a/drivers/scsi/bfa/bfa_ioc.h
57430+++ b/drivers/scsi/bfa/bfa_ioc.h
57431@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
57432 bfa_ioc_disable_cbfn_t disable_cbfn;
57433 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
57434 bfa_ioc_reset_cbfn_t reset_cbfn;
57435-};
57436+} __no_const;
57437
57438 /**
57439 * Heartbeat failure notification queue element.
57440diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
57441index 7ad177e..5503586 100644
57442--- a/drivers/scsi/bfa/bfa_iocfc.h
57443+++ b/drivers/scsi/bfa/bfa_iocfc.h
57444@@ -61,7 +61,7 @@ struct bfa_hwif_s {
57445 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
57446 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
57447 u32 *nvecs, u32 *maxvec);
57448-};
57449+} __no_const;
57450 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
57451
57452 struct bfa_iocfc_s {
57453diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
57454index 4967643..cbec06b 100644
57455--- a/drivers/scsi/dpt_i2o.c
57456+++ b/drivers/scsi/dpt_i2o.c
57457@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
57458 dma_addr_t addr;
57459 ulong flags = 0;
57460
57461+ pax_track_stack();
57462+
57463 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
57464 // get user msg size in u32s
57465 if(get_user(size, &user_msg[0])){
57466@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
57467 s32 rcode;
57468 dma_addr_t addr;
57469
57470+ pax_track_stack();
57471+
57472 memset(msg, 0 , sizeof(msg));
57473 len = scsi_bufflen(cmd);
57474 direction = 0x00000000;
57475diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
57476index c7076ce..e20c67c 100644
57477--- a/drivers/scsi/eata.c
57478+++ b/drivers/scsi/eata.c
57479@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
57480 struct hostdata *ha;
57481 char name[16];
57482
57483+ pax_track_stack();
57484+
57485 sprintf(name, "%s%d", driver_name, j);
57486
57487 if (!request_region(port_base, REGION_SIZE, driver_name)) {
57488diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
57489index 11ae5c9..891daec 100644
57490--- a/drivers/scsi/fcoe/libfcoe.c
57491+++ b/drivers/scsi/fcoe/libfcoe.c
57492@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
57493 size_t rlen;
57494 size_t dlen;
57495
57496+ pax_track_stack();
57497+
57498 fiph = (struct fip_header *)skb->data;
57499 sub = fiph->fip_subcode;
57500 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
57501diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
57502index 71c7bbe..e93088a 100644
57503--- a/drivers/scsi/fnic/fnic_main.c
57504+++ b/drivers/scsi/fnic/fnic_main.c
57505@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
57506 /* Start local port initiatialization */
57507
57508 lp->link_up = 0;
57509- lp->tt = fnic_transport_template;
57510+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
57511
57512 lp->max_retry_count = fnic->config.flogi_retries;
57513 lp->max_rport_retry_count = fnic->config.plogi_retries;
57514diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
57515index bb96d74..9ec3ce4 100644
57516--- a/drivers/scsi/gdth.c
57517+++ b/drivers/scsi/gdth.c
57518@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
57519 ulong flags;
57520 gdth_ha_str *ha;
57521
57522+ pax_track_stack();
57523+
57524 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
57525 return -EFAULT;
57526 ha = gdth_find_ha(ldrv.ionode);
57527@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
57528 gdth_ha_str *ha;
57529 int rval;
57530
57531+ pax_track_stack();
57532+
57533 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
57534 res.number >= MAX_HDRIVES)
57535 return -EFAULT;
57536@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
57537 gdth_ha_str *ha;
57538 int rval;
57539
57540+ pax_track_stack();
57541+
57542 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
57543 return -EFAULT;
57544 ha = gdth_find_ha(gen.ionode);
57545@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
57546 int i;
57547 gdth_cmd_str gdtcmd;
57548 char cmnd[MAX_COMMAND_SIZE];
57549+
57550+ pax_track_stack();
57551+
57552 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
57553
57554 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
57555diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
57556index 1258da3..20d8ae6 100644
57557--- a/drivers/scsi/gdth_proc.c
57558+++ b/drivers/scsi/gdth_proc.c
57559@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
57560 ulong64 paddr;
57561
57562 char cmnd[MAX_COMMAND_SIZE];
57563+
57564+ pax_track_stack();
57565+
57566 memset(cmnd, 0xff, 12);
57567 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
57568
57569@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
57570 gdth_hget_str *phg;
57571 char cmnd[MAX_COMMAND_SIZE];
57572
57573+ pax_track_stack();
57574+
57575 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
57576 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
57577 if (!gdtcmd || !estr)
57578diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
57579index d03a926..f324286 100644
57580--- a/drivers/scsi/hosts.c
57581+++ b/drivers/scsi/hosts.c
57582@@ -40,7 +40,7 @@
57583 #include "scsi_logging.h"
57584
57585
57586-static atomic_t scsi_host_next_hn; /* host_no for next new host */
57587+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
57588
57589
57590 static void scsi_host_cls_release(struct device *dev)
57591@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
57592 * subtract one because we increment first then return, but we need to
57593 * know what the next host number was before increment
57594 */
57595- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
57596+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
57597 shost->dma_channel = 0xff;
57598
57599 /* These three are default values which can be overridden */
57600diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
57601index a601159..55e19d2 100644
57602--- a/drivers/scsi/ipr.c
57603+++ b/drivers/scsi/ipr.c
57604@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
57605 return true;
57606 }
57607
57608-static struct ata_port_operations ipr_sata_ops = {
57609+static const struct ata_port_operations ipr_sata_ops = {
57610 .phy_reset = ipr_ata_phy_reset,
57611 .hardreset = ipr_sata_reset,
57612 .post_internal_cmd = ipr_ata_post_internal,
57613diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
57614index 4e49fbc..97907ff 100644
57615--- a/drivers/scsi/ips.h
57616+++ b/drivers/scsi/ips.h
57617@@ -1027,7 +1027,7 @@ typedef struct {
57618 int (*intr)(struct ips_ha *);
57619 void (*enableint)(struct ips_ha *);
57620 uint32_t (*statupd)(struct ips_ha *);
57621-} ips_hw_func_t;
57622+} __no_const ips_hw_func_t;
57623
57624 typedef struct ips_ha {
57625 uint8_t ha_id[IPS_MAX_CHANNELS+1];
57626diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
57627index c1c1574..a9c9348 100644
57628--- a/drivers/scsi/libfc/fc_exch.c
57629+++ b/drivers/scsi/libfc/fc_exch.c
57630@@ -86,12 +86,12 @@ struct fc_exch_mgr {
57631 * all together if not used XXX
57632 */
57633 struct {
57634- atomic_t no_free_exch;
57635- atomic_t no_free_exch_xid;
57636- atomic_t xid_not_found;
57637- atomic_t xid_busy;
57638- atomic_t seq_not_found;
57639- atomic_t non_bls_resp;
57640+ atomic_unchecked_t no_free_exch;
57641+ atomic_unchecked_t no_free_exch_xid;
57642+ atomic_unchecked_t xid_not_found;
57643+ atomic_unchecked_t xid_busy;
57644+ atomic_unchecked_t seq_not_found;
57645+ atomic_unchecked_t non_bls_resp;
57646 } stats;
57647 };
57648 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
57649@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
57650 /* allocate memory for exchange */
57651 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
57652 if (!ep) {
57653- atomic_inc(&mp->stats.no_free_exch);
57654+ atomic_inc_unchecked(&mp->stats.no_free_exch);
57655 goto out;
57656 }
57657 memset(ep, 0, sizeof(*ep));
57658@@ -557,7 +557,7 @@ out:
57659 return ep;
57660 err:
57661 spin_unlock_bh(&pool->lock);
57662- atomic_inc(&mp->stats.no_free_exch_xid);
57663+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
57664 mempool_free(ep, mp->ep_pool);
57665 return NULL;
57666 }
57667@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57668 xid = ntohs(fh->fh_ox_id); /* we originated exch */
57669 ep = fc_exch_find(mp, xid);
57670 if (!ep) {
57671- atomic_inc(&mp->stats.xid_not_found);
57672+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57673 reject = FC_RJT_OX_ID;
57674 goto out;
57675 }
57676@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57677 ep = fc_exch_find(mp, xid);
57678 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
57679 if (ep) {
57680- atomic_inc(&mp->stats.xid_busy);
57681+ atomic_inc_unchecked(&mp->stats.xid_busy);
57682 reject = FC_RJT_RX_ID;
57683 goto rel;
57684 }
57685@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57686 }
57687 xid = ep->xid; /* get our XID */
57688 } else if (!ep) {
57689- atomic_inc(&mp->stats.xid_not_found);
57690+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57691 reject = FC_RJT_RX_ID; /* XID not found */
57692 goto out;
57693 }
57694@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57695 } else {
57696 sp = &ep->seq;
57697 if (sp->id != fh->fh_seq_id) {
57698- atomic_inc(&mp->stats.seq_not_found);
57699+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57700 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
57701 goto rel;
57702 }
57703@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57704
57705 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
57706 if (!ep) {
57707- atomic_inc(&mp->stats.xid_not_found);
57708+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57709 goto out;
57710 }
57711 if (ep->esb_stat & ESB_ST_COMPLETE) {
57712- atomic_inc(&mp->stats.xid_not_found);
57713+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57714 goto out;
57715 }
57716 if (ep->rxid == FC_XID_UNKNOWN)
57717 ep->rxid = ntohs(fh->fh_rx_id);
57718 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
57719- atomic_inc(&mp->stats.xid_not_found);
57720+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57721 goto rel;
57722 }
57723 if (ep->did != ntoh24(fh->fh_s_id) &&
57724 ep->did != FC_FID_FLOGI) {
57725- atomic_inc(&mp->stats.xid_not_found);
57726+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57727 goto rel;
57728 }
57729 sof = fr_sof(fp);
57730@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57731 } else {
57732 sp = &ep->seq;
57733 if (sp->id != fh->fh_seq_id) {
57734- atomic_inc(&mp->stats.seq_not_found);
57735+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57736 goto rel;
57737 }
57738 }
57739@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57740 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
57741
57742 if (!sp)
57743- atomic_inc(&mp->stats.xid_not_found);
57744+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57745 else
57746- atomic_inc(&mp->stats.non_bls_resp);
57747+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
57748
57749 fc_frame_free(fp);
57750 }
57751diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
57752index 0ee989f..a582241 100644
57753--- a/drivers/scsi/libsas/sas_ata.c
57754+++ b/drivers/scsi/libsas/sas_ata.c
57755@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
57756 }
57757 }
57758
57759-static struct ata_port_operations sas_sata_ops = {
57760+static const struct ata_port_operations sas_sata_ops = {
57761 .phy_reset = sas_ata_phy_reset,
57762 .post_internal_cmd = sas_ata_post_internal,
57763 .qc_defer = ata_std_qc_defer,
57764diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
57765index aa10f79..5cc79e4 100644
57766--- a/drivers/scsi/lpfc/lpfc.h
57767+++ b/drivers/scsi/lpfc/lpfc.h
57768@@ -400,7 +400,7 @@ struct lpfc_vport {
57769 struct dentry *debug_nodelist;
57770 struct dentry *vport_debugfs_root;
57771 struct lpfc_debugfs_trc *disc_trc;
57772- atomic_t disc_trc_cnt;
57773+ atomic_unchecked_t disc_trc_cnt;
57774 #endif
57775 uint8_t stat_data_enabled;
57776 uint8_t stat_data_blocked;
57777@@ -725,8 +725,8 @@ struct lpfc_hba {
57778 struct timer_list fabric_block_timer;
57779 unsigned long bit_flags;
57780 #define FABRIC_COMANDS_BLOCKED 0
57781- atomic_t num_rsrc_err;
57782- atomic_t num_cmd_success;
57783+ atomic_unchecked_t num_rsrc_err;
57784+ atomic_unchecked_t num_cmd_success;
57785 unsigned long last_rsrc_error_time;
57786 unsigned long last_ramp_down_time;
57787 unsigned long last_ramp_up_time;
57788@@ -740,7 +740,7 @@ struct lpfc_hba {
57789 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
57790 struct dentry *debug_slow_ring_trc;
57791 struct lpfc_debugfs_trc *slow_ring_trc;
57792- atomic_t slow_ring_trc_cnt;
57793+ atomic_unchecked_t slow_ring_trc_cnt;
57794 #endif
57795
57796 /* Used for deferred freeing of ELS data buffers */
57797diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
57798index 8d0f0de..7c77a62 100644
57799--- a/drivers/scsi/lpfc/lpfc_debugfs.c
57800+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
57801@@ -124,7 +124,7 @@ struct lpfc_debug {
57802 int len;
57803 };
57804
57805-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57806+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57807 static unsigned long lpfc_debugfs_start_time = 0L;
57808
57809 /**
57810@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
57811 lpfc_debugfs_enable = 0;
57812
57813 len = 0;
57814- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
57815+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
57816 (lpfc_debugfs_max_disc_trc - 1);
57817 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
57818 dtp = vport->disc_trc + i;
57819@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
57820 lpfc_debugfs_enable = 0;
57821
57822 len = 0;
57823- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
57824+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
57825 (lpfc_debugfs_max_slow_ring_trc - 1);
57826 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
57827 dtp = phba->slow_ring_trc + i;
57828@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
57829 uint32_t *ptr;
57830 char buffer[1024];
57831
57832+ pax_track_stack();
57833+
57834 off = 0;
57835 spin_lock_irq(&phba->hbalock);
57836
57837@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
57838 !vport || !vport->disc_trc)
57839 return;
57840
57841- index = atomic_inc_return(&vport->disc_trc_cnt) &
57842+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
57843 (lpfc_debugfs_max_disc_trc - 1);
57844 dtp = vport->disc_trc + index;
57845 dtp->fmt = fmt;
57846 dtp->data1 = data1;
57847 dtp->data2 = data2;
57848 dtp->data3 = data3;
57849- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57850+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57851 dtp->jif = jiffies;
57852 #endif
57853 return;
57854@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
57855 !phba || !phba->slow_ring_trc)
57856 return;
57857
57858- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
57859+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
57860 (lpfc_debugfs_max_slow_ring_trc - 1);
57861 dtp = phba->slow_ring_trc + index;
57862 dtp->fmt = fmt;
57863 dtp->data1 = data1;
57864 dtp->data2 = data2;
57865 dtp->data3 = data3;
57866- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57867+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57868 dtp->jif = jiffies;
57869 #endif
57870 return;
57871@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57872 "slow_ring buffer\n");
57873 goto debug_failed;
57874 }
57875- atomic_set(&phba->slow_ring_trc_cnt, 0);
57876+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
57877 memset(phba->slow_ring_trc, 0,
57878 (sizeof(struct lpfc_debugfs_trc) *
57879 lpfc_debugfs_max_slow_ring_trc));
57880@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57881 "buffer\n");
57882 goto debug_failed;
57883 }
57884- atomic_set(&vport->disc_trc_cnt, 0);
57885+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
57886
57887 snprintf(name, sizeof(name), "discovery_trace");
57888 vport->debug_disc_trc =
57889diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
57890index 549bc7d..8189dbb 100644
57891--- a/drivers/scsi/lpfc/lpfc_init.c
57892+++ b/drivers/scsi/lpfc/lpfc_init.c
57893@@ -8021,8 +8021,10 @@ lpfc_init(void)
57894 printk(LPFC_COPYRIGHT "\n");
57895
57896 if (lpfc_enable_npiv) {
57897- lpfc_transport_functions.vport_create = lpfc_vport_create;
57898- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57899+ pax_open_kernel();
57900+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
57901+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57902+ pax_close_kernel();
57903 }
57904 lpfc_transport_template =
57905 fc_attach_transport(&lpfc_transport_functions);
57906diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
57907index c88f59f..ff2a42f 100644
57908--- a/drivers/scsi/lpfc/lpfc_scsi.c
57909+++ b/drivers/scsi/lpfc/lpfc_scsi.c
57910@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
57911 uint32_t evt_posted;
57912
57913 spin_lock_irqsave(&phba->hbalock, flags);
57914- atomic_inc(&phba->num_rsrc_err);
57915+ atomic_inc_unchecked(&phba->num_rsrc_err);
57916 phba->last_rsrc_error_time = jiffies;
57917
57918 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
57919@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
57920 unsigned long flags;
57921 struct lpfc_hba *phba = vport->phba;
57922 uint32_t evt_posted;
57923- atomic_inc(&phba->num_cmd_success);
57924+ atomic_inc_unchecked(&phba->num_cmd_success);
57925
57926 if (vport->cfg_lun_queue_depth <= queue_depth)
57927 return;
57928@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
57929 int i;
57930 struct lpfc_rport_data *rdata;
57931
57932- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
57933- num_cmd_success = atomic_read(&phba->num_cmd_success);
57934+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
57935+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
57936
57937 vports = lpfc_create_vport_work_array(phba);
57938 if (vports != NULL)
57939@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
57940 }
57941 }
57942 lpfc_destroy_vport_work_array(phba, vports);
57943- atomic_set(&phba->num_rsrc_err, 0);
57944- atomic_set(&phba->num_cmd_success, 0);
57945+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
57946+ atomic_set_unchecked(&phba->num_cmd_success, 0);
57947 }
57948
57949 /**
57950@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
57951 }
57952 }
57953 lpfc_destroy_vport_work_array(phba, vports);
57954- atomic_set(&phba->num_rsrc_err, 0);
57955- atomic_set(&phba->num_cmd_success, 0);
57956+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
57957+ atomic_set_unchecked(&phba->num_cmd_success, 0);
57958 }
57959
57960 /**
57961diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
57962index 234f0b7..3020aea 100644
57963--- a/drivers/scsi/megaraid/megaraid_mbox.c
57964+++ b/drivers/scsi/megaraid/megaraid_mbox.c
57965@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
57966 int rval;
57967 int i;
57968
57969+ pax_track_stack();
57970+
57971 // Allocate memory for the base list of scb for management module.
57972 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
57973
57974diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
57975index 7a117c1..ee01e9e 100644
57976--- a/drivers/scsi/osd/osd_initiator.c
57977+++ b/drivers/scsi/osd/osd_initiator.c
57978@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
57979 int nelem = ARRAY_SIZE(get_attrs), a = 0;
57980 int ret;
57981
57982+ pax_track_stack();
57983+
57984 or = osd_start_request(od, GFP_KERNEL);
57985 if (!or)
57986 return -ENOMEM;
57987diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
57988index 9ab8c86..9425ad3 100644
57989--- a/drivers/scsi/pmcraid.c
57990+++ b/drivers/scsi/pmcraid.c
57991@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
57992 res->scsi_dev = scsi_dev;
57993 scsi_dev->hostdata = res;
57994 res->change_detected = 0;
57995- atomic_set(&res->read_failures, 0);
57996- atomic_set(&res->write_failures, 0);
57997+ atomic_set_unchecked(&res->read_failures, 0);
57998+ atomic_set_unchecked(&res->write_failures, 0);
57999 rc = 0;
58000 }
58001 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
58002@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
58003
58004 /* If this was a SCSI read/write command keep count of errors */
58005 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
58006- atomic_inc(&res->read_failures);
58007+ atomic_inc_unchecked(&res->read_failures);
58008 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
58009- atomic_inc(&res->write_failures);
58010+ atomic_inc_unchecked(&res->write_failures);
58011
58012 if (!RES_IS_GSCSI(res->cfg_entry) &&
58013 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
58014@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
58015
58016 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
58017 /* add resources only after host is added into system */
58018- if (!atomic_read(&pinstance->expose_resources))
58019+ if (!atomic_read_unchecked(&pinstance->expose_resources))
58020 return;
58021
58022 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
58023@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
58024 init_waitqueue_head(&pinstance->reset_wait_q);
58025
58026 atomic_set(&pinstance->outstanding_cmds, 0);
58027- atomic_set(&pinstance->expose_resources, 0);
58028+ atomic_set_unchecked(&pinstance->expose_resources, 0);
58029
58030 INIT_LIST_HEAD(&pinstance->free_res_q);
58031 INIT_LIST_HEAD(&pinstance->used_res_q);
58032@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
58033 /* Schedule worker thread to handle CCN and take care of adding and
58034 * removing devices to OS
58035 */
58036- atomic_set(&pinstance->expose_resources, 1);
58037+ atomic_set_unchecked(&pinstance->expose_resources, 1);
58038 schedule_work(&pinstance->worker_q);
58039 return rc;
58040
58041diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
58042index 3441b3f..6cbe8f7 100644
58043--- a/drivers/scsi/pmcraid.h
58044+++ b/drivers/scsi/pmcraid.h
58045@@ -690,7 +690,7 @@ struct pmcraid_instance {
58046 atomic_t outstanding_cmds;
58047
58048 /* should add/delete resources to mid-layer now ?*/
58049- atomic_t expose_resources;
58050+ atomic_unchecked_t expose_resources;
58051
58052 /* Tasklet to handle deferred processing */
58053 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
58054@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
58055 struct list_head queue; /* link to "to be exposed" resources */
58056 struct pmcraid_config_table_entry cfg_entry;
58057 struct scsi_device *scsi_dev; /* Link scsi_device structure */
58058- atomic_t read_failures; /* count of failed READ commands */
58059- atomic_t write_failures; /* count of failed WRITE commands */
58060+ atomic_unchecked_t read_failures; /* count of failed READ commands */
58061+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
58062
58063 /* To indicate add/delete/modify during CCN */
58064 u8 change_detected;
58065diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
58066index 2150618..7034215 100644
58067--- a/drivers/scsi/qla2xxx/qla_def.h
58068+++ b/drivers/scsi/qla2xxx/qla_def.h
58069@@ -2089,7 +2089,7 @@ struct isp_operations {
58070
58071 int (*get_flash_version) (struct scsi_qla_host *, void *);
58072 int (*start_scsi) (srb_t *);
58073-};
58074+} __no_const;
58075
58076 /* MSI-X Support *************************************************************/
58077
58078diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
58079index 81b5f29..2ae1fad 100644
58080--- a/drivers/scsi/qla4xxx/ql4_def.h
58081+++ b/drivers/scsi/qla4xxx/ql4_def.h
58082@@ -240,7 +240,7 @@ struct ddb_entry {
58083 atomic_t retry_relogin_timer; /* Min Time between relogins
58084 * (4000 only) */
58085 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
58086- atomic_t relogin_retry_count; /* Num of times relogin has been
58087+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
58088 * retried */
58089
58090 uint16_t port;
58091diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
58092index af8c323..515dd51 100644
58093--- a/drivers/scsi/qla4xxx/ql4_init.c
58094+++ b/drivers/scsi/qla4xxx/ql4_init.c
58095@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
58096 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
58097 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
58098 atomic_set(&ddb_entry->relogin_timer, 0);
58099- atomic_set(&ddb_entry->relogin_retry_count, 0);
58100+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58101 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58102 list_add_tail(&ddb_entry->list, &ha->ddb_list);
58103 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
58104@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
58105 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
58106 atomic_set(&ddb_entry->port_down_timer,
58107 ha->port_down_retry_count);
58108- atomic_set(&ddb_entry->relogin_retry_count, 0);
58109+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
58110 atomic_set(&ddb_entry->relogin_timer, 0);
58111 clear_bit(DF_RELOGIN, &ddb_entry->flags);
58112 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
58113diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
58114index 83c8b5e..a82b348 100644
58115--- a/drivers/scsi/qla4xxx/ql4_os.c
58116+++ b/drivers/scsi/qla4xxx/ql4_os.c
58117@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
58118 ddb_entry->fw_ddb_device_state ==
58119 DDB_DS_SESSION_FAILED) {
58120 /* Reset retry relogin timer */
58121- atomic_inc(&ddb_entry->relogin_retry_count);
58122+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
58123 DEBUG2(printk("scsi%ld: index[%d] relogin"
58124 " timed out-retrying"
58125 " relogin (%d)\n",
58126 ha->host_no,
58127 ddb_entry->fw_ddb_index,
58128- atomic_read(&ddb_entry->
58129+ atomic_read_unchecked(&ddb_entry->
58130 relogin_retry_count))
58131 );
58132 start_dpc++;
58133diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
58134index dd098ca..686ce01 100644
58135--- a/drivers/scsi/scsi.c
58136+++ b/drivers/scsi/scsi.c
58137@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
58138 unsigned long timeout;
58139 int rtn = 0;
58140
58141- atomic_inc(&cmd->device->iorequest_cnt);
58142+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58143
58144 /* check if the device is still usable */
58145 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
58146diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
58147index bc3e363..e1a8e50 100644
58148--- a/drivers/scsi/scsi_debug.c
58149+++ b/drivers/scsi/scsi_debug.c
58150@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
58151 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
58152 unsigned char *cmd = (unsigned char *)scp->cmnd;
58153
58154+ pax_track_stack();
58155+
58156 if ((errsts = check_readiness(scp, 1, devip)))
58157 return errsts;
58158 memset(arr, 0, sizeof(arr));
58159@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
58160 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
58161 unsigned char *cmd = (unsigned char *)scp->cmnd;
58162
58163+ pax_track_stack();
58164+
58165 if ((errsts = check_readiness(scp, 1, devip)))
58166 return errsts;
58167 memset(arr, 0, sizeof(arr));
58168diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
58169index 8df12522..c4c1472 100644
58170--- a/drivers/scsi/scsi_lib.c
58171+++ b/drivers/scsi/scsi_lib.c
58172@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
58173 shost = sdev->host;
58174 scsi_init_cmd_errh(cmd);
58175 cmd->result = DID_NO_CONNECT << 16;
58176- atomic_inc(&cmd->device->iorequest_cnt);
58177+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
58178
58179 /*
58180 * SCSI request completion path will do scsi_device_unbusy(),
58181@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
58182 */
58183 cmd->serial_number = 0;
58184
58185- atomic_inc(&cmd->device->iodone_cnt);
58186+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
58187 if (cmd->result)
58188- atomic_inc(&cmd->device->ioerr_cnt);
58189+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
58190
58191 disposition = scsi_decide_disposition(cmd);
58192 if (disposition != SUCCESS &&
58193diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
58194index 91a93e0..eae0fe3 100644
58195--- a/drivers/scsi/scsi_sysfs.c
58196+++ b/drivers/scsi/scsi_sysfs.c
58197@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
58198 char *buf) \
58199 { \
58200 struct scsi_device *sdev = to_scsi_device(dev); \
58201- unsigned long long count = atomic_read(&sdev->field); \
58202+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
58203 return snprintf(buf, 20, "0x%llx\n", count); \
58204 } \
58205 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
58206diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
58207index 1030327..f91fd30 100644
58208--- a/drivers/scsi/scsi_tgt_lib.c
58209+++ b/drivers/scsi/scsi_tgt_lib.c
58210@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
58211 int err;
58212
58213 dprintk("%lx %u\n", uaddr, len);
58214- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
58215+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
58216 if (err) {
58217 /*
58218 * TODO: need to fixup sg_tablesize, max_segment_size,
58219diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
58220index db02e31..1b42ea9 100644
58221--- a/drivers/scsi/scsi_transport_fc.c
58222+++ b/drivers/scsi/scsi_transport_fc.c
58223@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
58224 * Netlink Infrastructure
58225 */
58226
58227-static atomic_t fc_event_seq;
58228+static atomic_unchecked_t fc_event_seq;
58229
58230 /**
58231 * fc_get_event_number - Obtain the next sequential FC event number
58232@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
58233 u32
58234 fc_get_event_number(void)
58235 {
58236- return atomic_add_return(1, &fc_event_seq);
58237+ return atomic_add_return_unchecked(1, &fc_event_seq);
58238 }
58239 EXPORT_SYMBOL(fc_get_event_number);
58240
58241@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
58242 {
58243 int error;
58244
58245- atomic_set(&fc_event_seq, 0);
58246+ atomic_set_unchecked(&fc_event_seq, 0);
58247
58248 error = transport_class_register(&fc_host_class);
58249 if (error)
58250diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
58251index de2f8c4..63c5278 100644
58252--- a/drivers/scsi/scsi_transport_iscsi.c
58253+++ b/drivers/scsi/scsi_transport_iscsi.c
58254@@ -81,7 +81,7 @@ struct iscsi_internal {
58255 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
58256 };
58257
58258-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
58259+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
58260 static struct workqueue_struct *iscsi_eh_timer_workq;
58261
58262 /*
58263@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
58264 int err;
58265
58266 ihost = shost->shost_data;
58267- session->sid = atomic_add_return(1, &iscsi_session_nr);
58268+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
58269
58270 if (id == ISCSI_MAX_TARGET) {
58271 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
58272@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
58273 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
58274 ISCSI_TRANSPORT_VERSION);
58275
58276- atomic_set(&iscsi_session_nr, 0);
58277+ atomic_set_unchecked(&iscsi_session_nr, 0);
58278
58279 err = class_register(&iscsi_transport_class);
58280 if (err)
58281diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
58282index 21a045e..ec89e03 100644
58283--- a/drivers/scsi/scsi_transport_srp.c
58284+++ b/drivers/scsi/scsi_transport_srp.c
58285@@ -33,7 +33,7 @@
58286 #include "scsi_transport_srp_internal.h"
58287
58288 struct srp_host_attrs {
58289- atomic_t next_port_id;
58290+ atomic_unchecked_t next_port_id;
58291 };
58292 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
58293
58294@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
58295 struct Scsi_Host *shost = dev_to_shost(dev);
58296 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
58297
58298- atomic_set(&srp_host->next_port_id, 0);
58299+ atomic_set_unchecked(&srp_host->next_port_id, 0);
58300 return 0;
58301 }
58302
58303@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
58304 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
58305 rport->roles = ids->roles;
58306
58307- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
58308+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
58309 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
58310
58311 transport_setup_device(&rport->dev);
58312diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
58313index 040f751..98a5ed2 100644
58314--- a/drivers/scsi/sg.c
58315+++ b/drivers/scsi/sg.c
58316@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
58317 sdp->disk->disk_name,
58318 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
58319 NULL,
58320- (char *)arg);
58321+ (char __user *)arg);
58322 case BLKTRACESTART:
58323 return blk_trace_startstop(sdp->device->request_queue, 1);
58324 case BLKTRACESTOP:
58325@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
58326 const struct file_operations * fops;
58327 };
58328
58329-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
58330+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
58331 {"allow_dio", &adio_fops},
58332 {"debug", &debug_fops},
58333 {"def_reserved_size", &dressz_fops},
58334@@ -2307,7 +2307,7 @@ sg_proc_init(void)
58335 {
58336 int k, mask;
58337 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
58338- struct sg_proc_leaf * leaf;
58339+ const struct sg_proc_leaf * leaf;
58340
58341 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
58342 if (!sg_proc_sgp)
58343diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
58344index c19ca5e..3eb5959 100644
58345--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
58346+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
58347@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
58348 int do_iounmap = 0;
58349 int do_disable_device = 1;
58350
58351+ pax_track_stack();
58352+
58353 memset(&sym_dev, 0, sizeof(sym_dev));
58354 memset(&nvram, 0, sizeof(nvram));
58355 sym_dev.pdev = pdev;
58356diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
58357new file mode 100644
58358index 0000000..eabb432
58359--- /dev/null
58360+++ b/drivers/scsi/vmw_pvscsi.c
58361@@ -0,0 +1,1401 @@
58362+/*
58363+ * Linux driver for VMware's para-virtualized SCSI HBA.
58364+ *
58365+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
58366+ *
58367+ * This program is free software; you can redistribute it and/or modify it
58368+ * under the terms of the GNU General Public License as published by the
58369+ * Free Software Foundation; version 2 of the License and no later version.
58370+ *
58371+ * This program is distributed in the hope that it will be useful, but
58372+ * WITHOUT ANY WARRANTY; without even the implied warranty of
58373+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
58374+ * NON INFRINGEMENT. See the GNU General Public License for more
58375+ * details.
58376+ *
58377+ * You should have received a copy of the GNU General Public License
58378+ * along with this program; if not, write to the Free Software
58379+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
58380+ *
58381+ * Maintained by: Alok N Kataria <akataria@vmware.com>
58382+ *
58383+ */
58384+
58385+#include <linux/kernel.h>
58386+#include <linux/module.h>
58387+#include <linux/moduleparam.h>
58388+#include <linux/types.h>
58389+#include <linux/interrupt.h>
58390+#include <linux/workqueue.h>
58391+#include <linux/pci.h>
58392+
58393+#include <scsi/scsi.h>
58394+#include <scsi/scsi_host.h>
58395+#include <scsi/scsi_cmnd.h>
58396+#include <scsi/scsi_device.h>
58397+
58398+#include "vmw_pvscsi.h"
58399+
58400+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
58401+
58402+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
58403+MODULE_AUTHOR("VMware, Inc.");
58404+MODULE_LICENSE("GPL");
58405+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
58406+
58407+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
58408+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
58409+#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
58410+#define SGL_SIZE PAGE_SIZE
58411+
58412+#define pvscsi_dev(adapter) (&(adapter->dev->dev))
58413+
58414+struct pvscsi_sg_list {
58415+ struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
58416+};
58417+
58418+struct pvscsi_ctx {
58419+ /*
58420+ * The index of the context in cmd_map serves as the context ID for a
58421+ * 1-to-1 mapping completions back to requests.
58422+ */
58423+ struct scsi_cmnd *cmd;
58424+ struct pvscsi_sg_list *sgl;
58425+ struct list_head list;
58426+ dma_addr_t dataPA;
58427+ dma_addr_t sensePA;
58428+ dma_addr_t sglPA;
58429+};
58430+
58431+struct pvscsi_adapter {
58432+ char *mmioBase;
58433+ unsigned int irq;
58434+ u8 rev;
58435+ bool use_msi;
58436+ bool use_msix;
58437+ bool use_msg;
58438+
58439+ spinlock_t hw_lock;
58440+
58441+ struct workqueue_struct *workqueue;
58442+ struct work_struct work;
58443+
58444+ struct PVSCSIRingReqDesc *req_ring;
58445+ unsigned req_pages;
58446+ unsigned req_depth;
58447+ dma_addr_t reqRingPA;
58448+
58449+ struct PVSCSIRingCmpDesc *cmp_ring;
58450+ unsigned cmp_pages;
58451+ dma_addr_t cmpRingPA;
58452+
58453+ struct PVSCSIRingMsgDesc *msg_ring;
58454+ unsigned msg_pages;
58455+ dma_addr_t msgRingPA;
58456+
58457+ struct PVSCSIRingsState *rings_state;
58458+ dma_addr_t ringStatePA;
58459+
58460+ struct pci_dev *dev;
58461+ struct Scsi_Host *host;
58462+
58463+ struct list_head cmd_pool;
58464+ struct pvscsi_ctx *cmd_map;
58465+};
58466+
58467+
58468+/* Command line parameters */
58469+static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
58470+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
58471+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
58472+static bool pvscsi_disable_msi;
58473+static bool pvscsi_disable_msix;
58474+static bool pvscsi_use_msg = true;
58475+
58476+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
58477+
58478+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
58479+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
58480+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
58481+
58482+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
58483+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
58484+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
58485+
58486+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
58487+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
58488+ __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
58489+
58490+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
58491+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
58492+
58493+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
58494+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
58495+
58496+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
58497+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
58498+
58499+static const struct pci_device_id pvscsi_pci_tbl[] = {
58500+ { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
58501+ { 0 }
58502+};
58503+
58504+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
58505+
58506+static struct pvscsi_ctx *
58507+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58508+{
58509+ struct pvscsi_ctx *ctx, *end;
58510+
58511+ end = &adapter->cmd_map[adapter->req_depth];
58512+ for (ctx = adapter->cmd_map; ctx < end; ctx++)
58513+ if (ctx->cmd == cmd)
58514+ return ctx;
58515+
58516+ return NULL;
58517+}
58518+
58519+static struct pvscsi_ctx *
58520+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
58521+{
58522+ struct pvscsi_ctx *ctx;
58523+
58524+ if (list_empty(&adapter->cmd_pool))
58525+ return NULL;
58526+
58527+ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
58528+ ctx->cmd = cmd;
58529+ list_del(&ctx->list);
58530+
58531+ return ctx;
58532+}
58533+
58534+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
58535+ struct pvscsi_ctx *ctx)
58536+{
58537+ ctx->cmd = NULL;
58538+ list_add(&ctx->list, &adapter->cmd_pool);
58539+}
58540+
58541+/*
58542+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
58543+ * non-zero integer. ctx always points to an entry in cmd_map array, hence
58544+ * the return value is always >=1.
58545+ */
58546+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
58547+ const struct pvscsi_ctx *ctx)
58548+{
58549+ return ctx - adapter->cmd_map + 1;
58550+}
58551+
58552+static struct pvscsi_ctx *
58553+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
58554+{
58555+ return &adapter->cmd_map[context - 1];
58556+}
58557+
58558+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
58559+ u32 offset, u32 val)
58560+{
58561+ writel(val, adapter->mmioBase + offset);
58562+}
58563+
58564+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
58565+{
58566+ return readl(adapter->mmioBase + offset);
58567+}
58568+
58569+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
58570+{
58571+ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
58572+}
58573+
58574+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
58575+ u32 val)
58576+{
58577+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
58578+}
58579+
58580+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
58581+{
58582+ u32 intr_bits;
58583+
58584+ intr_bits = PVSCSI_INTR_CMPL_MASK;
58585+ if (adapter->use_msg)
58586+ intr_bits |= PVSCSI_INTR_MSG_MASK;
58587+
58588+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
58589+}
58590+
58591+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
58592+{
58593+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
58594+}
58595+
58596+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
58597+ u32 cmd, const void *desc, size_t len)
58598+{
58599+ const u32 *ptr = desc;
58600+ size_t i;
58601+
58602+ len /= sizeof(*ptr);
58603+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
58604+ for (i = 0; i < len; i++)
58605+ pvscsi_reg_write(adapter,
58606+ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
58607+}
58608+
58609+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
58610+ const struct pvscsi_ctx *ctx)
58611+{
58612+ struct PVSCSICmdDescAbortCmd cmd = { 0 };
58613+
58614+ cmd.target = ctx->cmd->device->id;
58615+ cmd.context = pvscsi_map_context(adapter, ctx);
58616+
58617+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
58618+}
58619+
58620+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
58621+{
58622+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
58623+}
58624+
58625+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
58626+{
58627+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
58628+}
58629+
58630+static int scsi_is_rw(unsigned char op)
58631+{
58632+ return op == READ_6 || op == WRITE_6 ||
58633+ op == READ_10 || op == WRITE_10 ||
58634+ op == READ_12 || op == WRITE_12 ||
58635+ op == READ_16 || op == WRITE_16;
58636+}
58637+
58638+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
58639+ unsigned char op)
58640+{
58641+ if (scsi_is_rw(op))
58642+ pvscsi_kick_rw_io(adapter);
58643+ else
58644+ pvscsi_process_request_ring(adapter);
58645+}
58646+
58647+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
58648+{
58649+ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
58650+
58651+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
58652+}
58653+
58654+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
58655+{
58656+ dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
58657+
58658+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
58659+}
58660+
58661+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
58662+{
58663+ struct PVSCSICmdDescResetDevice cmd = { 0 };
58664+
58665+ dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
58666+
58667+ cmd.target = target;
58668+
58669+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
58670+ &cmd, sizeof(cmd));
58671+}
58672+
58673+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
58674+ struct scatterlist *sg, unsigned count)
58675+{
58676+ unsigned i;
58677+ struct PVSCSISGElement *sge;
58678+
58679+ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
58680+
58681+ sge = &ctx->sgl->sge[0];
58682+ for (i = 0; i < count; i++, sg++) {
58683+ sge[i].addr = sg_dma_address(sg);
58684+ sge[i].length = sg_dma_len(sg);
58685+ sge[i].flags = 0;
58686+ }
58687+}
58688+
58689+/*
58690+ * Map all data buffers for a command into PCI space and
58691+ * setup the scatter/gather list if needed.
58692+ */
58693+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
58694+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
58695+ struct PVSCSIRingReqDesc *e)
58696+{
58697+ unsigned count;
58698+ unsigned bufflen = scsi_bufflen(cmd);
58699+ struct scatterlist *sg;
58700+
58701+ e->dataLen = bufflen;
58702+ e->dataAddr = 0;
58703+ if (bufflen == 0)
58704+ return;
58705+
58706+ sg = scsi_sglist(cmd);
58707+ count = scsi_sg_count(cmd);
58708+ if (count != 0) {
58709+ int segs = scsi_dma_map(cmd);
58710+ if (segs > 1) {
58711+ pvscsi_create_sg(ctx, sg, segs);
58712+
58713+ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
58714+ ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
58715+ SGL_SIZE, PCI_DMA_TODEVICE);
58716+ e->dataAddr = ctx->sglPA;
58717+ } else
58718+ e->dataAddr = sg_dma_address(sg);
58719+ } else {
58720+ /*
58721+ * In case there is no S/G list, scsi_sglist points
58722+ * directly to the buffer.
58723+ */
58724+ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
58725+ cmd->sc_data_direction);
58726+ e->dataAddr = ctx->dataPA;
58727+ }
58728+}
58729+
58730+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
58731+ struct pvscsi_ctx *ctx)
58732+{
58733+ struct scsi_cmnd *cmd;
58734+ unsigned bufflen;
58735+
58736+ cmd = ctx->cmd;
58737+ bufflen = scsi_bufflen(cmd);
58738+
58739+ if (bufflen != 0) {
58740+ unsigned count = scsi_sg_count(cmd);
58741+
58742+ if (count != 0) {
58743+ scsi_dma_unmap(cmd);
58744+ if (ctx->sglPA) {
58745+ pci_unmap_single(adapter->dev, ctx->sglPA,
58746+ SGL_SIZE, PCI_DMA_TODEVICE);
58747+ ctx->sglPA = 0;
58748+ }
58749+ } else
58750+ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
58751+ cmd->sc_data_direction);
58752+ }
58753+ if (cmd->sense_buffer)
58754+ pci_unmap_single(adapter->dev, ctx->sensePA,
58755+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
58756+}
58757+
58758+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
58759+{
58760+ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
58761+ &adapter->ringStatePA);
58762+ if (!adapter->rings_state)
58763+ return -ENOMEM;
58764+
58765+ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
58766+ pvscsi_ring_pages);
58767+ adapter->req_depth = adapter->req_pages
58768+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
58769+ adapter->req_ring = pci_alloc_consistent(adapter->dev,
58770+ adapter->req_pages * PAGE_SIZE,
58771+ &adapter->reqRingPA);
58772+ if (!adapter->req_ring)
58773+ return -ENOMEM;
58774+
58775+ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
58776+ pvscsi_ring_pages);
58777+ adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
58778+ adapter->cmp_pages * PAGE_SIZE,
58779+ &adapter->cmpRingPA);
58780+ if (!adapter->cmp_ring)
58781+ return -ENOMEM;
58782+
58783+ BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
58784+ BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
58785+ BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
58786+
58787+ if (!adapter->use_msg)
58788+ return 0;
58789+
58790+ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
58791+ pvscsi_msg_ring_pages);
58792+ adapter->msg_ring = pci_alloc_consistent(adapter->dev,
58793+ adapter->msg_pages * PAGE_SIZE,
58794+ &adapter->msgRingPA);
58795+ if (!adapter->msg_ring)
58796+ return -ENOMEM;
58797+ BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
58798+
58799+ return 0;
58800+}
58801+
58802+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
58803+{
58804+ struct PVSCSICmdDescSetupRings cmd = { 0 };
58805+ dma_addr_t base;
58806+ unsigned i;
58807+
58808+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
58809+ cmd.reqRingNumPages = adapter->req_pages;
58810+ cmd.cmpRingNumPages = adapter->cmp_pages;
58811+
58812+ base = adapter->reqRingPA;
58813+ for (i = 0; i < adapter->req_pages; i++) {
58814+ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
58815+ base += PAGE_SIZE;
58816+ }
58817+
58818+ base = adapter->cmpRingPA;
58819+ for (i = 0; i < adapter->cmp_pages; i++) {
58820+ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
58821+ base += PAGE_SIZE;
58822+ }
58823+
58824+ memset(adapter->rings_state, 0, PAGE_SIZE);
58825+ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
58826+ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
58827+
58828+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
58829+ &cmd, sizeof(cmd));
58830+
58831+ if (adapter->use_msg) {
58832+ struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
58833+
58834+ cmd_msg.numPages = adapter->msg_pages;
58835+
58836+ base = adapter->msgRingPA;
58837+ for (i = 0; i < adapter->msg_pages; i++) {
58838+ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
58839+ base += PAGE_SIZE;
58840+ }
58841+ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
58842+
58843+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
58844+ &cmd_msg, sizeof(cmd_msg));
58845+ }
58846+}
58847+
58848+/*
58849+ * Pull a completion descriptor off and pass the completion back
58850+ * to the SCSI mid layer.
58851+ */
58852+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
58853+ const struct PVSCSIRingCmpDesc *e)
58854+{
58855+ struct pvscsi_ctx *ctx;
58856+ struct scsi_cmnd *cmd;
58857+ u32 btstat = e->hostStatus;
58858+ u32 sdstat = e->scsiStatus;
58859+
58860+ ctx = pvscsi_get_context(adapter, e->context);
58861+ cmd = ctx->cmd;
58862+ pvscsi_unmap_buffers(adapter, ctx);
58863+ pvscsi_release_context(adapter, ctx);
58864+ cmd->result = 0;
58865+
58866+ if (sdstat != SAM_STAT_GOOD &&
58867+ (btstat == BTSTAT_SUCCESS ||
58868+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
58869+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
58870+ cmd->result = (DID_OK << 16) | sdstat;
58871+ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
58872+ cmd->result |= (DRIVER_SENSE << 24);
58873+ } else
58874+ switch (btstat) {
58875+ case BTSTAT_SUCCESS:
58876+ case BTSTAT_LINKED_COMMAND_COMPLETED:
58877+ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
58878+ /* If everything went fine, let's move on.. */
58879+ cmd->result = (DID_OK << 16);
58880+ break;
58881+
58882+ case BTSTAT_DATARUN:
58883+ case BTSTAT_DATA_UNDERRUN:
58884+ /* Report residual data in underruns */
58885+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
58886+ cmd->result = (DID_ERROR << 16);
58887+ break;
58888+
58889+ case BTSTAT_SELTIMEO:
58890+ /* Our emulation returns this for non-connected devs */
58891+ cmd->result = (DID_BAD_TARGET << 16);
58892+ break;
58893+
58894+ case BTSTAT_LUNMISMATCH:
58895+ case BTSTAT_TAGREJECT:
58896+ case BTSTAT_BADMSG:
58897+ cmd->result = (DRIVER_INVALID << 24);
58898+ /* fall through */
58899+
58900+ case BTSTAT_HAHARDWARE:
58901+ case BTSTAT_INVPHASE:
58902+ case BTSTAT_HATIMEOUT:
58903+ case BTSTAT_NORESPONSE:
58904+ case BTSTAT_DISCONNECT:
58905+ case BTSTAT_HASOFTWARE:
58906+ case BTSTAT_BUSFREE:
58907+ case BTSTAT_SENSFAILED:
58908+ cmd->result |= (DID_ERROR << 16);
58909+ break;
58910+
58911+ case BTSTAT_SENTRST:
58912+ case BTSTAT_RECVRST:
58913+ case BTSTAT_BUSRESET:
58914+ cmd->result = (DID_RESET << 16);
58915+ break;
58916+
58917+ case BTSTAT_ABORTQUEUE:
58918+ cmd->result = (DID_ABORT << 16);
58919+ break;
58920+
58921+ case BTSTAT_SCSIPARITY:
58922+ cmd->result = (DID_PARITY << 16);
58923+ break;
58924+
58925+ default:
58926+ cmd->result = (DID_ERROR << 16);
58927+ scmd_printk(KERN_DEBUG, cmd,
58928+ "Unknown completion status: 0x%x\n",
58929+ btstat);
58930+ }
58931+
58932+ dev_dbg(&cmd->device->sdev_gendev,
58933+ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
58934+ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
58935+
58936+ cmd->scsi_done(cmd);
58937+}
58938+
58939+/*
58940+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
58941+ * where we may want to serialize some accesses between the driver and the
58942+ * emulation layer. We use compiler barriers instead of the more expensive
58943+ * memory barriers because PVSCSI is only supported on X86 which has strong
58944+ * memory access ordering.
58945+ */
58946+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
58947+{
58948+ struct PVSCSIRingsState *s = adapter->rings_state;
58949+ struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
58950+ u32 cmp_entries = s->cmpNumEntriesLog2;
58951+
58952+ while (s->cmpConsIdx != s->cmpProdIdx) {
58953+ struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
58954+ MASK(cmp_entries));
58955+ /*
58956+ * This barrier() ensures that *e is not dereferenced while
58957+ * the device emulation still writes data into the slot.
58958+ * Since the device emulation advances s->cmpProdIdx only after
58959+ * updating the slot we want to check it first.
58960+ */
58961+ barrier();
58962+ pvscsi_complete_request(adapter, e);
58963+ /*
58964+ * This barrier() ensures that compiler doesn't reorder write
58965+ * to s->cmpConsIdx before the read of (*e) inside
58966+ * pvscsi_complete_request. Otherwise, device emulation may
58967+ * overwrite *e before we had a chance to read it.
58968+ */
58969+ barrier();
58970+ s->cmpConsIdx++;
58971+ }
58972+}
58973+
58974+/*
58975+ * Translate a Linux SCSI request into a request ring entry.
58976+ */
58977+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
58978+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
58979+{
58980+ struct PVSCSIRingsState *s;
58981+ struct PVSCSIRingReqDesc *e;
58982+ struct scsi_device *sdev;
58983+ u32 req_entries;
58984+
58985+ s = adapter->rings_state;
58986+ sdev = cmd->device;
58987+ req_entries = s->reqNumEntriesLog2;
58988+
58989+ /*
58990+ * If this condition holds, we might have room on the request ring, but
58991+ * we might not have room on the completion ring for the response.
58992+ * However, we have already ruled out this possibility - we would not
58993+ * have successfully allocated a context if it were true, since we only
58994+ * have one context per request entry. Check for it anyway, since it
58995+ * would be a serious bug.
58996+ */
58997+ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
58998+ scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
58999+ "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
59000+ s->reqProdIdx, s->cmpConsIdx);
59001+ return -1;
59002+ }
59003+
59004+ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
59005+
59006+ e->bus = sdev->channel;
59007+ e->target = sdev->id;
59008+ memset(e->lun, 0, sizeof(e->lun));
59009+ e->lun[1] = sdev->lun;
59010+
59011+ if (cmd->sense_buffer) {
59012+ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
59013+ SCSI_SENSE_BUFFERSIZE,
59014+ PCI_DMA_FROMDEVICE);
59015+ e->senseAddr = ctx->sensePA;
59016+ e->senseLen = SCSI_SENSE_BUFFERSIZE;
59017+ } else {
59018+ e->senseLen = 0;
59019+ e->senseAddr = 0;
59020+ }
59021+ e->cdbLen = cmd->cmd_len;
59022+ e->vcpuHint = smp_processor_id();
59023+ memcpy(e->cdb, cmd->cmnd, e->cdbLen);
59024+
59025+ e->tag = SIMPLE_QUEUE_TAG;
59026+ if (sdev->tagged_supported &&
59027+ (cmd->tag == HEAD_OF_QUEUE_TAG ||
59028+ cmd->tag == ORDERED_QUEUE_TAG))
59029+ e->tag = cmd->tag;
59030+
59031+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
59032+ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
59033+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
59034+ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
59035+ else if (cmd->sc_data_direction == DMA_NONE)
59036+ e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
59037+ else
59038+ e->flags = 0;
59039+
59040+ pvscsi_map_buffers(adapter, ctx, cmd, e);
59041+
59042+ e->context = pvscsi_map_context(adapter, ctx);
59043+
59044+ barrier();
59045+
59046+ s->reqProdIdx++;
59047+
59048+ return 0;
59049+}
59050+
59051+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
59052+{
59053+ struct Scsi_Host *host = cmd->device->host;
59054+ struct pvscsi_adapter *adapter = shost_priv(host);
59055+ struct pvscsi_ctx *ctx;
59056+ unsigned long flags;
59057+
59058+ spin_lock_irqsave(&adapter->hw_lock, flags);
59059+
59060+ ctx = pvscsi_acquire_context(adapter, cmd);
59061+ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
59062+ if (ctx)
59063+ pvscsi_release_context(adapter, ctx);
59064+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59065+ return SCSI_MLQUEUE_HOST_BUSY;
59066+ }
59067+
59068+ cmd->scsi_done = done;
59069+
59070+ dev_dbg(&cmd->device->sdev_gendev,
59071+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
59072+
59073+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59074+
59075+ pvscsi_kick_io(adapter, cmd->cmnd[0]);
59076+
59077+ return 0;
59078+}
59079+
59080+static int pvscsi_abort(struct scsi_cmnd *cmd)
59081+{
59082+ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
59083+ struct pvscsi_ctx *ctx;
59084+ unsigned long flags;
59085+
59086+ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
59087+ adapter->host->host_no, cmd);
59088+
59089+ spin_lock_irqsave(&adapter->hw_lock, flags);
59090+
59091+ /*
59092+ * Poll the completion ring first - we might be trying to abort
59093+ * a command that is waiting to be dispatched in the completion ring.
59094+ */
59095+ pvscsi_process_completion_ring(adapter);
59096+
59097+ /*
59098+ * If there is no context for the command, it either already succeeded
59099+ * or else was never properly issued. Not our problem.
59100+ */
59101+ ctx = pvscsi_find_context(adapter, cmd);
59102+ if (!ctx) {
59103+ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
59104+ goto out;
59105+ }
59106+
59107+ pvscsi_abort_cmd(adapter, ctx);
59108+
59109+ pvscsi_process_completion_ring(adapter);
59110+
59111+out:
59112+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59113+ return SUCCESS;
59114+}
59115+
59116+/*
59117+ * Abort all outstanding requests. This is only safe to use if the completion
59118+ * ring will never be walked again or the device has been reset, because it
59119+ * destroys the 1-1 mapping between context field passed to emulation and our
59120+ * request structure.
59121+ */
59122+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
59123+{
59124+ unsigned i;
59125+
59126+ for (i = 0; i < adapter->req_depth; i++) {
59127+ struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
59128+ struct scsi_cmnd *cmd = ctx->cmd;
59129+ if (cmd) {
59130+ scmd_printk(KERN_ERR, cmd,
59131+ "Forced reset on cmd %p\n", cmd);
59132+ pvscsi_unmap_buffers(adapter, ctx);
59133+ pvscsi_release_context(adapter, ctx);
59134+ cmd->result = (DID_RESET << 16);
59135+ cmd->scsi_done(cmd);
59136+ }
59137+ }
59138+}
59139+
59140+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
59141+{
59142+ struct Scsi_Host *host = cmd->device->host;
59143+ struct pvscsi_adapter *adapter = shost_priv(host);
59144+ unsigned long flags;
59145+ bool use_msg;
59146+
59147+ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
59148+
59149+ spin_lock_irqsave(&adapter->hw_lock, flags);
59150+
59151+ use_msg = adapter->use_msg;
59152+
59153+ if (use_msg) {
59154+ adapter->use_msg = 0;
59155+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59156+
59157+ /*
59158+ * Now that we know that the ISR won't add more work on the
59159+ * workqueue we can safely flush any outstanding work.
59160+ */
59161+ flush_workqueue(adapter->workqueue);
59162+ spin_lock_irqsave(&adapter->hw_lock, flags);
59163+ }
59164+
59165+ /*
59166+ * We're going to tear down the entire ring structure and set it back
59167+ * up, so stalling new requests until all completions are flushed and
59168+ * the rings are back in place.
59169+ */
59170+
59171+ pvscsi_process_request_ring(adapter);
59172+
59173+ ll_adapter_reset(adapter);
59174+
59175+ /*
59176+ * Now process any completions. Note we do this AFTER adapter reset,
59177+ * which is strange, but stops races where completions get posted
59178+ * between processing the ring and issuing the reset. The backend will
59179+ * not touch the ring memory after reset, so the immediately pre-reset
59180+ * completion ring state is still valid.
59181+ */
59182+ pvscsi_process_completion_ring(adapter);
59183+
59184+ pvscsi_reset_all(adapter);
59185+ adapter->use_msg = use_msg;
59186+ pvscsi_setup_all_rings(adapter);
59187+ pvscsi_unmask_intr(adapter);
59188+
59189+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59190+
59191+ return SUCCESS;
59192+}
59193+
59194+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
59195+{
59196+ struct Scsi_Host *host = cmd->device->host;
59197+ struct pvscsi_adapter *adapter = shost_priv(host);
59198+ unsigned long flags;
59199+
59200+ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
59201+
59202+ /*
59203+ * We don't want to queue new requests for this bus after
59204+ * flushing all pending requests to emulation, since new
59205+ * requests could then sneak in during this bus reset phase,
59206+ * so take the lock now.
59207+ */
59208+ spin_lock_irqsave(&adapter->hw_lock, flags);
59209+
59210+ pvscsi_process_request_ring(adapter);
59211+ ll_bus_reset(adapter);
59212+ pvscsi_process_completion_ring(adapter);
59213+
59214+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59215+
59216+ return SUCCESS;
59217+}
59218+
59219+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
59220+{
59221+ struct Scsi_Host *host = cmd->device->host;
59222+ struct pvscsi_adapter *adapter = shost_priv(host);
59223+ unsigned long flags;
59224+
59225+ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
59226+ host->host_no, cmd->device->id);
59227+
59228+ /*
59229+ * We don't want to queue new requests for this device after flushing
59230+ * all pending requests to emulation, since new requests could then
59231+ * sneak in during this device reset phase, so take the lock now.
59232+ */
59233+ spin_lock_irqsave(&adapter->hw_lock, flags);
59234+
59235+ pvscsi_process_request_ring(adapter);
59236+ ll_device_reset(adapter, cmd->device->id);
59237+ pvscsi_process_completion_ring(adapter);
59238+
59239+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59240+
59241+ return SUCCESS;
59242+}
59243+
59244+static struct scsi_host_template pvscsi_template;
59245+
59246+static const char *pvscsi_info(struct Scsi_Host *host)
59247+{
59248+ struct pvscsi_adapter *adapter = shost_priv(host);
59249+ static char buf[256];
59250+
59251+ sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
59252+ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
59253+ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
59254+ pvscsi_template.cmd_per_lun);
59255+
59256+ return buf;
59257+}
59258+
59259+static struct scsi_host_template pvscsi_template = {
59260+ .module = THIS_MODULE,
59261+ .name = "VMware PVSCSI Host Adapter",
59262+ .proc_name = "vmw_pvscsi",
59263+ .info = pvscsi_info,
59264+ .queuecommand = pvscsi_queue,
59265+ .this_id = -1,
59266+ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
59267+ .dma_boundary = UINT_MAX,
59268+ .max_sectors = 0xffff,
59269+ .use_clustering = ENABLE_CLUSTERING,
59270+ .eh_abort_handler = pvscsi_abort,
59271+ .eh_device_reset_handler = pvscsi_device_reset,
59272+ .eh_bus_reset_handler = pvscsi_bus_reset,
59273+ .eh_host_reset_handler = pvscsi_host_reset,
59274+};
59275+
59276+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
59277+ const struct PVSCSIRingMsgDesc *e)
59278+{
59279+ struct PVSCSIRingsState *s = adapter->rings_state;
59280+ struct Scsi_Host *host = adapter->host;
59281+ struct scsi_device *sdev;
59282+
59283+ printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
59284+ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
59285+
59286+ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
59287+
59288+ if (e->type == PVSCSI_MSG_DEV_ADDED) {
59289+ struct PVSCSIMsgDescDevStatusChanged *desc;
59290+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59291+
59292+ printk(KERN_INFO
59293+ "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
59294+ desc->bus, desc->target, desc->lun[1]);
59295+
59296+ if (!scsi_host_get(host))
59297+ return;
59298+
59299+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
59300+ desc->lun[1]);
59301+ if (sdev) {
59302+ printk(KERN_INFO "vmw_pvscsi: device already exists\n");
59303+ scsi_device_put(sdev);
59304+ } else
59305+ scsi_add_device(adapter->host, desc->bus,
59306+ desc->target, desc->lun[1]);
59307+
59308+ scsi_host_put(host);
59309+ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
59310+ struct PVSCSIMsgDescDevStatusChanged *desc;
59311+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
59312+
59313+ printk(KERN_INFO
59314+ "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
59315+ desc->bus, desc->target, desc->lun[1]);
59316+
59317+ if (!scsi_host_get(host))
59318+ return;
59319+
59320+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
59321+ desc->lun[1]);
59322+ if (sdev) {
59323+ scsi_remove_device(sdev);
59324+ scsi_device_put(sdev);
59325+ } else
59326+ printk(KERN_INFO
59327+ "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
59328+ desc->bus, desc->target, desc->lun[1]);
59329+
59330+ scsi_host_put(host);
59331+ }
59332+}
59333+
59334+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
59335+{
59336+ struct PVSCSIRingsState *s = adapter->rings_state;
59337+
59338+ return s->msgProdIdx != s->msgConsIdx;
59339+}
59340+
59341+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
59342+{
59343+ struct PVSCSIRingsState *s = adapter->rings_state;
59344+ struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
59345+ u32 msg_entries = s->msgNumEntriesLog2;
59346+
59347+ while (pvscsi_msg_pending(adapter)) {
59348+ struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
59349+ MASK(msg_entries));
59350+
59351+ barrier();
59352+ pvscsi_process_msg(adapter, e);
59353+ barrier();
59354+ s->msgConsIdx++;
59355+ }
59356+}
59357+
59358+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
59359+{
59360+ struct pvscsi_adapter *adapter;
59361+
59362+ adapter = container_of(data, struct pvscsi_adapter, work);
59363+
59364+ pvscsi_process_msg_ring(adapter);
59365+}
59366+
59367+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
59368+{
59369+ char name[32];
59370+
59371+ if (!pvscsi_use_msg)
59372+ return 0;
59373+
59374+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
59375+ PVSCSI_CMD_SETUP_MSG_RING);
59376+
59377+ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
59378+ return 0;
59379+
59380+ snprintf(name, sizeof(name),
59381+ "vmw_pvscsi_wq_%u", adapter->host->host_no);
59382+
59383+ adapter->workqueue = create_singlethread_workqueue(name);
59384+ if (!adapter->workqueue) {
59385+ printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
59386+ return 0;
59387+ }
59388+ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
59389+
59390+ return 1;
59391+}
59392+
59393+static irqreturn_t pvscsi_isr(int irq, void *devp)
59394+{
59395+ struct pvscsi_adapter *adapter = devp;
59396+ int handled;
59397+
59398+ if (adapter->use_msi || adapter->use_msix)
59399+ handled = true;
59400+ else {
59401+ u32 val = pvscsi_read_intr_status(adapter);
59402+ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
59403+ if (handled)
59404+ pvscsi_write_intr_status(devp, val);
59405+ }
59406+
59407+ if (handled) {
59408+ unsigned long flags;
59409+
59410+ spin_lock_irqsave(&adapter->hw_lock, flags);
59411+
59412+ pvscsi_process_completion_ring(adapter);
59413+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
59414+ queue_work(adapter->workqueue, &adapter->work);
59415+
59416+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
59417+ }
59418+
59419+ return IRQ_RETVAL(handled);
59420+}
59421+
59422+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
59423+{
59424+ struct pvscsi_ctx *ctx = adapter->cmd_map;
59425+ unsigned i;
59426+
59427+ for (i = 0; i < adapter->req_depth; ++i, ++ctx)
59428+ kfree(ctx->sgl);
59429+}
59430+
59431+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
59432+{
59433+ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
59434+ int ret;
59435+
59436+ ret = pci_enable_msix(adapter->dev, &entry, 1);
59437+ if (ret)
59438+ return ret;
59439+
59440+ *irq = entry.vector;
59441+
59442+ return 0;
59443+}
59444+
59445+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
59446+{
59447+ if (adapter->irq) {
59448+ free_irq(adapter->irq, adapter);
59449+ adapter->irq = 0;
59450+ }
59451+ if (adapter->use_msi) {
59452+ pci_disable_msi(adapter->dev);
59453+ adapter->use_msi = 0;
59454+ } else if (adapter->use_msix) {
59455+ pci_disable_msix(adapter->dev);
59456+ adapter->use_msix = 0;
59457+ }
59458+}
59459+
59460+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
59461+{
59462+ pvscsi_shutdown_intr(adapter);
59463+
59464+ if (adapter->workqueue)
59465+ destroy_workqueue(adapter->workqueue);
59466+
59467+ if (adapter->mmioBase)
59468+ pci_iounmap(adapter->dev, adapter->mmioBase);
59469+
59470+ pci_release_regions(adapter->dev);
59471+
59472+ if (adapter->cmd_map) {
59473+ pvscsi_free_sgls(adapter);
59474+ kfree(adapter->cmd_map);
59475+ }
59476+
59477+ if (adapter->rings_state)
59478+ pci_free_consistent(adapter->dev, PAGE_SIZE,
59479+ adapter->rings_state, adapter->ringStatePA);
59480+
59481+ if (adapter->req_ring)
59482+ pci_free_consistent(adapter->dev,
59483+ adapter->req_pages * PAGE_SIZE,
59484+ adapter->req_ring, adapter->reqRingPA);
59485+
59486+ if (adapter->cmp_ring)
59487+ pci_free_consistent(adapter->dev,
59488+ adapter->cmp_pages * PAGE_SIZE,
59489+ adapter->cmp_ring, adapter->cmpRingPA);
59490+
59491+ if (adapter->msg_ring)
59492+ pci_free_consistent(adapter->dev,
59493+ adapter->msg_pages * PAGE_SIZE,
59494+ adapter->msg_ring, adapter->msgRingPA);
59495+}
59496+
59497+/*
59498+ * Allocate scatter gather lists.
59499+ *
59500+ * These are statically allocated. Trying to be clever was not worth it.
59501+ *
59502+ * Dynamic allocation can fail, and we can't go deeep into the memory
59503+ * allocator, since we're a SCSI driver, and trying too hard to allocate
59504+ * memory might generate disk I/O. We also don't want to fail disk I/O
59505+ * in that case because we can't get an allocation - the I/O could be
59506+ * trying to swap out data to free memory. Since that is pathological,
59507+ * just use a statically allocated scatter list.
59508+ *
59509+ */
59510+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
59511+{
59512+ struct pvscsi_ctx *ctx;
59513+ int i;
59514+
59515+ ctx = adapter->cmd_map;
59516+ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
59517+
59518+ for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
59519+ ctx->sgl = kmalloc(SGL_SIZE, GFP_KERNEL);
59520+ ctx->sglPA = 0;
59521+ BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
59522+ if (!ctx->sgl) {
59523+ for (; i >= 0; --i, --ctx) {
59524+ kfree(ctx->sgl);
59525+ ctx->sgl = NULL;
59526+ }
59527+ return -ENOMEM;
59528+ }
59529+ }
59530+
59531+ return 0;
59532+}
59533+
59534+static int __devinit pvscsi_probe(struct pci_dev *pdev,
59535+ const struct pci_device_id *id)
59536+{
59537+ struct pvscsi_adapter *adapter;
59538+ struct Scsi_Host *host;
59539+ unsigned int i;
59540+ int error;
59541+
59542+ error = -ENODEV;
59543+
59544+ if (pci_enable_device(pdev))
59545+ return error;
59546+
59547+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
59548+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
59549+ printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
59550+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
59551+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
59552+ printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
59553+ } else {
59554+ printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
59555+ goto out_disable_device;
59556+ }
59557+
59558+ pvscsi_template.can_queue =
59559+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
59560+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
59561+ pvscsi_template.cmd_per_lun =
59562+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
59563+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
59564+ if (!host) {
59565+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
59566+ goto out_disable_device;
59567+ }
59568+
59569+ adapter = shost_priv(host);
59570+ memset(adapter, 0, sizeof(*adapter));
59571+ adapter->dev = pdev;
59572+ adapter->host = host;
59573+
59574+ spin_lock_init(&adapter->hw_lock);
59575+
59576+ host->max_channel = 0;
59577+ host->max_id = 16;
59578+ host->max_lun = 1;
59579+ host->max_cmd_len = 16;
59580+
59581+ adapter->rev = pdev->revision;
59582+
59583+ if (pci_request_regions(pdev, "vmw_pvscsi")) {
59584+ printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
59585+ goto out_free_host;
59586+ }
59587+
59588+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
59589+ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
59590+ continue;
59591+
59592+ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
59593+ continue;
59594+
59595+ break;
59596+ }
59597+
59598+ if (i == DEVICE_COUNT_RESOURCE) {
59599+ printk(KERN_ERR
59600+ "vmw_pvscsi: adapter has no suitable MMIO region\n");
59601+ goto out_release_resources;
59602+ }
59603+
59604+ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
59605+
59606+ if (!adapter->mmioBase) {
59607+ printk(KERN_ERR
59608+ "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
59609+ i, PVSCSI_MEM_SPACE_SIZE);
59610+ goto out_release_resources;
59611+ }
59612+
59613+ pci_set_master(pdev);
59614+ pci_set_drvdata(pdev, host);
59615+
59616+ ll_adapter_reset(adapter);
59617+
59618+ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
59619+
59620+ error = pvscsi_allocate_rings(adapter);
59621+ if (error) {
59622+ printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
59623+ goto out_release_resources;
59624+ }
59625+
59626+ /*
59627+ * From this point on we should reset the adapter if anything goes
59628+ * wrong.
59629+ */
59630+ pvscsi_setup_all_rings(adapter);
59631+
59632+ adapter->cmd_map = kcalloc(adapter->req_depth,
59633+ sizeof(struct pvscsi_ctx), GFP_KERNEL);
59634+ if (!adapter->cmd_map) {
59635+ printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
59636+ error = -ENOMEM;
59637+ goto out_reset_adapter;
59638+ }
59639+
59640+ INIT_LIST_HEAD(&adapter->cmd_pool);
59641+ for (i = 0; i < adapter->req_depth; i++) {
59642+ struct pvscsi_ctx *ctx = adapter->cmd_map + i;
59643+ list_add(&ctx->list, &adapter->cmd_pool);
59644+ }
59645+
59646+ error = pvscsi_allocate_sg(adapter);
59647+ if (error) {
59648+ printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
59649+ goto out_reset_adapter;
59650+ }
59651+
59652+ if (!pvscsi_disable_msix &&
59653+ pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
59654+ printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
59655+ adapter->use_msix = 1;
59656+ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
59657+ printk(KERN_INFO "vmw_pvscsi: using MSI\n");
59658+ adapter->use_msi = 1;
59659+ adapter->irq = pdev->irq;
59660+ } else {
59661+ printk(KERN_INFO "vmw_pvscsi: using INTx\n");
59662+ adapter->irq = pdev->irq;
59663+ }
59664+
59665+ error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
59666+ "vmw_pvscsi", adapter);
59667+ if (error) {
59668+ printk(KERN_ERR
59669+ "vmw_pvscsi: unable to request IRQ: %d\n", error);
59670+ adapter->irq = 0;
59671+ goto out_reset_adapter;
59672+ }
59673+
59674+ error = scsi_add_host(host, &pdev->dev);
59675+ if (error) {
59676+ printk(KERN_ERR
59677+ "vmw_pvscsi: scsi_add_host failed: %d\n", error);
59678+ goto out_reset_adapter;
59679+ }
59680+
59681+ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
59682+ adapter->rev, host->host_no);
59683+
59684+ pvscsi_unmask_intr(adapter);
59685+
59686+ scsi_scan_host(host);
59687+
59688+ return 0;
59689+
59690+out_reset_adapter:
59691+ ll_adapter_reset(adapter);
59692+out_release_resources:
59693+ pvscsi_release_resources(adapter);
59694+out_free_host:
59695+ scsi_host_put(host);
59696+out_disable_device:
59697+ pci_set_drvdata(pdev, NULL);
59698+ pci_disable_device(pdev);
59699+
59700+ return error;
59701+}
59702+
59703+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
59704+{
59705+ pvscsi_mask_intr(adapter);
59706+
59707+ if (adapter->workqueue)
59708+ flush_workqueue(adapter->workqueue);
59709+
59710+ pvscsi_shutdown_intr(adapter);
59711+
59712+ pvscsi_process_request_ring(adapter);
59713+ pvscsi_process_completion_ring(adapter);
59714+ ll_adapter_reset(adapter);
59715+}
59716+
59717+static void pvscsi_shutdown(struct pci_dev *dev)
59718+{
59719+ struct Scsi_Host *host = pci_get_drvdata(dev);
59720+ struct pvscsi_adapter *adapter = shost_priv(host);
59721+
59722+ __pvscsi_shutdown(adapter);
59723+}
59724+
59725+static void pvscsi_remove(struct pci_dev *pdev)
59726+{
59727+ struct Scsi_Host *host = pci_get_drvdata(pdev);
59728+ struct pvscsi_adapter *adapter = shost_priv(host);
59729+
59730+ scsi_remove_host(host);
59731+
59732+ __pvscsi_shutdown(adapter);
59733+ pvscsi_release_resources(adapter);
59734+
59735+ scsi_host_put(host);
59736+
59737+ pci_set_drvdata(pdev, NULL);
59738+ pci_disable_device(pdev);
59739+}
59740+
59741+static struct pci_driver pvscsi_pci_driver = {
59742+ .name = "vmw_pvscsi",
59743+ .id_table = pvscsi_pci_tbl,
59744+ .probe = pvscsi_probe,
59745+ .remove = __devexit_p(pvscsi_remove),
59746+ .shutdown = pvscsi_shutdown,
59747+};
59748+
59749+static int __init pvscsi_init(void)
59750+{
59751+ pr_info("%s - version %s\n",
59752+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
59753+ return pci_register_driver(&pvscsi_pci_driver);
59754+}
59755+
59756+static void __exit pvscsi_exit(void)
59757+{
59758+ pci_unregister_driver(&pvscsi_pci_driver);
59759+}
59760+
59761+module_init(pvscsi_init);
59762+module_exit(pvscsi_exit);
59763diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
59764new file mode 100644
59765index 0000000..62e36e7
59766--- /dev/null
59767+++ b/drivers/scsi/vmw_pvscsi.h
59768@@ -0,0 +1,397 @@
59769+/*
59770+ * VMware PVSCSI header file
59771+ *
59772+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
59773+ *
59774+ * This program is free software; you can redistribute it and/or modify it
59775+ * under the terms of the GNU General Public License as published by the
59776+ * Free Software Foundation; version 2 of the License and no later version.
59777+ *
59778+ * This program is distributed in the hope that it will be useful, but
59779+ * WITHOUT ANY WARRANTY; without even the implied warranty of
59780+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
59781+ * NON INFRINGEMENT. See the GNU General Public License for more
59782+ * details.
59783+ *
59784+ * You should have received a copy of the GNU General Public License
59785+ * along with this program; if not, write to the Free Software
59786+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
59787+ *
59788+ * Maintained by: Alok N Kataria <akataria@vmware.com>
59789+ *
59790+ */
59791+
59792+#ifndef _VMW_PVSCSI_H_
59793+#define _VMW_PVSCSI_H_
59794+
59795+#include <linux/types.h>
59796+
59797+#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
59798+
59799+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
59800+
59801+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
59802+
59803+#define PCI_VENDOR_ID_VMWARE 0x15AD
59804+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
59805+
59806+/*
59807+ * host adapter status/error codes
59808+ */
59809+enum HostBusAdapterStatus {
59810+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
59811+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
59812+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
59813+ BTSTAT_DATA_UNDERRUN = 0x0c,
59814+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
59815+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
59816+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
59817+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
59818+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
59819+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
59820+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
59821+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
59822+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
59823+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
59824+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
59825+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
59826+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
59827+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
59828+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
59829+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
59830+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
59831+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
59832+};
59833+
59834+/*
59835+ * Register offsets.
59836+ *
59837+ * These registers are accessible both via i/o space and mm i/o.
59838+ */
59839+
59840+enum PVSCSIRegOffset {
59841+ PVSCSI_REG_OFFSET_COMMAND = 0x0,
59842+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
59843+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
59844+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
59845+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
59846+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
59847+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
59848+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
59849+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
59850+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
59851+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
59852+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
59853+};
59854+
59855+/*
59856+ * Virtual h/w commands.
59857+ */
59858+
59859+enum PVSCSICommands {
59860+ PVSCSI_CMD_FIRST = 0, /* has to be first */
59861+
59862+ PVSCSI_CMD_ADAPTER_RESET = 1,
59863+ PVSCSI_CMD_ISSUE_SCSI = 2,
59864+ PVSCSI_CMD_SETUP_RINGS = 3,
59865+ PVSCSI_CMD_RESET_BUS = 4,
59866+ PVSCSI_CMD_RESET_DEVICE = 5,
59867+ PVSCSI_CMD_ABORT_CMD = 6,
59868+ PVSCSI_CMD_CONFIG = 7,
59869+ PVSCSI_CMD_SETUP_MSG_RING = 8,
59870+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
59871+
59872+ PVSCSI_CMD_LAST = 10 /* has to be last */
59873+};
59874+
59875+/*
59876+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
59877+ */
59878+
59879+struct PVSCSICmdDescResetDevice {
59880+ u32 target;
59881+ u8 lun[8];
59882+} __packed;
59883+
59884+/*
59885+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
59886+ *
59887+ * - currently does not support specifying the LUN.
59888+ * - _pad should be 0.
59889+ */
59890+
59891+struct PVSCSICmdDescAbortCmd {
59892+ u64 context;
59893+ u32 target;
59894+ u32 _pad;
59895+} __packed;
59896+
59897+/*
59898+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
59899+ *
59900+ * Notes:
59901+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
59902+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
59903+ * - reqRingNumPages and cmpRingNumPages need to be inferior to
59904+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
59905+ */
59906+
59907+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
59908+struct PVSCSICmdDescSetupRings {
59909+ u32 reqRingNumPages;
59910+ u32 cmpRingNumPages;
59911+ u64 ringsStatePPN;
59912+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59913+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59914+} __packed;
59915+
59916+/*
59917+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
59918+ *
59919+ * Notes:
59920+ * - this command was not supported in the initial revision of the h/w
59921+ * interface. Before using it, you need to check that it is supported by
59922+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
59923+ * immediately after read the 'command status' register:
59924+ * * a value of -1 means that the cmd is NOT supported,
59925+ * * a value != -1 means that the cmd IS supported.
59926+ * If it's supported the 'command status' register should return:
59927+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
59928+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
59929+ * RingsState page is already setup. If not, the command is a nop.
59930+ * - numPages needs to be a power of two,
59931+ * - numPages needs to be different from 0,
59932+ * - _pad should be zero.
59933+ */
59934+
59935+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
59936+
59937+struct PVSCSICmdDescSetupMsgRing {
59938+ u32 numPages;
59939+ u32 _pad;
59940+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
59941+} __packed;
59942+
59943+enum PVSCSIMsgType {
59944+ PVSCSI_MSG_DEV_ADDED = 0,
59945+ PVSCSI_MSG_DEV_REMOVED = 1,
59946+ PVSCSI_MSG_LAST = 2,
59947+};
59948+
59949+/*
59950+ * Msg descriptor.
59951+ *
59952+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
59953+ *
59954+ * - type is of type enum PVSCSIMsgType.
59955+ * - the content of args depend on the type of event being delivered.
59956+ */
59957+
59958+struct PVSCSIRingMsgDesc {
59959+ u32 type;
59960+ u32 args[31];
59961+} __packed;
59962+
59963+struct PVSCSIMsgDescDevStatusChanged {
59964+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
59965+ u32 bus;
59966+ u32 target;
59967+ u8 lun[8];
59968+ u32 pad[27];
59969+} __packed;
59970+
59971+/*
59972+ * Rings state.
59973+ *
59974+ * - the fields:
59975+ * . msgProdIdx,
59976+ * . msgConsIdx,
59977+ * . msgNumEntriesLog2,
59978+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
59979+ * - '_pad' helps to ensure that the msg related fields are on their own
59980+ * cache-line.
59981+ */
59982+
59983+struct PVSCSIRingsState {
59984+ u32 reqProdIdx;
59985+ u32 reqConsIdx;
59986+ u32 reqNumEntriesLog2;
59987+
59988+ u32 cmpProdIdx;
59989+ u32 cmpConsIdx;
59990+ u32 cmpNumEntriesLog2;
59991+
59992+ u8 _pad[104];
59993+
59994+ u32 msgProdIdx;
59995+ u32 msgConsIdx;
59996+ u32 msgNumEntriesLog2;
59997+} __packed;
59998+
59999+/*
60000+ * Request descriptor.
60001+ *
60002+ * sizeof(RingReqDesc) = 128
60003+ *
60004+ * - context: is a unique identifier of a command. It could normally be any
60005+ * 64bit value, however we currently store it in the serialNumber variable
60006+ * of struct SCSI_Command, so we have the following restrictions due to the
60007+ * way this field is handled in the vmkernel storage stack:
60008+ * * this value can't be 0,
60009+ * * the upper 32bit need to be 0 since serialNumber is as a u32.
60010+ * Currently tracked as PR 292060.
60011+ * - dataLen: contains the total number of bytes that need to be transferred.
60012+ * - dataAddr:
60013+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
60014+ * s/g table segment, each s/g segment is entirely contained on a single
60015+ * page of physical memory,
60016+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
60017+ * the buffer used for the DMA transfer,
60018+ * - flags:
60019+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
60020+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
60021+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
60022+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
60023+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
60024+ * 16bytes. To be specified.
60025+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
60026+ * completion of the i/o. For guest OSes that use lowest priority message
60027+ * delivery mode (such as windows), we use this "hint" to deliver the
60028+ * completion action to the proper vcpu. For now, we can use the vcpuId of
60029+ * the processor that initiated the i/o as a likely candidate for the vcpu
60030+ * that will be waiting for the completion..
60031+ * - bus should be 0: we currently only support bus 0 for now.
60032+ * - unused should be zero'd.
60033+ */
60034+
60035+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
60036+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
60037+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
60038+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
60039+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
60040+
60041+struct PVSCSIRingReqDesc {
60042+ u64 context;
60043+ u64 dataAddr;
60044+ u64 dataLen;
60045+ u64 senseAddr;
60046+ u32 senseLen;
60047+ u32 flags;
60048+ u8 cdb[16];
60049+ u8 cdbLen;
60050+ u8 lun[8];
60051+ u8 tag;
60052+ u8 bus;
60053+ u8 target;
60054+ u8 vcpuHint;
60055+ u8 unused[59];
60056+} __packed;
60057+
60058+/*
60059+ * Scatter-gather list management.
60060+ *
60061+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
60062+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
60063+ * table segment.
60064+ *
60065+ * - each segment of the s/g table contain a succession of struct
60066+ * PVSCSISGElement.
60067+ * - each segment is entirely contained on a single physical page of memory.
60068+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
60069+ * PVSCSISGElement.flags and in this case:
60070+ * * addr is the PA of the next s/g segment,
60071+ * * length is undefined, assumed to be 0.
60072+ */
60073+
60074+struct PVSCSISGElement {
60075+ u64 addr;
60076+ u32 length;
60077+ u32 flags;
60078+} __packed;
60079+
60080+/*
60081+ * Completion descriptor.
60082+ *
60083+ * sizeof(RingCmpDesc) = 32
60084+ *
60085+ * - context: identifier of the command. The same thing that was specified
60086+ * under "context" as part of struct RingReqDesc at initiation time,
60087+ * - dataLen: number of bytes transferred for the actual i/o operation,
60088+ * - senseLen: number of bytes written into the sense buffer,
60089+ * - hostStatus: adapter status,
60090+ * - scsiStatus: device status,
60091+ * - _pad should be zero.
60092+ */
60093+
60094+struct PVSCSIRingCmpDesc {
60095+ u64 context;
60096+ u64 dataLen;
60097+ u32 senseLen;
60098+ u16 hostStatus;
60099+ u16 scsiStatus;
60100+ u32 _pad[2];
60101+} __packed;
60102+
60103+/*
60104+ * Interrupt status / IRQ bits.
60105+ */
60106+
60107+#define PVSCSI_INTR_CMPL_0 (1 << 0)
60108+#define PVSCSI_INTR_CMPL_1 (1 << 1)
60109+#define PVSCSI_INTR_CMPL_MASK MASK(2)
60110+
60111+#define PVSCSI_INTR_MSG_0 (1 << 2)
60112+#define PVSCSI_INTR_MSG_1 (1 << 3)
60113+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
60114+
60115+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
60116+
60117+/*
60118+ * Number of MSI-X vectors supported.
60119+ */
60120+#define PVSCSI_MAX_INTRS 24
60121+
60122+/*
60123+ * Enumeration of supported MSI-X vectors
60124+ */
60125+#define PVSCSI_VECTOR_COMPLETION 0
60126+
60127+/*
60128+ * Misc constants for the rings.
60129+ */
60130+
60131+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60132+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
60133+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
60134+
60135+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
60136+ (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
60137+
60138+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
60139+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
60140+
60141+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
60142+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
60143+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
60144+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
60145+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
60146+
60147+enum PVSCSIMemSpace {
60148+ PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
60149+ PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
60150+ PVSCSI_MEM_SPACE_MISC_PAGE = 2,
60151+ PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
60152+ PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
60153+ PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
60154+};
60155+
60156+#define PVSCSI_MEM_SPACE_NUM_PAGES \
60157+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
60158+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
60159+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
60160+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
60161+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
60162+
60163+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
60164+
60165+#endif /* _VMW_PVSCSI_H_ */
60166diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
60167index eadc1ab..2d81457 100644
60168--- a/drivers/serial/kgdboc.c
60169+++ b/drivers/serial/kgdboc.c
60170@@ -18,7 +18,7 @@
60171
60172 #define MAX_CONFIG_LEN 40
60173
60174-static struct kgdb_io kgdboc_io_ops;
60175+static const struct kgdb_io kgdboc_io_ops;
60176
60177 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
60178 static int configured = -1;
60179@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
60180 module_put(THIS_MODULE);
60181 }
60182
60183-static struct kgdb_io kgdboc_io_ops = {
60184+static const struct kgdb_io kgdboc_io_ops = {
60185 .name = "kgdboc",
60186 .read_char = kgdboc_get_char,
60187 .write_char = kgdboc_put_char,
60188diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
60189index b76f246..7f41af7 100644
60190--- a/drivers/spi/spi.c
60191+++ b/drivers/spi/spi.c
60192@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
60193 EXPORT_SYMBOL_GPL(spi_sync);
60194
60195 /* portable code must never pass more than 32 bytes */
60196-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
60197+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
60198
60199 static u8 *buf;
60200
60201diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
60202index b9b37ff..19dfa23 100644
60203--- a/drivers/staging/android/binder.c
60204+++ b/drivers/staging/android/binder.c
60205@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
60206 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
60207 }
60208
60209-static struct vm_operations_struct binder_vm_ops = {
60210+static const struct vm_operations_struct binder_vm_ops = {
60211 .open = binder_vma_open,
60212 .close = binder_vma_close,
60213 };
60214diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
60215index cda26bb..39fed3f 100644
60216--- a/drivers/staging/b3dfg/b3dfg.c
60217+++ b/drivers/staging/b3dfg/b3dfg.c
60218@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
60219 return VM_FAULT_NOPAGE;
60220 }
60221
60222-static struct vm_operations_struct b3dfg_vm_ops = {
60223+static const struct vm_operations_struct b3dfg_vm_ops = {
60224 .fault = b3dfg_vma_fault,
60225 };
60226
60227@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
60228 return r;
60229 }
60230
60231-static struct file_operations b3dfg_fops = {
60232+static const struct file_operations b3dfg_fops = {
60233 .owner = THIS_MODULE,
60234 .open = b3dfg_open,
60235 .release = b3dfg_release,
60236diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
60237index 908f25a..c9a579b 100644
60238--- a/drivers/staging/comedi/comedi_fops.c
60239+++ b/drivers/staging/comedi/comedi_fops.c
60240@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
60241 mutex_unlock(&dev->mutex);
60242 }
60243
60244-static struct vm_operations_struct comedi_vm_ops = {
60245+static const struct vm_operations_struct comedi_vm_ops = {
60246 .close = comedi_unmap,
60247 };
60248
60249diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
60250index e55a0db..577b776 100644
60251--- a/drivers/staging/dream/qdsp5/adsp_driver.c
60252+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
60253@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
60254 static dev_t adsp_devno;
60255 static struct class *adsp_class;
60256
60257-static struct file_operations adsp_fops = {
60258+static const struct file_operations adsp_fops = {
60259 .owner = THIS_MODULE,
60260 .open = adsp_open,
60261 .unlocked_ioctl = adsp_ioctl,
60262diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
60263index ad2390f..4116ee8 100644
60264--- a/drivers/staging/dream/qdsp5/audio_aac.c
60265+++ b/drivers/staging/dream/qdsp5/audio_aac.c
60266@@ -1022,7 +1022,7 @@ done:
60267 return rc;
60268 }
60269
60270-static struct file_operations audio_aac_fops = {
60271+static const struct file_operations audio_aac_fops = {
60272 .owner = THIS_MODULE,
60273 .open = audio_open,
60274 .release = audio_release,
60275diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
60276index cd818a5..870b37b 100644
60277--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
60278+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
60279@@ -833,7 +833,7 @@ done:
60280 return rc;
60281 }
60282
60283-static struct file_operations audio_amrnb_fops = {
60284+static const struct file_operations audio_amrnb_fops = {
60285 .owner = THIS_MODULE,
60286 .open = audamrnb_open,
60287 .release = audamrnb_release,
60288diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
60289index 4b43e18..cedafda 100644
60290--- a/drivers/staging/dream/qdsp5/audio_evrc.c
60291+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
60292@@ -805,7 +805,7 @@ dma_fail:
60293 return rc;
60294 }
60295
60296-static struct file_operations audio_evrc_fops = {
60297+static const struct file_operations audio_evrc_fops = {
60298 .owner = THIS_MODULE,
60299 .open = audevrc_open,
60300 .release = audevrc_release,
60301diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
60302index 3d950a2..9431118 100644
60303--- a/drivers/staging/dream/qdsp5/audio_in.c
60304+++ b/drivers/staging/dream/qdsp5/audio_in.c
60305@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
60306 return 0;
60307 }
60308
60309-static struct file_operations audio_fops = {
60310+static const struct file_operations audio_fops = {
60311 .owner = THIS_MODULE,
60312 .open = audio_in_open,
60313 .release = audio_in_release,
60314@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
60315 .unlocked_ioctl = audio_in_ioctl,
60316 };
60317
60318-static struct file_operations audpre_fops = {
60319+static const struct file_operations audpre_fops = {
60320 .owner = THIS_MODULE,
60321 .open = audpre_open,
60322 .unlocked_ioctl = audpre_ioctl,
60323diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
60324index b95574f..286c2f4 100644
60325--- a/drivers/staging/dream/qdsp5/audio_mp3.c
60326+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
60327@@ -941,7 +941,7 @@ done:
60328 return rc;
60329 }
60330
60331-static struct file_operations audio_mp3_fops = {
60332+static const struct file_operations audio_mp3_fops = {
60333 .owner = THIS_MODULE,
60334 .open = audio_open,
60335 .release = audio_release,
60336diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
60337index d1adcf6..f8f9833 100644
60338--- a/drivers/staging/dream/qdsp5/audio_out.c
60339+++ b/drivers/staging/dream/qdsp5/audio_out.c
60340@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
60341 return 0;
60342 }
60343
60344-static struct file_operations audio_fops = {
60345+static const struct file_operations audio_fops = {
60346 .owner = THIS_MODULE,
60347 .open = audio_open,
60348 .release = audio_release,
60349@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
60350 .unlocked_ioctl = audio_ioctl,
60351 };
60352
60353-static struct file_operations audpp_fops = {
60354+static const struct file_operations audpp_fops = {
60355 .owner = THIS_MODULE,
60356 .open = audpp_open,
60357 .unlocked_ioctl = audpp_ioctl,
60358diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
60359index f0f50e3..f6b9dbc 100644
60360--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
60361+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
60362@@ -816,7 +816,7 @@ err:
60363 return rc;
60364 }
60365
60366-static struct file_operations audio_qcelp_fops = {
60367+static const struct file_operations audio_qcelp_fops = {
60368 .owner = THIS_MODULE,
60369 .open = audqcelp_open,
60370 .release = audqcelp_release,
60371diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
60372index 037d7ff..5469ec3 100644
60373--- a/drivers/staging/dream/qdsp5/snd.c
60374+++ b/drivers/staging/dream/qdsp5/snd.c
60375@@ -242,7 +242,7 @@ err:
60376 return rc;
60377 }
60378
60379-static struct file_operations snd_fops = {
60380+static const struct file_operations snd_fops = {
60381 .owner = THIS_MODULE,
60382 .open = snd_open,
60383 .release = snd_release,
60384diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
60385index d4e7d88..0ea632a 100644
60386--- a/drivers/staging/dream/smd/smd_qmi.c
60387+++ b/drivers/staging/dream/smd/smd_qmi.c
60388@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
60389 return 0;
60390 }
60391
60392-static struct file_operations qmi_fops = {
60393+static const struct file_operations qmi_fops = {
60394 .owner = THIS_MODULE,
60395 .read = qmi_read,
60396 .write = qmi_write,
60397diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60398index cd3910b..ff053d3 100644
60399--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
60400+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
60401@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
60402 return rc;
60403 }
60404
60405-static struct file_operations rpcrouter_server_fops = {
60406+static const struct file_operations rpcrouter_server_fops = {
60407 .owner = THIS_MODULE,
60408 .open = rpcrouter_open,
60409 .release = rpcrouter_release,
60410@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
60411 .unlocked_ioctl = rpcrouter_ioctl,
60412 };
60413
60414-static struct file_operations rpcrouter_router_fops = {
60415+static const struct file_operations rpcrouter_router_fops = {
60416 .owner = THIS_MODULE,
60417 .open = rpcrouter_open,
60418 .release = rpcrouter_release,
60419diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
60420index c24e4e0..07665be 100644
60421--- a/drivers/staging/dst/dcore.c
60422+++ b/drivers/staging/dst/dcore.c
60423@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
60424 return 0;
60425 }
60426
60427-static struct block_device_operations dst_blk_ops = {
60428+static const struct block_device_operations dst_blk_ops = {
60429 .open = dst_bdev_open,
60430 .release = dst_bdev_release,
60431 .owner = THIS_MODULE,
60432@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
60433 n->size = ctl->size;
60434
60435 atomic_set(&n->refcnt, 1);
60436- atomic_long_set(&n->gen, 0);
60437+ atomic_long_set_unchecked(&n->gen, 0);
60438 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
60439
60440 err = dst_node_sysfs_init(n);
60441diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
60442index 557d372..8d84422 100644
60443--- a/drivers/staging/dst/trans.c
60444+++ b/drivers/staging/dst/trans.c
60445@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
60446 t->error = 0;
60447 t->retries = 0;
60448 atomic_set(&t->refcnt, 1);
60449- t->gen = atomic_long_inc_return(&n->gen);
60450+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
60451
60452 t->enc = bio_data_dir(bio);
60453 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
60454diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
60455index 94f7752..d051514 100644
60456--- a/drivers/staging/et131x/et1310_tx.c
60457+++ b/drivers/staging/et131x/et1310_tx.c
60458@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
60459 struct net_device_stats *stats = &etdev->net_stats;
60460
60461 if (pMpTcb->Flags & fMP_DEST_BROAD)
60462- atomic_inc(&etdev->Stats.brdcstxmt);
60463+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
60464 else if (pMpTcb->Flags & fMP_DEST_MULTI)
60465- atomic_inc(&etdev->Stats.multixmt);
60466+ atomic_inc_unchecked(&etdev->Stats.multixmt);
60467 else
60468- atomic_inc(&etdev->Stats.unixmt);
60469+ atomic_inc_unchecked(&etdev->Stats.unixmt);
60470
60471 if (pMpTcb->Packet) {
60472 stats->tx_bytes += pMpTcb->Packet->len;
60473diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
60474index 1dfe06f..f469b4d 100644
60475--- a/drivers/staging/et131x/et131x_adapter.h
60476+++ b/drivers/staging/et131x/et131x_adapter.h
60477@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
60478 * operations
60479 */
60480 u32 unircv; /* # multicast packets received */
60481- atomic_t unixmt; /* # multicast packets for Tx */
60482+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
60483 u32 multircv; /* # multicast packets received */
60484- atomic_t multixmt; /* # multicast packets for Tx */
60485+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
60486 u32 brdcstrcv; /* # broadcast packets received */
60487- atomic_t brdcstxmt; /* # broadcast packets for Tx */
60488+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
60489 u32 norcvbuf; /* # Rx packets discarded */
60490 u32 noxmtbuf; /* # Tx packets discarded */
60491
60492diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
60493index 4bd353a..e28f455 100644
60494--- a/drivers/staging/go7007/go7007-v4l2.c
60495+++ b/drivers/staging/go7007/go7007-v4l2.c
60496@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60497 return 0;
60498 }
60499
60500-static struct vm_operations_struct go7007_vm_ops = {
60501+static const struct vm_operations_struct go7007_vm_ops = {
60502 .open = go7007_vm_open,
60503 .close = go7007_vm_close,
60504 .fault = go7007_vm_fault,
60505diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
60506index 366dc95..b974d87 100644
60507--- a/drivers/staging/hv/Channel.c
60508+++ b/drivers/staging/hv/Channel.c
60509@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
60510
60511 DPRINT_ENTER(VMBUS);
60512
60513- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
60514- atomic_inc(&gVmbusConnection.NextGpadlHandle);
60515+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
60516+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
60517
60518 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
60519 ASSERT(msgInfo != NULL);
60520diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
60521index b12237f..01ae28a 100644
60522--- a/drivers/staging/hv/Hv.c
60523+++ b/drivers/staging/hv/Hv.c
60524@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
60525 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
60526 u32 outputAddressHi = outputAddress >> 32;
60527 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
60528- volatile void *hypercallPage = gHvContext.HypercallPage;
60529+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
60530
60531 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
60532 Control, Input, Output);
60533diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
60534index d089bb1..2ebc158 100644
60535--- a/drivers/staging/hv/VmbusApi.h
60536+++ b/drivers/staging/hv/VmbusApi.h
60537@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
60538 u32 *GpadlHandle);
60539 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
60540 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
60541-};
60542+} __no_const;
60543
60544 /* Base driver object */
60545 struct hv_driver {
60546diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
60547index 5a37cce..6ecc88c 100644
60548--- a/drivers/staging/hv/VmbusPrivate.h
60549+++ b/drivers/staging/hv/VmbusPrivate.h
60550@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
60551 struct VMBUS_CONNECTION {
60552 enum VMBUS_CONNECT_STATE ConnectState;
60553
60554- atomic_t NextGpadlHandle;
60555+ atomic_unchecked_t NextGpadlHandle;
60556
60557 /*
60558 * Represents channel interrupts. Each bit position represents a
60559diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
60560index 871a202..ca50ddf 100644
60561--- a/drivers/staging/hv/blkvsc_drv.c
60562+++ b/drivers/staging/hv/blkvsc_drv.c
60563@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
60564 /* The one and only one */
60565 static struct blkvsc_driver_context g_blkvsc_drv;
60566
60567-static struct block_device_operations block_ops = {
60568+static const struct block_device_operations block_ops = {
60569 .owner = THIS_MODULE,
60570 .open = blkvsc_open,
60571 .release = blkvsc_release,
60572diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
60573index 6acc49a..fbc8d46 100644
60574--- a/drivers/staging/hv/vmbus_drv.c
60575+++ b/drivers/staging/hv/vmbus_drv.c
60576@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60577 to_device_context(root_device_obj);
60578 struct device_context *child_device_ctx =
60579 to_device_context(child_device_obj);
60580- static atomic_t device_num = ATOMIC_INIT(0);
60581+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
60582
60583 DPRINT_ENTER(VMBUS_DRV);
60584
60585@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60586
60587 /* Set the device name. Otherwise, device_register() will fail. */
60588 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
60589- atomic_inc_return(&device_num));
60590+ atomic_inc_return_unchecked(&device_num));
60591
60592 /* The new device belongs to this bus */
60593 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
60594diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
60595index d926189..17b19fd 100644
60596--- a/drivers/staging/iio/ring_generic.h
60597+++ b/drivers/staging/iio/ring_generic.h
60598@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
60599
60600 int (*is_enabled)(struct iio_ring_buffer *ring);
60601 int (*enable)(struct iio_ring_buffer *ring);
60602-};
60603+} __no_const;
60604
60605 /**
60606 * struct iio_ring_buffer - general ring buffer structure
60607diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
60608index 1b237b7..88c624e 100644
60609--- a/drivers/staging/octeon/ethernet-rx.c
60610+++ b/drivers/staging/octeon/ethernet-rx.c
60611@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60612 /* Increment RX stats for virtual ports */
60613 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
60614 #ifdef CONFIG_64BIT
60615- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
60616- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
60617+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
60618+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
60619 #else
60620- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
60621- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
60622+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
60623+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
60624 #endif
60625 }
60626 netif_receive_skb(skb);
60627@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60628 dev->name);
60629 */
60630 #ifdef CONFIG_64BIT
60631- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
60632+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
60633 #else
60634- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
60635+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
60636 #endif
60637 dev_kfree_skb_irq(skb);
60638 }
60639diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
60640index 492c502..d9909f1 100644
60641--- a/drivers/staging/octeon/ethernet.c
60642+++ b/drivers/staging/octeon/ethernet.c
60643@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
60644 * since the RX tasklet also increments it.
60645 */
60646 #ifdef CONFIG_64BIT
60647- atomic64_add(rx_status.dropped_packets,
60648- (atomic64_t *)&priv->stats.rx_dropped);
60649+ atomic64_add_unchecked(rx_status.dropped_packets,
60650+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
60651 #else
60652- atomic_add(rx_status.dropped_packets,
60653- (atomic_t *)&priv->stats.rx_dropped);
60654+ atomic_add_unchecked(rx_status.dropped_packets,
60655+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
60656 #endif
60657 }
60658
60659diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
60660index a35bd5d..28fff45 100644
60661--- a/drivers/staging/otus/80211core/pub_zfi.h
60662+++ b/drivers/staging/otus/80211core/pub_zfi.h
60663@@ -531,7 +531,7 @@ struct zsCbFuncTbl
60664 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
60665
60666 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
60667-};
60668+} __no_const;
60669
60670 extern void zfZeroMemory(u8_t* va, u16_t length);
60671 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
60672diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
60673index c39a25f..696f5aa 100644
60674--- a/drivers/staging/panel/panel.c
60675+++ b/drivers/staging/panel/panel.c
60676@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
60677 return 0;
60678 }
60679
60680-static struct file_operations lcd_fops = {
60681+static const struct file_operations lcd_fops = {
60682 .write = lcd_write,
60683 .open = lcd_open,
60684 .release = lcd_release,
60685@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
60686 return 0;
60687 }
60688
60689-static struct file_operations keypad_fops = {
60690+static const struct file_operations keypad_fops = {
60691 .read = keypad_read, /* read */
60692 .open = keypad_open, /* open */
60693 .release = keypad_release, /* close */
60694diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
60695index 270ebcb..37e46af 100644
60696--- a/drivers/staging/phison/phison.c
60697+++ b/drivers/staging/phison/phison.c
60698@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
60699 ATA_BMDMA_SHT(DRV_NAME),
60700 };
60701
60702-static struct ata_port_operations phison_ops = {
60703+static const struct ata_port_operations phison_ops = {
60704 .inherits = &ata_bmdma_port_ops,
60705 .prereset = phison_pre_reset,
60706 };
60707diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
60708index 2eb8e3d..57616a7 100644
60709--- a/drivers/staging/poch/poch.c
60710+++ b/drivers/staging/poch/poch.c
60711@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
60712 return 0;
60713 }
60714
60715-static struct file_operations poch_fops = {
60716+static const struct file_operations poch_fops = {
60717 .owner = THIS_MODULE,
60718 .open = poch_open,
60719 .release = poch_release,
60720diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
60721index c94de31..19402bc 100644
60722--- a/drivers/staging/pohmelfs/inode.c
60723+++ b/drivers/staging/pohmelfs/inode.c
60724@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60725 mutex_init(&psb->mcache_lock);
60726 psb->mcache_root = RB_ROOT;
60727 psb->mcache_timeout = msecs_to_jiffies(5000);
60728- atomic_long_set(&psb->mcache_gen, 0);
60729+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
60730
60731 psb->trans_max_pages = 100;
60732
60733@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60734 INIT_LIST_HEAD(&psb->crypto_ready_list);
60735 INIT_LIST_HEAD(&psb->crypto_active_list);
60736
60737- atomic_set(&psb->trans_gen, 1);
60738+ atomic_set_unchecked(&psb->trans_gen, 1);
60739 atomic_long_set(&psb->total_inodes, 0);
60740
60741 mutex_init(&psb->state_lock);
60742diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
60743index e22665c..a2a9390 100644
60744--- a/drivers/staging/pohmelfs/mcache.c
60745+++ b/drivers/staging/pohmelfs/mcache.c
60746@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
60747 m->data = data;
60748 m->start = start;
60749 m->size = size;
60750- m->gen = atomic_long_inc_return(&psb->mcache_gen);
60751+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
60752
60753 mutex_lock(&psb->mcache_lock);
60754 err = pohmelfs_mcache_insert(psb, m);
60755diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
60756index 623a07d..4035c19 100644
60757--- a/drivers/staging/pohmelfs/netfs.h
60758+++ b/drivers/staging/pohmelfs/netfs.h
60759@@ -570,14 +570,14 @@ struct pohmelfs_config;
60760 struct pohmelfs_sb {
60761 struct rb_root mcache_root;
60762 struct mutex mcache_lock;
60763- atomic_long_t mcache_gen;
60764+ atomic_long_unchecked_t mcache_gen;
60765 unsigned long mcache_timeout;
60766
60767 unsigned int idx;
60768
60769 unsigned int trans_retries;
60770
60771- atomic_t trans_gen;
60772+ atomic_unchecked_t trans_gen;
60773
60774 unsigned int crypto_attached_size;
60775 unsigned int crypto_align_size;
60776diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
60777index 36a2535..0591bf4 100644
60778--- a/drivers/staging/pohmelfs/trans.c
60779+++ b/drivers/staging/pohmelfs/trans.c
60780@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
60781 int err;
60782 struct netfs_cmd *cmd = t->iovec.iov_base;
60783
60784- t->gen = atomic_inc_return(&psb->trans_gen);
60785+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
60786
60787 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
60788 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
60789diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
60790index f890a16..509ece8 100644
60791--- a/drivers/staging/sep/sep_driver.c
60792+++ b/drivers/staging/sep/sep_driver.c
60793@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
60794 static dev_t sep_devno;
60795
60796 /* the files operations structure of the driver */
60797-static struct file_operations sep_file_operations = {
60798+static const struct file_operations sep_file_operations = {
60799 .owner = THIS_MODULE,
60800 .ioctl = sep_ioctl,
60801 .poll = sep_poll,
60802diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
60803index 5e16bc3..7655b10 100644
60804--- a/drivers/staging/usbip/usbip_common.h
60805+++ b/drivers/staging/usbip/usbip_common.h
60806@@ -374,7 +374,7 @@ struct usbip_device {
60807 void (*shutdown)(struct usbip_device *);
60808 void (*reset)(struct usbip_device *);
60809 void (*unusable)(struct usbip_device *);
60810- } eh_ops;
60811+ } __no_const eh_ops;
60812 };
60813
60814
60815diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
60816index 57f7946..d9df23d 100644
60817--- a/drivers/staging/usbip/vhci.h
60818+++ b/drivers/staging/usbip/vhci.h
60819@@ -92,7 +92,7 @@ struct vhci_hcd {
60820 unsigned resuming:1;
60821 unsigned long re_timeout;
60822
60823- atomic_t seqnum;
60824+ atomic_unchecked_t seqnum;
60825
60826 /*
60827 * NOTE:
60828diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
60829index 20cd7db..c2693ff 100644
60830--- a/drivers/staging/usbip/vhci_hcd.c
60831+++ b/drivers/staging/usbip/vhci_hcd.c
60832@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
60833 return;
60834 }
60835
60836- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
60837+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60838 if (priv->seqnum == 0xffff)
60839 usbip_uinfo("seqnum max\n");
60840
60841@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
60842 return -ENOMEM;
60843 }
60844
60845- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
60846+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60847 if (unlink->seqnum == 0xffff)
60848 usbip_uinfo("seqnum max\n");
60849
60850@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
60851 vdev->rhport = rhport;
60852 }
60853
60854- atomic_set(&vhci->seqnum, 0);
60855+ atomic_set_unchecked(&vhci->seqnum, 0);
60856 spin_lock_init(&vhci->lock);
60857
60858
60859diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
60860index 7fd76fe..673695a 100644
60861--- a/drivers/staging/usbip/vhci_rx.c
60862+++ b/drivers/staging/usbip/vhci_rx.c
60863@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
60864 usbip_uerr("cannot find a urb of seqnum %u\n",
60865 pdu->base.seqnum);
60866 usbip_uinfo("max seqnum %d\n",
60867- atomic_read(&the_controller->seqnum));
60868+ atomic_read_unchecked(&the_controller->seqnum));
60869 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
60870 return;
60871 }
60872diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
60873index 7891288..8e31300 100644
60874--- a/drivers/staging/vme/devices/vme_user.c
60875+++ b/drivers/staging/vme/devices/vme_user.c
60876@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
60877 static int __init vme_user_probe(struct device *, int, int);
60878 static int __exit vme_user_remove(struct device *, int, int);
60879
60880-static struct file_operations vme_user_fops = {
60881+static const struct file_operations vme_user_fops = {
60882 .open = vme_user_open,
60883 .release = vme_user_release,
60884 .read = vme_user_read,
60885diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
60886index 58abf44..00c1fc8 100644
60887--- a/drivers/staging/vt6655/hostap.c
60888+++ b/drivers/staging/vt6655/hostap.c
60889@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60890 PSDevice apdev_priv;
60891 struct net_device *dev = pDevice->dev;
60892 int ret;
60893- const struct net_device_ops apdev_netdev_ops = {
60894+ net_device_ops_no_const apdev_netdev_ops = {
60895 .ndo_start_xmit = pDevice->tx_80211,
60896 };
60897
60898diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
60899index 0c8267a..db1f363 100644
60900--- a/drivers/staging/vt6656/hostap.c
60901+++ b/drivers/staging/vt6656/hostap.c
60902@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60903 PSDevice apdev_priv;
60904 struct net_device *dev = pDevice->dev;
60905 int ret;
60906- const struct net_device_ops apdev_netdev_ops = {
60907+ net_device_ops_no_const apdev_netdev_ops = {
60908 .ndo_start_xmit = pDevice->tx_80211,
60909 };
60910
60911diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
60912index 925678b..da7f5ed 100644
60913--- a/drivers/staging/wlan-ng/hfa384x_usb.c
60914+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
60915@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
60916
60917 struct usbctlx_completor {
60918 int (*complete) (struct usbctlx_completor *);
60919-};
60920+} __no_const;
60921 typedef struct usbctlx_completor usbctlx_completor_t;
60922
60923 static int
60924diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
60925index 40de151..924f268 100644
60926--- a/drivers/telephony/ixj.c
60927+++ b/drivers/telephony/ixj.c
60928@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
60929 bool mContinue;
60930 char *pIn, *pOut;
60931
60932+ pax_track_stack();
60933+
60934 if (!SCI_Prepare(j))
60935 return 0;
60936
60937diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
60938index e941367..b631f5a 100644
60939--- a/drivers/uio/uio.c
60940+++ b/drivers/uio/uio.c
60941@@ -23,6 +23,7 @@
60942 #include <linux/string.h>
60943 #include <linux/kobject.h>
60944 #include <linux/uio_driver.h>
60945+#include <asm/local.h>
60946
60947 #define UIO_MAX_DEVICES 255
60948
60949@@ -30,10 +31,10 @@ struct uio_device {
60950 struct module *owner;
60951 struct device *dev;
60952 int minor;
60953- atomic_t event;
60954+ atomic_unchecked_t event;
60955 struct fasync_struct *async_queue;
60956 wait_queue_head_t wait;
60957- int vma_count;
60958+ local_t vma_count;
60959 struct uio_info *info;
60960 struct kobject *map_dir;
60961 struct kobject *portio_dir;
60962@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
60963 return entry->show(mem, buf);
60964 }
60965
60966-static struct sysfs_ops map_sysfs_ops = {
60967+static const struct sysfs_ops map_sysfs_ops = {
60968 .show = map_type_show,
60969 };
60970
60971@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
60972 return entry->show(port, buf);
60973 }
60974
60975-static struct sysfs_ops portio_sysfs_ops = {
60976+static const struct sysfs_ops portio_sysfs_ops = {
60977 .show = portio_type_show,
60978 };
60979
60980@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
60981 struct uio_device *idev = dev_get_drvdata(dev);
60982 if (idev)
60983 return sprintf(buf, "%u\n",
60984- (unsigned int)atomic_read(&idev->event));
60985+ (unsigned int)atomic_read_unchecked(&idev->event));
60986 else
60987 return -ENODEV;
60988 }
60989@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
60990 {
60991 struct uio_device *idev = info->uio_dev;
60992
60993- atomic_inc(&idev->event);
60994+ atomic_inc_unchecked(&idev->event);
60995 wake_up_interruptible(&idev->wait);
60996 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
60997 }
60998@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
60999 }
61000
61001 listener->dev = idev;
61002- listener->event_count = atomic_read(&idev->event);
61003+ listener->event_count = atomic_read_unchecked(&idev->event);
61004 filep->private_data = listener;
61005
61006 if (idev->info->open) {
61007@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
61008 return -EIO;
61009
61010 poll_wait(filep, &idev->wait, wait);
61011- if (listener->event_count != atomic_read(&idev->event))
61012+ if (listener->event_count != atomic_read_unchecked(&idev->event))
61013 return POLLIN | POLLRDNORM;
61014 return 0;
61015 }
61016@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
61017 do {
61018 set_current_state(TASK_INTERRUPTIBLE);
61019
61020- event_count = atomic_read(&idev->event);
61021+ event_count = atomic_read_unchecked(&idev->event);
61022 if (event_count != listener->event_count) {
61023 if (copy_to_user(buf, &event_count, count))
61024 retval = -EFAULT;
61025@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
61026 static void uio_vma_open(struct vm_area_struct *vma)
61027 {
61028 struct uio_device *idev = vma->vm_private_data;
61029- idev->vma_count++;
61030+ local_inc(&idev->vma_count);
61031 }
61032
61033 static void uio_vma_close(struct vm_area_struct *vma)
61034 {
61035 struct uio_device *idev = vma->vm_private_data;
61036- idev->vma_count--;
61037+ local_dec(&idev->vma_count);
61038 }
61039
61040 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
61041@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
61042 idev->owner = owner;
61043 idev->info = info;
61044 init_waitqueue_head(&idev->wait);
61045- atomic_set(&idev->event, 0);
61046+ atomic_set_unchecked(&idev->event, 0);
61047
61048 ret = uio_get_minor(idev);
61049 if (ret)
61050diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
61051index fbea856..06efea6 100644
61052--- a/drivers/usb/atm/usbatm.c
61053+++ b/drivers/usb/atm/usbatm.c
61054@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61055 if (printk_ratelimit())
61056 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
61057 __func__, vpi, vci);
61058- atomic_inc(&vcc->stats->rx_err);
61059+ atomic_inc_unchecked(&vcc->stats->rx_err);
61060 return;
61061 }
61062
61063@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61064 if (length > ATM_MAX_AAL5_PDU) {
61065 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
61066 __func__, length, vcc);
61067- atomic_inc(&vcc->stats->rx_err);
61068+ atomic_inc_unchecked(&vcc->stats->rx_err);
61069 goto out;
61070 }
61071
61072@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61073 if (sarb->len < pdu_length) {
61074 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
61075 __func__, pdu_length, sarb->len, vcc);
61076- atomic_inc(&vcc->stats->rx_err);
61077+ atomic_inc_unchecked(&vcc->stats->rx_err);
61078 goto out;
61079 }
61080
61081 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
61082 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
61083 __func__, vcc);
61084- atomic_inc(&vcc->stats->rx_err);
61085+ atomic_inc_unchecked(&vcc->stats->rx_err);
61086 goto out;
61087 }
61088
61089@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61090 if (printk_ratelimit())
61091 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
61092 __func__, length);
61093- atomic_inc(&vcc->stats->rx_drop);
61094+ atomic_inc_unchecked(&vcc->stats->rx_drop);
61095 goto out;
61096 }
61097
61098@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
61099
61100 vcc->push(vcc, skb);
61101
61102- atomic_inc(&vcc->stats->rx);
61103+ atomic_inc_unchecked(&vcc->stats->rx);
61104 out:
61105 skb_trim(sarb, 0);
61106 }
61107@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
61108 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
61109
61110 usbatm_pop(vcc, skb);
61111- atomic_inc(&vcc->stats->tx);
61112+ atomic_inc_unchecked(&vcc->stats->tx);
61113
61114 skb = skb_dequeue(&instance->sndqueue);
61115 }
61116@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
61117 if (!left--)
61118 return sprintf(page,
61119 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
61120- atomic_read(&atm_dev->stats.aal5.tx),
61121- atomic_read(&atm_dev->stats.aal5.tx_err),
61122- atomic_read(&atm_dev->stats.aal5.rx),
61123- atomic_read(&atm_dev->stats.aal5.rx_err),
61124- atomic_read(&atm_dev->stats.aal5.rx_drop));
61125+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
61126+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
61127+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
61128+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
61129+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
61130
61131 if (!left--) {
61132 if (instance->disconnected)
61133diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
61134index 24e6205..fe5a5d4 100644
61135--- a/drivers/usb/core/hcd.c
61136+++ b/drivers/usb/core/hcd.c
61137@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
61138
61139 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61140
61141-struct usb_mon_operations *mon_ops;
61142+const struct usb_mon_operations *mon_ops;
61143
61144 /*
61145 * The registration is unlocked.
61146@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
61147 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
61148 */
61149
61150-int usb_mon_register (struct usb_mon_operations *ops)
61151+int usb_mon_register (const struct usb_mon_operations *ops)
61152 {
61153
61154 if (mon_ops)
61155diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
61156index bcbe104..9cfd1c6 100644
61157--- a/drivers/usb/core/hcd.h
61158+++ b/drivers/usb/core/hcd.h
61159@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
61160 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
61161
61162 struct usb_mon_operations {
61163- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
61164- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61165- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61166+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
61167+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
61168+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
61169 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
61170 };
61171
61172-extern struct usb_mon_operations *mon_ops;
61173+extern const struct usb_mon_operations *mon_ops;
61174
61175 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
61176 {
61177@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
61178 (*mon_ops->urb_complete)(bus, urb, status);
61179 }
61180
61181-int usb_mon_register(struct usb_mon_operations *ops);
61182+int usb_mon_register(const struct usb_mon_operations *ops);
61183 void usb_mon_deregister(void);
61184
61185 #else
61186diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
61187index 62ff5e7..530b74e 100644
61188--- a/drivers/usb/misc/appledisplay.c
61189+++ b/drivers/usb/misc/appledisplay.c
61190@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
61191 return pdata->msgdata[1];
61192 }
61193
61194-static struct backlight_ops appledisplay_bl_data = {
61195+static const struct backlight_ops appledisplay_bl_data = {
61196 .get_brightness = appledisplay_bl_get_brightness,
61197 .update_status = appledisplay_bl_update_status,
61198 };
61199diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
61200index e0c2db3..bd8cb66 100644
61201--- a/drivers/usb/mon/mon_main.c
61202+++ b/drivers/usb/mon/mon_main.c
61203@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
61204 /*
61205 * Ops
61206 */
61207-static struct usb_mon_operations mon_ops_0 = {
61208+static const struct usb_mon_operations mon_ops_0 = {
61209 .urb_submit = mon_submit,
61210 .urb_submit_error = mon_submit_error,
61211 .urb_complete = mon_complete,
61212diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
61213index d6bea3e..60b250e 100644
61214--- a/drivers/usb/wusbcore/wa-hc.h
61215+++ b/drivers/usb/wusbcore/wa-hc.h
61216@@ -192,7 +192,7 @@ struct wahc {
61217 struct list_head xfer_delayed_list;
61218 spinlock_t xfer_list_lock;
61219 struct work_struct xfer_work;
61220- atomic_t xfer_id_count;
61221+ atomic_unchecked_t xfer_id_count;
61222 };
61223
61224
61225@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
61226 INIT_LIST_HEAD(&wa->xfer_delayed_list);
61227 spin_lock_init(&wa->xfer_list_lock);
61228 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
61229- atomic_set(&wa->xfer_id_count, 1);
61230+ atomic_set_unchecked(&wa->xfer_id_count, 1);
61231 }
61232
61233 /**
61234diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
61235index 613a5fc..3174865 100644
61236--- a/drivers/usb/wusbcore/wa-xfer.c
61237+++ b/drivers/usb/wusbcore/wa-xfer.c
61238@@ -293,7 +293,7 @@ out:
61239 */
61240 static void wa_xfer_id_init(struct wa_xfer *xfer)
61241 {
61242- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
61243+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
61244 }
61245
61246 /*
61247diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
61248index aa42fce..f8a828c 100644
61249--- a/drivers/uwb/wlp/messages.c
61250+++ b/drivers/uwb/wlp/messages.c
61251@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
61252 size_t len = skb->len;
61253 size_t used;
61254 ssize_t result;
61255- struct wlp_nonce enonce, rnonce;
61256+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
61257 enum wlp_assc_error assc_err;
61258 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
61259 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
61260diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
61261index 0370399..6627c94 100644
61262--- a/drivers/uwb/wlp/sysfs.c
61263+++ b/drivers/uwb/wlp/sysfs.c
61264@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
61265 return ret;
61266 }
61267
61268-static
61269-struct sysfs_ops wss_sysfs_ops = {
61270+static const struct sysfs_ops wss_sysfs_ops = {
61271 .show = wlp_wss_attr_show,
61272 .store = wlp_wss_attr_store,
61273 };
61274diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
61275index 8c5e432..5ee90ea 100644
61276--- a/drivers/video/atmel_lcdfb.c
61277+++ b/drivers/video/atmel_lcdfb.c
61278@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
61279 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
61280 }
61281
61282-static struct backlight_ops atmel_lcdc_bl_ops = {
61283+static const struct backlight_ops atmel_lcdc_bl_ops = {
61284 .update_status = atmel_bl_update_status,
61285 .get_brightness = atmel_bl_get_brightness,
61286 };
61287diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
61288index e4e4d43..66bcbcc 100644
61289--- a/drivers/video/aty/aty128fb.c
61290+++ b/drivers/video/aty/aty128fb.c
61291@@ -149,7 +149,7 @@ enum {
61292 };
61293
61294 /* Must match above enum */
61295-static const char *r128_family[] __devinitdata = {
61296+static const char *r128_family[] __devinitconst = {
61297 "AGP",
61298 "PCI",
61299 "PRO AGP",
61300@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
61301 return bd->props.brightness;
61302 }
61303
61304-static struct backlight_ops aty128_bl_data = {
61305+static const struct backlight_ops aty128_bl_data = {
61306 .get_brightness = aty128_bl_get_brightness,
61307 .update_status = aty128_bl_update_status,
61308 };
61309diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
61310index 913b4a4..9295a38 100644
61311--- a/drivers/video/aty/atyfb_base.c
61312+++ b/drivers/video/aty/atyfb_base.c
61313@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
61314 return bd->props.brightness;
61315 }
61316
61317-static struct backlight_ops aty_bl_data = {
61318+static const struct backlight_ops aty_bl_data = {
61319 .get_brightness = aty_bl_get_brightness,
61320 .update_status = aty_bl_update_status,
61321 };
61322diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
61323index 1a056ad..221bd6a 100644
61324--- a/drivers/video/aty/radeon_backlight.c
61325+++ b/drivers/video/aty/radeon_backlight.c
61326@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
61327 return bd->props.brightness;
61328 }
61329
61330-static struct backlight_ops radeon_bl_data = {
61331+static const struct backlight_ops radeon_bl_data = {
61332 .get_brightness = radeon_bl_get_brightness,
61333 .update_status = radeon_bl_update_status,
61334 };
61335diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
61336index ad05da5..3cb2cb9 100644
61337--- a/drivers/video/backlight/adp5520_bl.c
61338+++ b/drivers/video/backlight/adp5520_bl.c
61339@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
61340 return error ? data->current_brightness : reg_val;
61341 }
61342
61343-static struct backlight_ops adp5520_bl_ops = {
61344+static const struct backlight_ops adp5520_bl_ops = {
61345 .update_status = adp5520_bl_update_status,
61346 .get_brightness = adp5520_bl_get_brightness,
61347 };
61348diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
61349index 2c3bdfc..d769b0b 100644
61350--- a/drivers/video/backlight/adx_bl.c
61351+++ b/drivers/video/backlight/adx_bl.c
61352@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
61353 return 1;
61354 }
61355
61356-static struct backlight_ops adx_backlight_ops = {
61357+static const struct backlight_ops adx_backlight_ops = {
61358 .options = 0,
61359 .update_status = adx_backlight_update_status,
61360 .get_brightness = adx_backlight_get_brightness,
61361diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
61362index 505c082..6b6b3cc 100644
61363--- a/drivers/video/backlight/atmel-pwm-bl.c
61364+++ b/drivers/video/backlight/atmel-pwm-bl.c
61365@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
61366 return pwm_channel_enable(&pwmbl->pwmc);
61367 }
61368
61369-static struct backlight_ops atmel_pwm_bl_ops = {
61370+static const struct backlight_ops atmel_pwm_bl_ops = {
61371 .get_brightness = atmel_pwm_bl_get_intensity,
61372 .update_status = atmel_pwm_bl_set_intensity,
61373 };
61374diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
61375index 5e20e6e..89025e6 100644
61376--- a/drivers/video/backlight/backlight.c
61377+++ b/drivers/video/backlight/backlight.c
61378@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
61379 * ERR_PTR() or a pointer to the newly allocated device.
61380 */
61381 struct backlight_device *backlight_device_register(const char *name,
61382- struct device *parent, void *devdata, struct backlight_ops *ops)
61383+ struct device *parent, void *devdata, const struct backlight_ops *ops)
61384 {
61385 struct backlight_device *new_bd;
61386 int rc;
61387diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
61388index 9677494..b4bcf80 100644
61389--- a/drivers/video/backlight/corgi_lcd.c
61390+++ b/drivers/video/backlight/corgi_lcd.c
61391@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
61392 }
61393 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
61394
61395-static struct backlight_ops corgi_bl_ops = {
61396+static const struct backlight_ops corgi_bl_ops = {
61397 .get_brightness = corgi_bl_get_intensity,
61398 .update_status = corgi_bl_update_status,
61399 };
61400diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
61401index b9fe62b..2914bf1 100644
61402--- a/drivers/video/backlight/cr_bllcd.c
61403+++ b/drivers/video/backlight/cr_bllcd.c
61404@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
61405 return intensity;
61406 }
61407
61408-static struct backlight_ops cr_backlight_ops = {
61409+static const struct backlight_ops cr_backlight_ops = {
61410 .get_brightness = cr_backlight_get_intensity,
61411 .update_status = cr_backlight_set_intensity,
61412 };
61413diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
61414index 701a108..feacfd5 100644
61415--- a/drivers/video/backlight/da903x_bl.c
61416+++ b/drivers/video/backlight/da903x_bl.c
61417@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
61418 return data->current_brightness;
61419 }
61420
61421-static struct backlight_ops da903x_backlight_ops = {
61422+static const struct backlight_ops da903x_backlight_ops = {
61423 .update_status = da903x_backlight_update_status,
61424 .get_brightness = da903x_backlight_get_brightness,
61425 };
61426diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
61427index 6d27f62..e6d348e 100644
61428--- a/drivers/video/backlight/generic_bl.c
61429+++ b/drivers/video/backlight/generic_bl.c
61430@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
61431 }
61432 EXPORT_SYMBOL(corgibl_limit_intensity);
61433
61434-static struct backlight_ops genericbl_ops = {
61435+static const struct backlight_ops genericbl_ops = {
61436 .options = BL_CORE_SUSPENDRESUME,
61437 .get_brightness = genericbl_get_intensity,
61438 .update_status = genericbl_send_intensity,
61439diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
61440index 7fb4eef..f7cc528 100644
61441--- a/drivers/video/backlight/hp680_bl.c
61442+++ b/drivers/video/backlight/hp680_bl.c
61443@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
61444 return current_intensity;
61445 }
61446
61447-static struct backlight_ops hp680bl_ops = {
61448+static const struct backlight_ops hp680bl_ops = {
61449 .get_brightness = hp680bl_get_intensity,
61450 .update_status = hp680bl_set_intensity,
61451 };
61452diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
61453index 7aed256..db9071f 100644
61454--- a/drivers/video/backlight/jornada720_bl.c
61455+++ b/drivers/video/backlight/jornada720_bl.c
61456@@ -93,7 +93,7 @@ out:
61457 return ret;
61458 }
61459
61460-static struct backlight_ops jornada_bl_ops = {
61461+static const struct backlight_ops jornada_bl_ops = {
61462 .get_brightness = jornada_bl_get_brightness,
61463 .update_status = jornada_bl_update_status,
61464 .options = BL_CORE_SUSPENDRESUME,
61465diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
61466index a38fda1..939e7b8 100644
61467--- a/drivers/video/backlight/kb3886_bl.c
61468+++ b/drivers/video/backlight/kb3886_bl.c
61469@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
61470 return kb3886bl_intensity;
61471 }
61472
61473-static struct backlight_ops kb3886bl_ops = {
61474+static const struct backlight_ops kb3886bl_ops = {
61475 .get_brightness = kb3886bl_get_intensity,
61476 .update_status = kb3886bl_send_intensity,
61477 };
61478diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
61479index 6b488b8..00a9591 100644
61480--- a/drivers/video/backlight/locomolcd.c
61481+++ b/drivers/video/backlight/locomolcd.c
61482@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
61483 return current_intensity;
61484 }
61485
61486-static struct backlight_ops locomobl_data = {
61487+static const struct backlight_ops locomobl_data = {
61488 .get_brightness = locomolcd_get_intensity,
61489 .update_status = locomolcd_set_intensity,
61490 };
61491diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
61492index 99bdfa8..3dac448 100644
61493--- a/drivers/video/backlight/mbp_nvidia_bl.c
61494+++ b/drivers/video/backlight/mbp_nvidia_bl.c
61495@@ -33,7 +33,7 @@ struct dmi_match_data {
61496 unsigned long iostart;
61497 unsigned long iolen;
61498 /* Backlight operations structure. */
61499- struct backlight_ops backlight_ops;
61500+ const struct backlight_ops backlight_ops;
61501 };
61502
61503 /* Module parameters. */
61504diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
61505index cbad67e..3cf900e 100644
61506--- a/drivers/video/backlight/omap1_bl.c
61507+++ b/drivers/video/backlight/omap1_bl.c
61508@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
61509 return bl->current_intensity;
61510 }
61511
61512-static struct backlight_ops omapbl_ops = {
61513+static const struct backlight_ops omapbl_ops = {
61514 .get_brightness = omapbl_get_intensity,
61515 .update_status = omapbl_update_status,
61516 };
61517diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
61518index 9edaf24..075786e 100644
61519--- a/drivers/video/backlight/progear_bl.c
61520+++ b/drivers/video/backlight/progear_bl.c
61521@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
61522 return intensity - HW_LEVEL_MIN;
61523 }
61524
61525-static struct backlight_ops progearbl_ops = {
61526+static const struct backlight_ops progearbl_ops = {
61527 .get_brightness = progearbl_get_intensity,
61528 .update_status = progearbl_set_intensity,
61529 };
61530diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
61531index 8871662..df9e0b3 100644
61532--- a/drivers/video/backlight/pwm_bl.c
61533+++ b/drivers/video/backlight/pwm_bl.c
61534@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
61535 return bl->props.brightness;
61536 }
61537
61538-static struct backlight_ops pwm_backlight_ops = {
61539+static const struct backlight_ops pwm_backlight_ops = {
61540 .update_status = pwm_backlight_update_status,
61541 .get_brightness = pwm_backlight_get_brightness,
61542 };
61543diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
61544index 43edbad..e14ce4d 100644
61545--- a/drivers/video/backlight/tosa_bl.c
61546+++ b/drivers/video/backlight/tosa_bl.c
61547@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
61548 return props->brightness;
61549 }
61550
61551-static struct backlight_ops bl_ops = {
61552+static const struct backlight_ops bl_ops = {
61553 .get_brightness = tosa_bl_get_brightness,
61554 .update_status = tosa_bl_update_status,
61555 };
61556diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
61557index 467bdb7..e32add3 100644
61558--- a/drivers/video/backlight/wm831x_bl.c
61559+++ b/drivers/video/backlight/wm831x_bl.c
61560@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
61561 return data->current_brightness;
61562 }
61563
61564-static struct backlight_ops wm831x_backlight_ops = {
61565+static const struct backlight_ops wm831x_backlight_ops = {
61566 .options = BL_CORE_SUSPENDRESUME,
61567 .update_status = wm831x_backlight_update_status,
61568 .get_brightness = wm831x_backlight_get_brightness,
61569diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
61570index e49ae5e..db4e6f7 100644
61571--- a/drivers/video/bf54x-lq043fb.c
61572+++ b/drivers/video/bf54x-lq043fb.c
61573@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61574 return 0;
61575 }
61576
61577-static struct backlight_ops bfin_lq043fb_bl_ops = {
61578+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61579 .get_brightness = bl_get_brightness,
61580 };
61581
61582diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
61583index 2c72a7c..d523e52 100644
61584--- a/drivers/video/bfin-t350mcqb-fb.c
61585+++ b/drivers/video/bfin-t350mcqb-fb.c
61586@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61587 return 0;
61588 }
61589
61590-static struct backlight_ops bfin_lq043fb_bl_ops = {
61591+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61592 .get_brightness = bl_get_brightness,
61593 };
61594
61595diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
61596index f53b9f1..958bf4e 100644
61597--- a/drivers/video/fbcmap.c
61598+++ b/drivers/video/fbcmap.c
61599@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
61600 rc = -ENODEV;
61601 goto out;
61602 }
61603- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
61604- !info->fbops->fb_setcmap)) {
61605+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
61606 rc = -EINVAL;
61607 goto out1;
61608 }
61609diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
61610index 99bbd28..ad3829e 100644
61611--- a/drivers/video/fbmem.c
61612+++ b/drivers/video/fbmem.c
61613@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61614 image->dx += image->width + 8;
61615 }
61616 } else if (rotate == FB_ROTATE_UD) {
61617- for (x = 0; x < num && image->dx >= 0; x++) {
61618+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
61619 info->fbops->fb_imageblit(info, image);
61620 image->dx -= image->width + 8;
61621 }
61622@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61623 image->dy += image->height + 8;
61624 }
61625 } else if (rotate == FB_ROTATE_CCW) {
61626- for (x = 0; x < num && image->dy >= 0; x++) {
61627+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
61628 info->fbops->fb_imageblit(info, image);
61629 image->dy -= image->height + 8;
61630 }
61631@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
61632 int flags = info->flags;
61633 int ret = 0;
61634
61635+ pax_track_stack();
61636+
61637 if (var->activate & FB_ACTIVATE_INV_MODE) {
61638 struct fb_videomode mode1, mode2;
61639
61640@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61641 void __user *argp = (void __user *)arg;
61642 long ret = 0;
61643
61644+ pax_track_stack();
61645+
61646 switch (cmd) {
61647 case FBIOGET_VSCREENINFO:
61648 if (!lock_fb_info(info))
61649@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61650 return -EFAULT;
61651 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
61652 return -EINVAL;
61653- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
61654+ if (con2fb.framebuffer >= FB_MAX)
61655 return -EINVAL;
61656 if (!registered_fb[con2fb.framebuffer])
61657 request_module("fb%d", con2fb.framebuffer);
61658diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
61659index f20eff8..3e4f622 100644
61660--- a/drivers/video/geode/gx1fb_core.c
61661+++ b/drivers/video/geode/gx1fb_core.c
61662@@ -30,7 +30,7 @@ static int crt_option = 1;
61663 static char panel_option[32] = "";
61664
61665 /* Modes relevant to the GX1 (taken from modedb.c) */
61666-static const struct fb_videomode __initdata gx1_modedb[] = {
61667+static const struct fb_videomode __initconst gx1_modedb[] = {
61668 /* 640x480-60 VESA */
61669 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
61670 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
61671diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
61672index 896e53d..4d87d0b 100644
61673--- a/drivers/video/gxt4500.c
61674+++ b/drivers/video/gxt4500.c
61675@@ -156,7 +156,7 @@ struct gxt4500_par {
61676 static char *mode_option;
61677
61678 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
61679-static const struct fb_videomode defaultmode __devinitdata = {
61680+static const struct fb_videomode defaultmode __devinitconst = {
61681 .refresh = 60,
61682 .xres = 1280,
61683 .yres = 1024,
61684@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
61685 return 0;
61686 }
61687
61688-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
61689+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
61690 .id = "IBM GXT4500P",
61691 .type = FB_TYPE_PACKED_PIXELS,
61692 .visual = FB_VISUAL_PSEUDOCOLOR,
61693diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
61694index f5bedee..28c6028 100644
61695--- a/drivers/video/i810/i810_accel.c
61696+++ b/drivers/video/i810/i810_accel.c
61697@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
61698 }
61699 }
61700 printk("ringbuffer lockup!!!\n");
61701+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
61702 i810_report_error(mmio);
61703 par->dev_flags |= LOCKUP;
61704 info->pixmap.scan_align = 1;
61705diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
61706index 5743ea2..457f82c 100644
61707--- a/drivers/video/i810/i810_main.c
61708+++ b/drivers/video/i810/i810_main.c
61709@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
61710 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
61711
61712 /* PCI */
61713-static const char *i810_pci_list[] __devinitdata = {
61714+static const char *i810_pci_list[] __devinitconst = {
61715 "Intel(R) 810 Framebuffer Device" ,
61716 "Intel(R) 810-DC100 Framebuffer Device" ,
61717 "Intel(R) 810E Framebuffer Device" ,
61718diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
61719index 3c14e43..eafa544 100644
61720--- a/drivers/video/logo/logo_linux_clut224.ppm
61721+++ b/drivers/video/logo/logo_linux_clut224.ppm
61722@@ -1,1604 +1,1123 @@
61723 P3
61724-# Standard 224-color Linux logo
61725 80 80
61726 255
61727- 0 0 0 0 0 0 0 0 0 0 0 0
61728- 0 0 0 0 0 0 0 0 0 0 0 0
61729- 0 0 0 0 0 0 0 0 0 0 0 0
61730- 0 0 0 0 0 0 0 0 0 0 0 0
61731- 0 0 0 0 0 0 0 0 0 0 0 0
61732- 0 0 0 0 0 0 0 0 0 0 0 0
61733- 0 0 0 0 0 0 0 0 0 0 0 0
61734- 0 0 0 0 0 0 0 0 0 0 0 0
61735- 0 0 0 0 0 0 0 0 0 0 0 0
61736- 6 6 6 6 6 6 10 10 10 10 10 10
61737- 10 10 10 6 6 6 6 6 6 6 6 6
61738- 0 0 0 0 0 0 0 0 0 0 0 0
61739- 0 0 0 0 0 0 0 0 0 0 0 0
61740- 0 0 0 0 0 0 0 0 0 0 0 0
61741- 0 0 0 0 0 0 0 0 0 0 0 0
61742- 0 0 0 0 0 0 0 0 0 0 0 0
61743- 0 0 0 0 0 0 0 0 0 0 0 0
61744- 0 0 0 0 0 0 0 0 0 0 0 0
61745- 0 0 0 0 0 0 0 0 0 0 0 0
61746- 0 0 0 0 0 0 0 0 0 0 0 0
61747- 0 0 0 0 0 0 0 0 0 0 0 0
61748- 0 0 0 0 0 0 0 0 0 0 0 0
61749- 0 0 0 0 0 0 0 0 0 0 0 0
61750- 0 0 0 0 0 0 0 0 0 0 0 0
61751- 0 0 0 0 0 0 0 0 0 0 0 0
61752- 0 0 0 0 0 0 0 0 0 0 0 0
61753- 0 0 0 0 0 0 0 0 0 0 0 0
61754- 0 0 0 0 0 0 0 0 0 0 0 0
61755- 0 0 0 6 6 6 10 10 10 14 14 14
61756- 22 22 22 26 26 26 30 30 30 34 34 34
61757- 30 30 30 30 30 30 26 26 26 18 18 18
61758- 14 14 14 10 10 10 6 6 6 0 0 0
61759- 0 0 0 0 0 0 0 0 0 0 0 0
61760- 0 0 0 0 0 0 0 0 0 0 0 0
61761- 0 0 0 0 0 0 0 0 0 0 0 0
61762- 0 0 0 0 0 0 0 0 0 0 0 0
61763- 0 0 0 0 0 0 0 0 0 0 0 0
61764- 0 0 0 0 0 0 0 0 0 0 0 0
61765- 0 0 0 0 0 0 0 0 0 0 0 0
61766- 0 0 0 0 0 0 0 0 0 0 0 0
61767- 0 0 0 0 0 0 0 0 0 0 0 0
61768- 0 0 0 0 0 1 0 0 1 0 0 0
61769- 0 0 0 0 0 0 0 0 0 0 0 0
61770- 0 0 0 0 0 0 0 0 0 0 0 0
61771- 0 0 0 0 0 0 0 0 0 0 0 0
61772- 0 0 0 0 0 0 0 0 0 0 0 0
61773- 0 0 0 0 0 0 0 0 0 0 0 0
61774- 0 0 0 0 0 0 0 0 0 0 0 0
61775- 6 6 6 14 14 14 26 26 26 42 42 42
61776- 54 54 54 66 66 66 78 78 78 78 78 78
61777- 78 78 78 74 74 74 66 66 66 54 54 54
61778- 42 42 42 26 26 26 18 18 18 10 10 10
61779- 6 6 6 0 0 0 0 0 0 0 0 0
61780- 0 0 0 0 0 0 0 0 0 0 0 0
61781- 0 0 0 0 0 0 0 0 0 0 0 0
61782- 0 0 0 0 0 0 0 0 0 0 0 0
61783- 0 0 0 0 0 0 0 0 0 0 0 0
61784- 0 0 0 0 0 0 0 0 0 0 0 0
61785- 0 0 0 0 0 0 0 0 0 0 0 0
61786- 0 0 0 0 0 0 0 0 0 0 0 0
61787- 0 0 0 0 0 0 0 0 0 0 0 0
61788- 0 0 1 0 0 0 0 0 0 0 0 0
61789- 0 0 0 0 0 0 0 0 0 0 0 0
61790- 0 0 0 0 0 0 0 0 0 0 0 0
61791- 0 0 0 0 0 0 0 0 0 0 0 0
61792- 0 0 0 0 0 0 0 0 0 0 0 0
61793- 0 0 0 0 0 0 0 0 0 0 0 0
61794- 0 0 0 0 0 0 0 0 0 10 10 10
61795- 22 22 22 42 42 42 66 66 66 86 86 86
61796- 66 66 66 38 38 38 38 38 38 22 22 22
61797- 26 26 26 34 34 34 54 54 54 66 66 66
61798- 86 86 86 70 70 70 46 46 46 26 26 26
61799- 14 14 14 6 6 6 0 0 0 0 0 0
61800- 0 0 0 0 0 0 0 0 0 0 0 0
61801- 0 0 0 0 0 0 0 0 0 0 0 0
61802- 0 0 0 0 0 0 0 0 0 0 0 0
61803- 0 0 0 0 0 0 0 0 0 0 0 0
61804- 0 0 0 0 0 0 0 0 0 0 0 0
61805- 0 0 0 0 0 0 0 0 0 0 0 0
61806- 0 0 0 0 0 0 0 0 0 0 0 0
61807- 0 0 0 0 0 0 0 0 0 0 0 0
61808- 0 0 1 0 0 1 0 0 1 0 0 0
61809- 0 0 0 0 0 0 0 0 0 0 0 0
61810- 0 0 0 0 0 0 0 0 0 0 0 0
61811- 0 0 0 0 0 0 0 0 0 0 0 0
61812- 0 0 0 0 0 0 0 0 0 0 0 0
61813- 0 0 0 0 0 0 0 0 0 0 0 0
61814- 0 0 0 0 0 0 10 10 10 26 26 26
61815- 50 50 50 82 82 82 58 58 58 6 6 6
61816- 2 2 6 2 2 6 2 2 6 2 2 6
61817- 2 2 6 2 2 6 2 2 6 2 2 6
61818- 6 6 6 54 54 54 86 86 86 66 66 66
61819- 38 38 38 18 18 18 6 6 6 0 0 0
61820- 0 0 0 0 0 0 0 0 0 0 0 0
61821- 0 0 0 0 0 0 0 0 0 0 0 0
61822- 0 0 0 0 0 0 0 0 0 0 0 0
61823- 0 0 0 0 0 0 0 0 0 0 0 0
61824- 0 0 0 0 0 0 0 0 0 0 0 0
61825- 0 0 0 0 0 0 0 0 0 0 0 0
61826- 0 0 0 0 0 0 0 0 0 0 0 0
61827- 0 0 0 0 0 0 0 0 0 0 0 0
61828- 0 0 0 0 0 0 0 0 0 0 0 0
61829- 0 0 0 0 0 0 0 0 0 0 0 0
61830- 0 0 0 0 0 0 0 0 0 0 0 0
61831- 0 0 0 0 0 0 0 0 0 0 0 0
61832- 0 0 0 0 0 0 0 0 0 0 0 0
61833- 0 0 0 0 0 0 0 0 0 0 0 0
61834- 0 0 0 6 6 6 22 22 22 50 50 50
61835- 78 78 78 34 34 34 2 2 6 2 2 6
61836- 2 2 6 2 2 6 2 2 6 2 2 6
61837- 2 2 6 2 2 6 2 2 6 2 2 6
61838- 2 2 6 2 2 6 6 6 6 70 70 70
61839- 78 78 78 46 46 46 22 22 22 6 6 6
61840- 0 0 0 0 0 0 0 0 0 0 0 0
61841- 0 0 0 0 0 0 0 0 0 0 0 0
61842- 0 0 0 0 0 0 0 0 0 0 0 0
61843- 0 0 0 0 0 0 0 0 0 0 0 0
61844- 0 0 0 0 0 0 0 0 0 0 0 0
61845- 0 0 0 0 0 0 0 0 0 0 0 0
61846- 0 0 0 0 0 0 0 0 0 0 0 0
61847- 0 0 0 0 0 0 0 0 0 0 0 0
61848- 0 0 1 0 0 1 0 0 1 0 0 0
61849- 0 0 0 0 0 0 0 0 0 0 0 0
61850- 0 0 0 0 0 0 0 0 0 0 0 0
61851- 0 0 0 0 0 0 0 0 0 0 0 0
61852- 0 0 0 0 0 0 0 0 0 0 0 0
61853- 0 0 0 0 0 0 0 0 0 0 0 0
61854- 6 6 6 18 18 18 42 42 42 82 82 82
61855- 26 26 26 2 2 6 2 2 6 2 2 6
61856- 2 2 6 2 2 6 2 2 6 2 2 6
61857- 2 2 6 2 2 6 2 2 6 14 14 14
61858- 46 46 46 34 34 34 6 6 6 2 2 6
61859- 42 42 42 78 78 78 42 42 42 18 18 18
61860- 6 6 6 0 0 0 0 0 0 0 0 0
61861- 0 0 0 0 0 0 0 0 0 0 0 0
61862- 0 0 0 0 0 0 0 0 0 0 0 0
61863- 0 0 0 0 0 0 0 0 0 0 0 0
61864- 0 0 0 0 0 0 0 0 0 0 0 0
61865- 0 0 0 0 0 0 0 0 0 0 0 0
61866- 0 0 0 0 0 0 0 0 0 0 0 0
61867- 0 0 0 0 0 0 0 0 0 0 0 0
61868- 0 0 1 0 0 0 0 0 1 0 0 0
61869- 0 0 0 0 0 0 0 0 0 0 0 0
61870- 0 0 0 0 0 0 0 0 0 0 0 0
61871- 0 0 0 0 0 0 0 0 0 0 0 0
61872- 0 0 0 0 0 0 0 0 0 0 0 0
61873- 0 0 0 0 0 0 0 0 0 0 0 0
61874- 10 10 10 30 30 30 66 66 66 58 58 58
61875- 2 2 6 2 2 6 2 2 6 2 2 6
61876- 2 2 6 2 2 6 2 2 6 2 2 6
61877- 2 2 6 2 2 6 2 2 6 26 26 26
61878- 86 86 86 101 101 101 46 46 46 10 10 10
61879- 2 2 6 58 58 58 70 70 70 34 34 34
61880- 10 10 10 0 0 0 0 0 0 0 0 0
61881- 0 0 0 0 0 0 0 0 0 0 0 0
61882- 0 0 0 0 0 0 0 0 0 0 0 0
61883- 0 0 0 0 0 0 0 0 0 0 0 0
61884- 0 0 0 0 0 0 0 0 0 0 0 0
61885- 0 0 0 0 0 0 0 0 0 0 0 0
61886- 0 0 0 0 0 0 0 0 0 0 0 0
61887- 0 0 0 0 0 0 0 0 0 0 0 0
61888- 0 0 1 0 0 1 0 0 1 0 0 0
61889- 0 0 0 0 0 0 0 0 0 0 0 0
61890- 0 0 0 0 0 0 0 0 0 0 0 0
61891- 0 0 0 0 0 0 0 0 0 0 0 0
61892- 0 0 0 0 0 0 0 0 0 0 0 0
61893- 0 0 0 0 0 0 0 0 0 0 0 0
61894- 14 14 14 42 42 42 86 86 86 10 10 10
61895- 2 2 6 2 2 6 2 2 6 2 2 6
61896- 2 2 6 2 2 6 2 2 6 2 2 6
61897- 2 2 6 2 2 6 2 2 6 30 30 30
61898- 94 94 94 94 94 94 58 58 58 26 26 26
61899- 2 2 6 6 6 6 78 78 78 54 54 54
61900- 22 22 22 6 6 6 0 0 0 0 0 0
61901- 0 0 0 0 0 0 0 0 0 0 0 0
61902- 0 0 0 0 0 0 0 0 0 0 0 0
61903- 0 0 0 0 0 0 0 0 0 0 0 0
61904- 0 0 0 0 0 0 0 0 0 0 0 0
61905- 0 0 0 0 0 0 0 0 0 0 0 0
61906- 0 0 0 0 0 0 0 0 0 0 0 0
61907- 0 0 0 0 0 0 0 0 0 0 0 0
61908- 0 0 0 0 0 0 0 0 0 0 0 0
61909- 0 0 0 0 0 0 0 0 0 0 0 0
61910- 0 0 0 0 0 0 0 0 0 0 0 0
61911- 0 0 0 0 0 0 0 0 0 0 0 0
61912- 0 0 0 0 0 0 0 0 0 0 0 0
61913- 0 0 0 0 0 0 0 0 0 6 6 6
61914- 22 22 22 62 62 62 62 62 62 2 2 6
61915- 2 2 6 2 2 6 2 2 6 2 2 6
61916- 2 2 6 2 2 6 2 2 6 2 2 6
61917- 2 2 6 2 2 6 2 2 6 26 26 26
61918- 54 54 54 38 38 38 18 18 18 10 10 10
61919- 2 2 6 2 2 6 34 34 34 82 82 82
61920- 38 38 38 14 14 14 0 0 0 0 0 0
61921- 0 0 0 0 0 0 0 0 0 0 0 0
61922- 0 0 0 0 0 0 0 0 0 0 0 0
61923- 0 0 0 0 0 0 0 0 0 0 0 0
61924- 0 0 0 0 0 0 0 0 0 0 0 0
61925- 0 0 0 0 0 0 0 0 0 0 0 0
61926- 0 0 0 0 0 0 0 0 0 0 0 0
61927- 0 0 0 0 0 0 0 0 0 0 0 0
61928- 0 0 0 0 0 1 0 0 1 0 0 0
61929- 0 0 0 0 0 0 0 0 0 0 0 0
61930- 0 0 0 0 0 0 0 0 0 0 0 0
61931- 0 0 0 0 0 0 0 0 0 0 0 0
61932- 0 0 0 0 0 0 0 0 0 0 0 0
61933- 0 0 0 0 0 0 0 0 0 6 6 6
61934- 30 30 30 78 78 78 30 30 30 2 2 6
61935- 2 2 6 2 2 6 2 2 6 2 2 6
61936- 2 2 6 2 2 6 2 2 6 2 2 6
61937- 2 2 6 2 2 6 2 2 6 10 10 10
61938- 10 10 10 2 2 6 2 2 6 2 2 6
61939- 2 2 6 2 2 6 2 2 6 78 78 78
61940- 50 50 50 18 18 18 6 6 6 0 0 0
61941- 0 0 0 0 0 0 0 0 0 0 0 0
61942- 0 0 0 0 0 0 0 0 0 0 0 0
61943- 0 0 0 0 0 0 0 0 0 0 0 0
61944- 0 0 0 0 0 0 0 0 0 0 0 0
61945- 0 0 0 0 0 0 0 0 0 0 0 0
61946- 0 0 0 0 0 0 0 0 0 0 0 0
61947- 0 0 0 0 0 0 0 0 0 0 0 0
61948- 0 0 1 0 0 0 0 0 0 0 0 0
61949- 0 0 0 0 0 0 0 0 0 0 0 0
61950- 0 0 0 0 0 0 0 0 0 0 0 0
61951- 0 0 0 0 0 0 0 0 0 0 0 0
61952- 0 0 0 0 0 0 0 0 0 0 0 0
61953- 0 0 0 0 0 0 0 0 0 10 10 10
61954- 38 38 38 86 86 86 14 14 14 2 2 6
61955- 2 2 6 2 2 6 2 2 6 2 2 6
61956- 2 2 6 2 2 6 2 2 6 2 2 6
61957- 2 2 6 2 2 6 2 2 6 2 2 6
61958- 2 2 6 2 2 6 2 2 6 2 2 6
61959- 2 2 6 2 2 6 2 2 6 54 54 54
61960- 66 66 66 26 26 26 6 6 6 0 0 0
61961- 0 0 0 0 0 0 0 0 0 0 0 0
61962- 0 0 0 0 0 0 0 0 0 0 0 0
61963- 0 0 0 0 0 0 0 0 0 0 0 0
61964- 0 0 0 0 0 0 0 0 0 0 0 0
61965- 0 0 0 0 0 0 0 0 0 0 0 0
61966- 0 0 0 0 0 0 0 0 0 0 0 0
61967- 0 0 0 0 0 0 0 0 0 0 0 0
61968- 0 0 0 0 0 1 0 0 1 0 0 0
61969- 0 0 0 0 0 0 0 0 0 0 0 0
61970- 0 0 0 0 0 0 0 0 0 0 0 0
61971- 0 0 0 0 0 0 0 0 0 0 0 0
61972- 0 0 0 0 0 0 0 0 0 0 0 0
61973- 0 0 0 0 0 0 0 0 0 14 14 14
61974- 42 42 42 82 82 82 2 2 6 2 2 6
61975- 2 2 6 6 6 6 10 10 10 2 2 6
61976- 2 2 6 2 2 6 2 2 6 2 2 6
61977- 2 2 6 2 2 6 2 2 6 6 6 6
61978- 14 14 14 10 10 10 2 2 6 2 2 6
61979- 2 2 6 2 2 6 2 2 6 18 18 18
61980- 82 82 82 34 34 34 10 10 10 0 0 0
61981- 0 0 0 0 0 0 0 0 0 0 0 0
61982- 0 0 0 0 0 0 0 0 0 0 0 0
61983- 0 0 0 0 0 0 0 0 0 0 0 0
61984- 0 0 0 0 0 0 0 0 0 0 0 0
61985- 0 0 0 0 0 0 0 0 0 0 0 0
61986- 0 0 0 0 0 0 0 0 0 0 0 0
61987- 0 0 0 0 0 0 0 0 0 0 0 0
61988- 0 0 1 0 0 0 0 0 0 0 0 0
61989- 0 0 0 0 0 0 0 0 0 0 0 0
61990- 0 0 0 0 0 0 0 0 0 0 0 0
61991- 0 0 0 0 0 0 0 0 0 0 0 0
61992- 0 0 0 0 0 0 0 0 0 0 0 0
61993- 0 0 0 0 0 0 0 0 0 14 14 14
61994- 46 46 46 86 86 86 2 2 6 2 2 6
61995- 6 6 6 6 6 6 22 22 22 34 34 34
61996- 6 6 6 2 2 6 2 2 6 2 2 6
61997- 2 2 6 2 2 6 18 18 18 34 34 34
61998- 10 10 10 50 50 50 22 22 22 2 2 6
61999- 2 2 6 2 2 6 2 2 6 10 10 10
62000- 86 86 86 42 42 42 14 14 14 0 0 0
62001- 0 0 0 0 0 0 0 0 0 0 0 0
62002- 0 0 0 0 0 0 0 0 0 0 0 0
62003- 0 0 0 0 0 0 0 0 0 0 0 0
62004- 0 0 0 0 0 0 0 0 0 0 0 0
62005- 0 0 0 0 0 0 0 0 0 0 0 0
62006- 0 0 0 0 0 0 0 0 0 0 0 0
62007- 0 0 0 0 0 0 0 0 0 0 0 0
62008- 0 0 1 0 0 1 0 0 1 0 0 0
62009- 0 0 0 0 0 0 0 0 0 0 0 0
62010- 0 0 0 0 0 0 0 0 0 0 0 0
62011- 0 0 0 0 0 0 0 0 0 0 0 0
62012- 0 0 0 0 0 0 0 0 0 0 0 0
62013- 0 0 0 0 0 0 0 0 0 14 14 14
62014- 46 46 46 86 86 86 2 2 6 2 2 6
62015- 38 38 38 116 116 116 94 94 94 22 22 22
62016- 22 22 22 2 2 6 2 2 6 2 2 6
62017- 14 14 14 86 86 86 138 138 138 162 162 162
62018-154 154 154 38 38 38 26 26 26 6 6 6
62019- 2 2 6 2 2 6 2 2 6 2 2 6
62020- 86 86 86 46 46 46 14 14 14 0 0 0
62021- 0 0 0 0 0 0 0 0 0 0 0 0
62022- 0 0 0 0 0 0 0 0 0 0 0 0
62023- 0 0 0 0 0 0 0 0 0 0 0 0
62024- 0 0 0 0 0 0 0 0 0 0 0 0
62025- 0 0 0 0 0 0 0 0 0 0 0 0
62026- 0 0 0 0 0 0 0 0 0 0 0 0
62027- 0 0 0 0 0 0 0 0 0 0 0 0
62028- 0 0 0 0 0 0 0 0 0 0 0 0
62029- 0 0 0 0 0 0 0 0 0 0 0 0
62030- 0 0 0 0 0 0 0 0 0 0 0 0
62031- 0 0 0 0 0 0 0 0 0 0 0 0
62032- 0 0 0 0 0 0 0 0 0 0 0 0
62033- 0 0 0 0 0 0 0 0 0 14 14 14
62034- 46 46 46 86 86 86 2 2 6 14 14 14
62035-134 134 134 198 198 198 195 195 195 116 116 116
62036- 10 10 10 2 2 6 2 2 6 6 6 6
62037-101 98 89 187 187 187 210 210 210 218 218 218
62038-214 214 214 134 134 134 14 14 14 6 6 6
62039- 2 2 6 2 2 6 2 2 6 2 2 6
62040- 86 86 86 50 50 50 18 18 18 6 6 6
62041- 0 0 0 0 0 0 0 0 0 0 0 0
62042- 0 0 0 0 0 0 0 0 0 0 0 0
62043- 0 0 0 0 0 0 0 0 0 0 0 0
62044- 0 0 0 0 0 0 0 0 0 0 0 0
62045- 0 0 0 0 0 0 0 0 0 0 0 0
62046- 0 0 0 0 0 0 0 0 0 0 0 0
62047- 0 0 0 0 0 0 0 0 1 0 0 0
62048- 0 0 1 0 0 1 0 0 1 0 0 0
62049- 0 0 0 0 0 0 0 0 0 0 0 0
62050- 0 0 0 0 0 0 0 0 0 0 0 0
62051- 0 0 0 0 0 0 0 0 0 0 0 0
62052- 0 0 0 0 0 0 0 0 0 0 0 0
62053- 0 0 0 0 0 0 0 0 0 14 14 14
62054- 46 46 46 86 86 86 2 2 6 54 54 54
62055-218 218 218 195 195 195 226 226 226 246 246 246
62056- 58 58 58 2 2 6 2 2 6 30 30 30
62057-210 210 210 253 253 253 174 174 174 123 123 123
62058-221 221 221 234 234 234 74 74 74 2 2 6
62059- 2 2 6 2 2 6 2 2 6 2 2 6
62060- 70 70 70 58 58 58 22 22 22 6 6 6
62061- 0 0 0 0 0 0 0 0 0 0 0 0
62062- 0 0 0 0 0 0 0 0 0 0 0 0
62063- 0 0 0 0 0 0 0 0 0 0 0 0
62064- 0 0 0 0 0 0 0 0 0 0 0 0
62065- 0 0 0 0 0 0 0 0 0 0 0 0
62066- 0 0 0 0 0 0 0 0 0 0 0 0
62067- 0 0 0 0 0 0 0 0 0 0 0 0
62068- 0 0 0 0 0 0 0 0 0 0 0 0
62069- 0 0 0 0 0 0 0 0 0 0 0 0
62070- 0 0 0 0 0 0 0 0 0 0 0 0
62071- 0 0 0 0 0 0 0 0 0 0 0 0
62072- 0 0 0 0 0 0 0 0 0 0 0 0
62073- 0 0 0 0 0 0 0 0 0 14 14 14
62074- 46 46 46 82 82 82 2 2 6 106 106 106
62075-170 170 170 26 26 26 86 86 86 226 226 226
62076-123 123 123 10 10 10 14 14 14 46 46 46
62077-231 231 231 190 190 190 6 6 6 70 70 70
62078- 90 90 90 238 238 238 158 158 158 2 2 6
62079- 2 2 6 2 2 6 2 2 6 2 2 6
62080- 70 70 70 58 58 58 22 22 22 6 6 6
62081- 0 0 0 0 0 0 0 0 0 0 0 0
62082- 0 0 0 0 0 0 0 0 0 0 0 0
62083- 0 0 0 0 0 0 0 0 0 0 0 0
62084- 0 0 0 0 0 0 0 0 0 0 0 0
62085- 0 0 0 0 0 0 0 0 0 0 0 0
62086- 0 0 0 0 0 0 0 0 0 0 0 0
62087- 0 0 0 0 0 0 0 0 1 0 0 0
62088- 0 0 1 0 0 1 0 0 1 0 0 0
62089- 0 0 0 0 0 0 0 0 0 0 0 0
62090- 0 0 0 0 0 0 0 0 0 0 0 0
62091- 0 0 0 0 0 0 0 0 0 0 0 0
62092- 0 0 0 0 0 0 0 0 0 0 0 0
62093- 0 0 0 0 0 0 0 0 0 14 14 14
62094- 42 42 42 86 86 86 6 6 6 116 116 116
62095-106 106 106 6 6 6 70 70 70 149 149 149
62096-128 128 128 18 18 18 38 38 38 54 54 54
62097-221 221 221 106 106 106 2 2 6 14 14 14
62098- 46 46 46 190 190 190 198 198 198 2 2 6
62099- 2 2 6 2 2 6 2 2 6 2 2 6
62100- 74 74 74 62 62 62 22 22 22 6 6 6
62101- 0 0 0 0 0 0 0 0 0 0 0 0
62102- 0 0 0 0 0 0 0 0 0 0 0 0
62103- 0 0 0 0 0 0 0 0 0 0 0 0
62104- 0 0 0 0 0 0 0 0 0 0 0 0
62105- 0 0 0 0 0 0 0 0 0 0 0 0
62106- 0 0 0 0 0 0 0 0 0 0 0 0
62107- 0 0 0 0 0 0 0 0 1 0 0 0
62108- 0 0 1 0 0 0 0 0 1 0 0 0
62109- 0 0 0 0 0 0 0 0 0 0 0 0
62110- 0 0 0 0 0 0 0 0 0 0 0 0
62111- 0 0 0 0 0 0 0 0 0 0 0 0
62112- 0 0 0 0 0 0 0 0 0 0 0 0
62113- 0 0 0 0 0 0 0 0 0 14 14 14
62114- 42 42 42 94 94 94 14 14 14 101 101 101
62115-128 128 128 2 2 6 18 18 18 116 116 116
62116-118 98 46 121 92 8 121 92 8 98 78 10
62117-162 162 162 106 106 106 2 2 6 2 2 6
62118- 2 2 6 195 195 195 195 195 195 6 6 6
62119- 2 2 6 2 2 6 2 2 6 2 2 6
62120- 74 74 74 62 62 62 22 22 22 6 6 6
62121- 0 0 0 0 0 0 0 0 0 0 0 0
62122- 0 0 0 0 0 0 0 0 0 0 0 0
62123- 0 0 0 0 0 0 0 0 0 0 0 0
62124- 0 0 0 0 0 0 0 0 0 0 0 0
62125- 0 0 0 0 0 0 0 0 0 0 0 0
62126- 0 0 0 0 0 0 0 0 0 0 0 0
62127- 0 0 0 0 0 0 0 0 1 0 0 1
62128- 0 0 1 0 0 0 0 0 1 0 0 0
62129- 0 0 0 0 0 0 0 0 0 0 0 0
62130- 0 0 0 0 0 0 0 0 0 0 0 0
62131- 0 0 0 0 0 0 0 0 0 0 0 0
62132- 0 0 0 0 0 0 0 0 0 0 0 0
62133- 0 0 0 0 0 0 0 0 0 10 10 10
62134- 38 38 38 90 90 90 14 14 14 58 58 58
62135-210 210 210 26 26 26 54 38 6 154 114 10
62136-226 170 11 236 186 11 225 175 15 184 144 12
62137-215 174 15 175 146 61 37 26 9 2 2 6
62138- 70 70 70 246 246 246 138 138 138 2 2 6
62139- 2 2 6 2 2 6 2 2 6 2 2 6
62140- 70 70 70 66 66 66 26 26 26 6 6 6
62141- 0 0 0 0 0 0 0 0 0 0 0 0
62142- 0 0 0 0 0 0 0 0 0 0 0 0
62143- 0 0 0 0 0 0 0 0 0 0 0 0
62144- 0 0 0 0 0 0 0 0 0 0 0 0
62145- 0 0 0 0 0 0 0 0 0 0 0 0
62146- 0 0 0 0 0 0 0 0 0 0 0 0
62147- 0 0 0 0 0 0 0 0 0 0 0 0
62148- 0 0 0 0 0 0 0 0 0 0 0 0
62149- 0 0 0 0 0 0 0 0 0 0 0 0
62150- 0 0 0 0 0 0 0 0 0 0 0 0
62151- 0 0 0 0 0 0 0 0 0 0 0 0
62152- 0 0 0 0 0 0 0 0 0 0 0 0
62153- 0 0 0 0 0 0 0 0 0 10 10 10
62154- 38 38 38 86 86 86 14 14 14 10 10 10
62155-195 195 195 188 164 115 192 133 9 225 175 15
62156-239 182 13 234 190 10 232 195 16 232 200 30
62157-245 207 45 241 208 19 232 195 16 184 144 12
62158-218 194 134 211 206 186 42 42 42 2 2 6
62159- 2 2 6 2 2 6 2 2 6 2 2 6
62160- 50 50 50 74 74 74 30 30 30 6 6 6
62161- 0 0 0 0 0 0 0 0 0 0 0 0
62162- 0 0 0 0 0 0 0 0 0 0 0 0
62163- 0 0 0 0 0 0 0 0 0 0 0 0
62164- 0 0 0 0 0 0 0 0 0 0 0 0
62165- 0 0 0 0 0 0 0 0 0 0 0 0
62166- 0 0 0 0 0 0 0 0 0 0 0 0
62167- 0 0 0 0 0 0 0 0 0 0 0 0
62168- 0 0 0 0 0 0 0 0 0 0 0 0
62169- 0 0 0 0 0 0 0 0 0 0 0 0
62170- 0 0 0 0 0 0 0 0 0 0 0 0
62171- 0 0 0 0 0 0 0 0 0 0 0 0
62172- 0 0 0 0 0 0 0 0 0 0 0 0
62173- 0 0 0 0 0 0 0 0 0 10 10 10
62174- 34 34 34 86 86 86 14 14 14 2 2 6
62175-121 87 25 192 133 9 219 162 10 239 182 13
62176-236 186 11 232 195 16 241 208 19 244 214 54
62177-246 218 60 246 218 38 246 215 20 241 208 19
62178-241 208 19 226 184 13 121 87 25 2 2 6
62179- 2 2 6 2 2 6 2 2 6 2 2 6
62180- 50 50 50 82 82 82 34 34 34 10 10 10
62181- 0 0 0 0 0 0 0 0 0 0 0 0
62182- 0 0 0 0 0 0 0 0 0 0 0 0
62183- 0 0 0 0 0 0 0 0 0 0 0 0
62184- 0 0 0 0 0 0 0 0 0 0 0 0
62185- 0 0 0 0 0 0 0 0 0 0 0 0
62186- 0 0 0 0 0 0 0 0 0 0 0 0
62187- 0 0 0 0 0 0 0 0 0 0 0 0
62188- 0 0 0 0 0 0 0 0 0 0 0 0
62189- 0 0 0 0 0 0 0 0 0 0 0 0
62190- 0 0 0 0 0 0 0 0 0 0 0 0
62191- 0 0 0 0 0 0 0 0 0 0 0 0
62192- 0 0 0 0 0 0 0 0 0 0 0 0
62193- 0 0 0 0 0 0 0 0 0 10 10 10
62194- 34 34 34 82 82 82 30 30 30 61 42 6
62195-180 123 7 206 145 10 230 174 11 239 182 13
62196-234 190 10 238 202 15 241 208 19 246 218 74
62197-246 218 38 246 215 20 246 215 20 246 215 20
62198-226 184 13 215 174 15 184 144 12 6 6 6
62199- 2 2 6 2 2 6 2 2 6 2 2 6
62200- 26 26 26 94 94 94 42 42 42 14 14 14
62201- 0 0 0 0 0 0 0 0 0 0 0 0
62202- 0 0 0 0 0 0 0 0 0 0 0 0
62203- 0 0 0 0 0 0 0 0 0 0 0 0
62204- 0 0 0 0 0 0 0 0 0 0 0 0
62205- 0 0 0 0 0 0 0 0 0 0 0 0
62206- 0 0 0 0 0 0 0 0 0 0 0 0
62207- 0 0 0 0 0 0 0 0 0 0 0 0
62208- 0 0 0 0 0 0 0 0 0 0 0 0
62209- 0 0 0 0 0 0 0 0 0 0 0 0
62210- 0 0 0 0 0 0 0 0 0 0 0 0
62211- 0 0 0 0 0 0 0 0 0 0 0 0
62212- 0 0 0 0 0 0 0 0 0 0 0 0
62213- 0 0 0 0 0 0 0 0 0 10 10 10
62214- 30 30 30 78 78 78 50 50 50 104 69 6
62215-192 133 9 216 158 10 236 178 12 236 186 11
62216-232 195 16 241 208 19 244 214 54 245 215 43
62217-246 215 20 246 215 20 241 208 19 198 155 10
62218-200 144 11 216 158 10 156 118 10 2 2 6
62219- 2 2 6 2 2 6 2 2 6 2 2 6
62220- 6 6 6 90 90 90 54 54 54 18 18 18
62221- 6 6 6 0 0 0 0 0 0 0 0 0
62222- 0 0 0 0 0 0 0 0 0 0 0 0
62223- 0 0 0 0 0 0 0 0 0 0 0 0
62224- 0 0 0 0 0 0 0 0 0 0 0 0
62225- 0 0 0 0 0 0 0 0 0 0 0 0
62226- 0 0 0 0 0 0 0 0 0 0 0 0
62227- 0 0 0 0 0 0 0 0 0 0 0 0
62228- 0 0 0 0 0 0 0 0 0 0 0 0
62229- 0 0 0 0 0 0 0 0 0 0 0 0
62230- 0 0 0 0 0 0 0 0 0 0 0 0
62231- 0 0 0 0 0 0 0 0 0 0 0 0
62232- 0 0 0 0 0 0 0 0 0 0 0 0
62233- 0 0 0 0 0 0 0 0 0 10 10 10
62234- 30 30 30 78 78 78 46 46 46 22 22 22
62235-137 92 6 210 162 10 239 182 13 238 190 10
62236-238 202 15 241 208 19 246 215 20 246 215 20
62237-241 208 19 203 166 17 185 133 11 210 150 10
62238-216 158 10 210 150 10 102 78 10 2 2 6
62239- 6 6 6 54 54 54 14 14 14 2 2 6
62240- 2 2 6 62 62 62 74 74 74 30 30 30
62241- 10 10 10 0 0 0 0 0 0 0 0 0
62242- 0 0 0 0 0 0 0 0 0 0 0 0
62243- 0 0 0 0 0 0 0 0 0 0 0 0
62244- 0 0 0 0 0 0 0 0 0 0 0 0
62245- 0 0 0 0 0 0 0 0 0 0 0 0
62246- 0 0 0 0 0 0 0 0 0 0 0 0
62247- 0 0 0 0 0 0 0 0 0 0 0 0
62248- 0 0 0 0 0 0 0 0 0 0 0 0
62249- 0 0 0 0 0 0 0 0 0 0 0 0
62250- 0 0 0 0 0 0 0 0 0 0 0 0
62251- 0 0 0 0 0 0 0 0 0 0 0 0
62252- 0 0 0 0 0 0 0 0 0 0 0 0
62253- 0 0 0 0 0 0 0 0 0 10 10 10
62254- 34 34 34 78 78 78 50 50 50 6 6 6
62255- 94 70 30 139 102 15 190 146 13 226 184 13
62256-232 200 30 232 195 16 215 174 15 190 146 13
62257-168 122 10 192 133 9 210 150 10 213 154 11
62258-202 150 34 182 157 106 101 98 89 2 2 6
62259- 2 2 6 78 78 78 116 116 116 58 58 58
62260- 2 2 6 22 22 22 90 90 90 46 46 46
62261- 18 18 18 6 6 6 0 0 0 0 0 0
62262- 0 0 0 0 0 0 0 0 0 0 0 0
62263- 0 0 0 0 0 0 0 0 0 0 0 0
62264- 0 0 0 0 0 0 0 0 0 0 0 0
62265- 0 0 0 0 0 0 0 0 0 0 0 0
62266- 0 0 0 0 0 0 0 0 0 0 0 0
62267- 0 0 0 0 0 0 0 0 0 0 0 0
62268- 0 0 0 0 0 0 0 0 0 0 0 0
62269- 0 0 0 0 0 0 0 0 0 0 0 0
62270- 0 0 0 0 0 0 0 0 0 0 0 0
62271- 0 0 0 0 0 0 0 0 0 0 0 0
62272- 0 0 0 0 0 0 0 0 0 0 0 0
62273- 0 0 0 0 0 0 0 0 0 10 10 10
62274- 38 38 38 86 86 86 50 50 50 6 6 6
62275-128 128 128 174 154 114 156 107 11 168 122 10
62276-198 155 10 184 144 12 197 138 11 200 144 11
62277-206 145 10 206 145 10 197 138 11 188 164 115
62278-195 195 195 198 198 198 174 174 174 14 14 14
62279- 2 2 6 22 22 22 116 116 116 116 116 116
62280- 22 22 22 2 2 6 74 74 74 70 70 70
62281- 30 30 30 10 10 10 0 0 0 0 0 0
62282- 0 0 0 0 0 0 0 0 0 0 0 0
62283- 0 0 0 0 0 0 0 0 0 0 0 0
62284- 0 0 0 0 0 0 0 0 0 0 0 0
62285- 0 0 0 0 0 0 0 0 0 0 0 0
62286- 0 0 0 0 0 0 0 0 0 0 0 0
62287- 0 0 0 0 0 0 0 0 0 0 0 0
62288- 0 0 0 0 0 0 0 0 0 0 0 0
62289- 0 0 0 0 0 0 0 0 0 0 0 0
62290- 0 0 0 0 0 0 0 0 0 0 0 0
62291- 0 0 0 0 0 0 0 0 0 0 0 0
62292- 0 0 0 0 0 0 0 0 0 0 0 0
62293- 0 0 0 0 0 0 6 6 6 18 18 18
62294- 50 50 50 101 101 101 26 26 26 10 10 10
62295-138 138 138 190 190 190 174 154 114 156 107 11
62296-197 138 11 200 144 11 197 138 11 192 133 9
62297-180 123 7 190 142 34 190 178 144 187 187 187
62298-202 202 202 221 221 221 214 214 214 66 66 66
62299- 2 2 6 2 2 6 50 50 50 62 62 62
62300- 6 6 6 2 2 6 10 10 10 90 90 90
62301- 50 50 50 18 18 18 6 6 6 0 0 0
62302- 0 0 0 0 0 0 0 0 0 0 0 0
62303- 0 0 0 0 0 0 0 0 0 0 0 0
62304- 0 0 0 0 0 0 0 0 0 0 0 0
62305- 0 0 0 0 0 0 0 0 0 0 0 0
62306- 0 0 0 0 0 0 0 0 0 0 0 0
62307- 0 0 0 0 0 0 0 0 0 0 0 0
62308- 0 0 0 0 0 0 0 0 0 0 0 0
62309- 0 0 0 0 0 0 0 0 0 0 0 0
62310- 0 0 0 0 0 0 0 0 0 0 0 0
62311- 0 0 0 0 0 0 0 0 0 0 0 0
62312- 0 0 0 0 0 0 0 0 0 0 0 0
62313- 0 0 0 0 0 0 10 10 10 34 34 34
62314- 74 74 74 74 74 74 2 2 6 6 6 6
62315-144 144 144 198 198 198 190 190 190 178 166 146
62316-154 121 60 156 107 11 156 107 11 168 124 44
62317-174 154 114 187 187 187 190 190 190 210 210 210
62318-246 246 246 253 253 253 253 253 253 182 182 182
62319- 6 6 6 2 2 6 2 2 6 2 2 6
62320- 2 2 6 2 2 6 2 2 6 62 62 62
62321- 74 74 74 34 34 34 14 14 14 0 0 0
62322- 0 0 0 0 0 0 0 0 0 0 0 0
62323- 0 0 0 0 0 0 0 0 0 0 0 0
62324- 0 0 0 0 0 0 0 0 0 0 0 0
62325- 0 0 0 0 0 0 0 0 0 0 0 0
62326- 0 0 0 0 0 0 0 0 0 0 0 0
62327- 0 0 0 0 0 0 0 0 0 0 0 0
62328- 0 0 0 0 0 0 0 0 0 0 0 0
62329- 0 0 0 0 0 0 0 0 0 0 0 0
62330- 0 0 0 0 0 0 0 0 0 0 0 0
62331- 0 0 0 0 0 0 0 0 0 0 0 0
62332- 0 0 0 0 0 0 0 0 0 0 0 0
62333- 0 0 0 10 10 10 22 22 22 54 54 54
62334- 94 94 94 18 18 18 2 2 6 46 46 46
62335-234 234 234 221 221 221 190 190 190 190 190 190
62336-190 190 190 187 187 187 187 187 187 190 190 190
62337-190 190 190 195 195 195 214 214 214 242 242 242
62338-253 253 253 253 253 253 253 253 253 253 253 253
62339- 82 82 82 2 2 6 2 2 6 2 2 6
62340- 2 2 6 2 2 6 2 2 6 14 14 14
62341- 86 86 86 54 54 54 22 22 22 6 6 6
62342- 0 0 0 0 0 0 0 0 0 0 0 0
62343- 0 0 0 0 0 0 0 0 0 0 0 0
62344- 0 0 0 0 0 0 0 0 0 0 0 0
62345- 0 0 0 0 0 0 0 0 0 0 0 0
62346- 0 0 0 0 0 0 0 0 0 0 0 0
62347- 0 0 0 0 0 0 0 0 0 0 0 0
62348- 0 0 0 0 0 0 0 0 0 0 0 0
62349- 0 0 0 0 0 0 0 0 0 0 0 0
62350- 0 0 0 0 0 0 0 0 0 0 0 0
62351- 0 0 0 0 0 0 0 0 0 0 0 0
62352- 0 0 0 0 0 0 0 0 0 0 0 0
62353- 6 6 6 18 18 18 46 46 46 90 90 90
62354- 46 46 46 18 18 18 6 6 6 182 182 182
62355-253 253 253 246 246 246 206 206 206 190 190 190
62356-190 190 190 190 190 190 190 190 190 190 190 190
62357-206 206 206 231 231 231 250 250 250 253 253 253
62358-253 253 253 253 253 253 253 253 253 253 253 253
62359-202 202 202 14 14 14 2 2 6 2 2 6
62360- 2 2 6 2 2 6 2 2 6 2 2 6
62361- 42 42 42 86 86 86 42 42 42 18 18 18
62362- 6 6 6 0 0 0 0 0 0 0 0 0
62363- 0 0 0 0 0 0 0 0 0 0 0 0
62364- 0 0 0 0 0 0 0 0 0 0 0 0
62365- 0 0 0 0 0 0 0 0 0 0 0 0
62366- 0 0 0 0 0 0 0 0 0 0 0 0
62367- 0 0 0 0 0 0 0 0 0 0 0 0
62368- 0 0 0 0 0 0 0 0 0 0 0 0
62369- 0 0 0 0 0 0 0 0 0 0 0 0
62370- 0 0 0 0 0 0 0 0 0 0 0 0
62371- 0 0 0 0 0 0 0 0 0 0 0 0
62372- 0 0 0 0 0 0 0 0 0 6 6 6
62373- 14 14 14 38 38 38 74 74 74 66 66 66
62374- 2 2 6 6 6 6 90 90 90 250 250 250
62375-253 253 253 253 253 253 238 238 238 198 198 198
62376-190 190 190 190 190 190 195 195 195 221 221 221
62377-246 246 246 253 253 253 253 253 253 253 253 253
62378-253 253 253 253 253 253 253 253 253 253 253 253
62379-253 253 253 82 82 82 2 2 6 2 2 6
62380- 2 2 6 2 2 6 2 2 6 2 2 6
62381- 2 2 6 78 78 78 70 70 70 34 34 34
62382- 14 14 14 6 6 6 0 0 0 0 0 0
62383- 0 0 0 0 0 0 0 0 0 0 0 0
62384- 0 0 0 0 0 0 0 0 0 0 0 0
62385- 0 0 0 0 0 0 0 0 0 0 0 0
62386- 0 0 0 0 0 0 0 0 0 0 0 0
62387- 0 0 0 0 0 0 0 0 0 0 0 0
62388- 0 0 0 0 0 0 0 0 0 0 0 0
62389- 0 0 0 0 0 0 0 0 0 0 0 0
62390- 0 0 0 0 0 0 0 0 0 0 0 0
62391- 0 0 0 0 0 0 0 0 0 0 0 0
62392- 0 0 0 0 0 0 0 0 0 14 14 14
62393- 34 34 34 66 66 66 78 78 78 6 6 6
62394- 2 2 6 18 18 18 218 218 218 253 253 253
62395-253 253 253 253 253 253 253 253 253 246 246 246
62396-226 226 226 231 231 231 246 246 246 253 253 253
62397-253 253 253 253 253 253 253 253 253 253 253 253
62398-253 253 253 253 253 253 253 253 253 253 253 253
62399-253 253 253 178 178 178 2 2 6 2 2 6
62400- 2 2 6 2 2 6 2 2 6 2 2 6
62401- 2 2 6 18 18 18 90 90 90 62 62 62
62402- 30 30 30 10 10 10 0 0 0 0 0 0
62403- 0 0 0 0 0 0 0 0 0 0 0 0
62404- 0 0 0 0 0 0 0 0 0 0 0 0
62405- 0 0 0 0 0 0 0 0 0 0 0 0
62406- 0 0 0 0 0 0 0 0 0 0 0 0
62407- 0 0 0 0 0 0 0 0 0 0 0 0
62408- 0 0 0 0 0 0 0 0 0 0 0 0
62409- 0 0 0 0 0 0 0 0 0 0 0 0
62410- 0 0 0 0 0 0 0 0 0 0 0 0
62411- 0 0 0 0 0 0 0 0 0 0 0 0
62412- 0 0 0 0 0 0 10 10 10 26 26 26
62413- 58 58 58 90 90 90 18 18 18 2 2 6
62414- 2 2 6 110 110 110 253 253 253 253 253 253
62415-253 253 253 253 253 253 253 253 253 253 253 253
62416-250 250 250 253 253 253 253 253 253 253 253 253
62417-253 253 253 253 253 253 253 253 253 253 253 253
62418-253 253 253 253 253 253 253 253 253 253 253 253
62419-253 253 253 231 231 231 18 18 18 2 2 6
62420- 2 2 6 2 2 6 2 2 6 2 2 6
62421- 2 2 6 2 2 6 18 18 18 94 94 94
62422- 54 54 54 26 26 26 10 10 10 0 0 0
62423- 0 0 0 0 0 0 0 0 0 0 0 0
62424- 0 0 0 0 0 0 0 0 0 0 0 0
62425- 0 0 0 0 0 0 0 0 0 0 0 0
62426- 0 0 0 0 0 0 0 0 0 0 0 0
62427- 0 0 0 0 0 0 0 0 0 0 0 0
62428- 0 0 0 0 0 0 0 0 0 0 0 0
62429- 0 0 0 0 0 0 0 0 0 0 0 0
62430- 0 0 0 0 0 0 0 0 0 0 0 0
62431- 0 0 0 0 0 0 0 0 0 0 0 0
62432- 0 0 0 6 6 6 22 22 22 50 50 50
62433- 90 90 90 26 26 26 2 2 6 2 2 6
62434- 14 14 14 195 195 195 250 250 250 253 253 253
62435-253 253 253 253 253 253 253 253 253 253 253 253
62436-253 253 253 253 253 253 253 253 253 253 253 253
62437-253 253 253 253 253 253 253 253 253 253 253 253
62438-253 253 253 253 253 253 253 253 253 253 253 253
62439-250 250 250 242 242 242 54 54 54 2 2 6
62440- 2 2 6 2 2 6 2 2 6 2 2 6
62441- 2 2 6 2 2 6 2 2 6 38 38 38
62442- 86 86 86 50 50 50 22 22 22 6 6 6
62443- 0 0 0 0 0 0 0 0 0 0 0 0
62444- 0 0 0 0 0 0 0 0 0 0 0 0
62445- 0 0 0 0 0 0 0 0 0 0 0 0
62446- 0 0 0 0 0 0 0 0 0 0 0 0
62447- 0 0 0 0 0 0 0 0 0 0 0 0
62448- 0 0 0 0 0 0 0 0 0 0 0 0
62449- 0 0 0 0 0 0 0 0 0 0 0 0
62450- 0 0 0 0 0 0 0 0 0 0 0 0
62451- 0 0 0 0 0 0 0 0 0 0 0 0
62452- 6 6 6 14 14 14 38 38 38 82 82 82
62453- 34 34 34 2 2 6 2 2 6 2 2 6
62454- 42 42 42 195 195 195 246 246 246 253 253 253
62455-253 253 253 253 253 253 253 253 253 250 250 250
62456-242 242 242 242 242 242 250 250 250 253 253 253
62457-253 253 253 253 253 253 253 253 253 253 253 253
62458-253 253 253 250 250 250 246 246 246 238 238 238
62459-226 226 226 231 231 231 101 101 101 6 6 6
62460- 2 2 6 2 2 6 2 2 6 2 2 6
62461- 2 2 6 2 2 6 2 2 6 2 2 6
62462- 38 38 38 82 82 82 42 42 42 14 14 14
62463- 6 6 6 0 0 0 0 0 0 0 0 0
62464- 0 0 0 0 0 0 0 0 0 0 0 0
62465- 0 0 0 0 0 0 0 0 0 0 0 0
62466- 0 0 0 0 0 0 0 0 0 0 0 0
62467- 0 0 0 0 0 0 0 0 0 0 0 0
62468- 0 0 0 0 0 0 0 0 0 0 0 0
62469- 0 0 0 0 0 0 0 0 0 0 0 0
62470- 0 0 0 0 0 0 0 0 0 0 0 0
62471- 0 0 0 0 0 0 0 0 0 0 0 0
62472- 10 10 10 26 26 26 62 62 62 66 66 66
62473- 2 2 6 2 2 6 2 2 6 6 6 6
62474- 70 70 70 170 170 170 206 206 206 234 234 234
62475-246 246 246 250 250 250 250 250 250 238 238 238
62476-226 226 226 231 231 231 238 238 238 250 250 250
62477-250 250 250 250 250 250 246 246 246 231 231 231
62478-214 214 214 206 206 206 202 202 202 202 202 202
62479-198 198 198 202 202 202 182 182 182 18 18 18
62480- 2 2 6 2 2 6 2 2 6 2 2 6
62481- 2 2 6 2 2 6 2 2 6 2 2 6
62482- 2 2 6 62 62 62 66 66 66 30 30 30
62483- 10 10 10 0 0 0 0 0 0 0 0 0
62484- 0 0 0 0 0 0 0 0 0 0 0 0
62485- 0 0 0 0 0 0 0 0 0 0 0 0
62486- 0 0 0 0 0 0 0 0 0 0 0 0
62487- 0 0 0 0 0 0 0 0 0 0 0 0
62488- 0 0 0 0 0 0 0 0 0 0 0 0
62489- 0 0 0 0 0 0 0 0 0 0 0 0
62490- 0 0 0 0 0 0 0 0 0 0 0 0
62491- 0 0 0 0 0 0 0 0 0 0 0 0
62492- 14 14 14 42 42 42 82 82 82 18 18 18
62493- 2 2 6 2 2 6 2 2 6 10 10 10
62494- 94 94 94 182 182 182 218 218 218 242 242 242
62495-250 250 250 253 253 253 253 253 253 250 250 250
62496-234 234 234 253 253 253 253 253 253 253 253 253
62497-253 253 253 253 253 253 253 253 253 246 246 246
62498-238 238 238 226 226 226 210 210 210 202 202 202
62499-195 195 195 195 195 195 210 210 210 158 158 158
62500- 6 6 6 14 14 14 50 50 50 14 14 14
62501- 2 2 6 2 2 6 2 2 6 2 2 6
62502- 2 2 6 6 6 6 86 86 86 46 46 46
62503- 18 18 18 6 6 6 0 0 0 0 0 0
62504- 0 0 0 0 0 0 0 0 0 0 0 0
62505- 0 0 0 0 0 0 0 0 0 0 0 0
62506- 0 0 0 0 0 0 0 0 0 0 0 0
62507- 0 0 0 0 0 0 0 0 0 0 0 0
62508- 0 0 0 0 0 0 0 0 0 0 0 0
62509- 0 0 0 0 0 0 0 0 0 0 0 0
62510- 0 0 0 0 0 0 0 0 0 0 0 0
62511- 0 0 0 0 0 0 0 0 0 6 6 6
62512- 22 22 22 54 54 54 70 70 70 2 2 6
62513- 2 2 6 10 10 10 2 2 6 22 22 22
62514-166 166 166 231 231 231 250 250 250 253 253 253
62515-253 253 253 253 253 253 253 253 253 250 250 250
62516-242 242 242 253 253 253 253 253 253 253 253 253
62517-253 253 253 253 253 253 253 253 253 253 253 253
62518-253 253 253 253 253 253 253 253 253 246 246 246
62519-231 231 231 206 206 206 198 198 198 226 226 226
62520- 94 94 94 2 2 6 6 6 6 38 38 38
62521- 30 30 30 2 2 6 2 2 6 2 2 6
62522- 2 2 6 2 2 6 62 62 62 66 66 66
62523- 26 26 26 10 10 10 0 0 0 0 0 0
62524- 0 0 0 0 0 0 0 0 0 0 0 0
62525- 0 0 0 0 0 0 0 0 0 0 0 0
62526- 0 0 0 0 0 0 0 0 0 0 0 0
62527- 0 0 0 0 0 0 0 0 0 0 0 0
62528- 0 0 0 0 0 0 0 0 0 0 0 0
62529- 0 0 0 0 0 0 0 0 0 0 0 0
62530- 0 0 0 0 0 0 0 0 0 0 0 0
62531- 0 0 0 0 0 0 0 0 0 10 10 10
62532- 30 30 30 74 74 74 50 50 50 2 2 6
62533- 26 26 26 26 26 26 2 2 6 106 106 106
62534-238 238 238 253 253 253 253 253 253 253 253 253
62535-253 253 253 253 253 253 253 253 253 253 253 253
62536-253 253 253 253 253 253 253 253 253 253 253 253
62537-253 253 253 253 253 253 253 253 253 253 253 253
62538-253 253 253 253 253 253 253 253 253 253 253 253
62539-253 253 253 246 246 246 218 218 218 202 202 202
62540-210 210 210 14 14 14 2 2 6 2 2 6
62541- 30 30 30 22 22 22 2 2 6 2 2 6
62542- 2 2 6 2 2 6 18 18 18 86 86 86
62543- 42 42 42 14 14 14 0 0 0 0 0 0
62544- 0 0 0 0 0 0 0 0 0 0 0 0
62545- 0 0 0 0 0 0 0 0 0 0 0 0
62546- 0 0 0 0 0 0 0 0 0 0 0 0
62547- 0 0 0 0 0 0 0 0 0 0 0 0
62548- 0 0 0 0 0 0 0 0 0 0 0 0
62549- 0 0 0 0 0 0 0 0 0 0 0 0
62550- 0 0 0 0 0 0 0 0 0 0 0 0
62551- 0 0 0 0 0 0 0 0 0 14 14 14
62552- 42 42 42 90 90 90 22 22 22 2 2 6
62553- 42 42 42 2 2 6 18 18 18 218 218 218
62554-253 253 253 253 253 253 253 253 253 253 253 253
62555-253 253 253 253 253 253 253 253 253 253 253 253
62556-253 253 253 253 253 253 253 253 253 253 253 253
62557-253 253 253 253 253 253 253 253 253 253 253 253
62558-253 253 253 253 253 253 253 253 253 253 253 253
62559-253 253 253 253 253 253 250 250 250 221 221 221
62560-218 218 218 101 101 101 2 2 6 14 14 14
62561- 18 18 18 38 38 38 10 10 10 2 2 6
62562- 2 2 6 2 2 6 2 2 6 78 78 78
62563- 58 58 58 22 22 22 6 6 6 0 0 0
62564- 0 0 0 0 0 0 0 0 0 0 0 0
62565- 0 0 0 0 0 0 0 0 0 0 0 0
62566- 0 0 0 0 0 0 0 0 0 0 0 0
62567- 0 0 0 0 0 0 0 0 0 0 0 0
62568- 0 0 0 0 0 0 0 0 0 0 0 0
62569- 0 0 0 0 0 0 0 0 0 0 0 0
62570- 0 0 0 0 0 0 0 0 0 0 0 0
62571- 0 0 0 0 0 0 6 6 6 18 18 18
62572- 54 54 54 82 82 82 2 2 6 26 26 26
62573- 22 22 22 2 2 6 123 123 123 253 253 253
62574-253 253 253 253 253 253 253 253 253 253 253 253
62575-253 253 253 253 253 253 253 253 253 253 253 253
62576-253 253 253 253 253 253 253 253 253 253 253 253
62577-253 253 253 253 253 253 253 253 253 253 253 253
62578-253 253 253 253 253 253 253 253 253 253 253 253
62579-253 253 253 253 253 253 253 253 253 250 250 250
62580-238 238 238 198 198 198 6 6 6 38 38 38
62581- 58 58 58 26 26 26 38 38 38 2 2 6
62582- 2 2 6 2 2 6 2 2 6 46 46 46
62583- 78 78 78 30 30 30 10 10 10 0 0 0
62584- 0 0 0 0 0 0 0 0 0 0 0 0
62585- 0 0 0 0 0 0 0 0 0 0 0 0
62586- 0 0 0 0 0 0 0 0 0 0 0 0
62587- 0 0 0 0 0 0 0 0 0 0 0 0
62588- 0 0 0 0 0 0 0 0 0 0 0 0
62589- 0 0 0 0 0 0 0 0 0 0 0 0
62590- 0 0 0 0 0 0 0 0 0 0 0 0
62591- 0 0 0 0 0 0 10 10 10 30 30 30
62592- 74 74 74 58 58 58 2 2 6 42 42 42
62593- 2 2 6 22 22 22 231 231 231 253 253 253
62594-253 253 253 253 253 253 253 253 253 253 253 253
62595-253 253 253 253 253 253 253 253 253 250 250 250
62596-253 253 253 253 253 253 253 253 253 253 253 253
62597-253 253 253 253 253 253 253 253 253 253 253 253
62598-253 253 253 253 253 253 253 253 253 253 253 253
62599-253 253 253 253 253 253 253 253 253 253 253 253
62600-253 253 253 246 246 246 46 46 46 38 38 38
62601- 42 42 42 14 14 14 38 38 38 14 14 14
62602- 2 2 6 2 2 6 2 2 6 6 6 6
62603- 86 86 86 46 46 46 14 14 14 0 0 0
62604- 0 0 0 0 0 0 0 0 0 0 0 0
62605- 0 0 0 0 0 0 0 0 0 0 0 0
62606- 0 0 0 0 0 0 0 0 0 0 0 0
62607- 0 0 0 0 0 0 0 0 0 0 0 0
62608- 0 0 0 0 0 0 0 0 0 0 0 0
62609- 0 0 0 0 0 0 0 0 0 0 0 0
62610- 0 0 0 0 0 0 0 0 0 0 0 0
62611- 0 0 0 6 6 6 14 14 14 42 42 42
62612- 90 90 90 18 18 18 18 18 18 26 26 26
62613- 2 2 6 116 116 116 253 253 253 253 253 253
62614-253 253 253 253 253 253 253 253 253 253 253 253
62615-253 253 253 253 253 253 250 250 250 238 238 238
62616-253 253 253 253 253 253 253 253 253 253 253 253
62617-253 253 253 253 253 253 253 253 253 253 253 253
62618-253 253 253 253 253 253 253 253 253 253 253 253
62619-253 253 253 253 253 253 253 253 253 253 253 253
62620-253 253 253 253 253 253 94 94 94 6 6 6
62621- 2 2 6 2 2 6 10 10 10 34 34 34
62622- 2 2 6 2 2 6 2 2 6 2 2 6
62623- 74 74 74 58 58 58 22 22 22 6 6 6
62624- 0 0 0 0 0 0 0 0 0 0 0 0
62625- 0 0 0 0 0 0 0 0 0 0 0 0
62626- 0 0 0 0 0 0 0 0 0 0 0 0
62627- 0 0 0 0 0 0 0 0 0 0 0 0
62628- 0 0 0 0 0 0 0 0 0 0 0 0
62629- 0 0 0 0 0 0 0 0 0 0 0 0
62630- 0 0 0 0 0 0 0 0 0 0 0 0
62631- 0 0 0 10 10 10 26 26 26 66 66 66
62632- 82 82 82 2 2 6 38 38 38 6 6 6
62633- 14 14 14 210 210 210 253 253 253 253 253 253
62634-253 253 253 253 253 253 253 253 253 253 253 253
62635-253 253 253 253 253 253 246 246 246 242 242 242
62636-253 253 253 253 253 253 253 253 253 253 253 253
62637-253 253 253 253 253 253 253 253 253 253 253 253
62638-253 253 253 253 253 253 253 253 253 253 253 253
62639-253 253 253 253 253 253 253 253 253 253 253 253
62640-253 253 253 253 253 253 144 144 144 2 2 6
62641- 2 2 6 2 2 6 2 2 6 46 46 46
62642- 2 2 6 2 2 6 2 2 6 2 2 6
62643- 42 42 42 74 74 74 30 30 30 10 10 10
62644- 0 0 0 0 0 0 0 0 0 0 0 0
62645- 0 0 0 0 0 0 0 0 0 0 0 0
62646- 0 0 0 0 0 0 0 0 0 0 0 0
62647- 0 0 0 0 0 0 0 0 0 0 0 0
62648- 0 0 0 0 0 0 0 0 0 0 0 0
62649- 0 0 0 0 0 0 0 0 0 0 0 0
62650- 0 0 0 0 0 0 0 0 0 0 0 0
62651- 6 6 6 14 14 14 42 42 42 90 90 90
62652- 26 26 26 6 6 6 42 42 42 2 2 6
62653- 74 74 74 250 250 250 253 253 253 253 253 253
62654-253 253 253 253 253 253 253 253 253 253 253 253
62655-253 253 253 253 253 253 242 242 242 242 242 242
62656-253 253 253 253 253 253 253 253 253 253 253 253
62657-253 253 253 253 253 253 253 253 253 253 253 253
62658-253 253 253 253 253 253 253 253 253 253 253 253
62659-253 253 253 253 253 253 253 253 253 253 253 253
62660-253 253 253 253 253 253 182 182 182 2 2 6
62661- 2 2 6 2 2 6 2 2 6 46 46 46
62662- 2 2 6 2 2 6 2 2 6 2 2 6
62663- 10 10 10 86 86 86 38 38 38 10 10 10
62664- 0 0 0 0 0 0 0 0 0 0 0 0
62665- 0 0 0 0 0 0 0 0 0 0 0 0
62666- 0 0 0 0 0 0 0 0 0 0 0 0
62667- 0 0 0 0 0 0 0 0 0 0 0 0
62668- 0 0 0 0 0 0 0 0 0 0 0 0
62669- 0 0 0 0 0 0 0 0 0 0 0 0
62670- 0 0 0 0 0 0 0 0 0 0 0 0
62671- 10 10 10 26 26 26 66 66 66 82 82 82
62672- 2 2 6 22 22 22 18 18 18 2 2 6
62673-149 149 149 253 253 253 253 253 253 253 253 253
62674-253 253 253 253 253 253 253 253 253 253 253 253
62675-253 253 253 253 253 253 234 234 234 242 242 242
62676-253 253 253 253 253 253 253 253 253 253 253 253
62677-253 253 253 253 253 253 253 253 253 253 253 253
62678-253 253 253 253 253 253 253 253 253 253 253 253
62679-253 253 253 253 253 253 253 253 253 253 253 253
62680-253 253 253 253 253 253 206 206 206 2 2 6
62681- 2 2 6 2 2 6 2 2 6 38 38 38
62682- 2 2 6 2 2 6 2 2 6 2 2 6
62683- 6 6 6 86 86 86 46 46 46 14 14 14
62684- 0 0 0 0 0 0 0 0 0 0 0 0
62685- 0 0 0 0 0 0 0 0 0 0 0 0
62686- 0 0 0 0 0 0 0 0 0 0 0 0
62687- 0 0 0 0 0 0 0 0 0 0 0 0
62688- 0 0 0 0 0 0 0 0 0 0 0 0
62689- 0 0 0 0 0 0 0 0 0 0 0 0
62690- 0 0 0 0 0 0 0 0 0 6 6 6
62691- 18 18 18 46 46 46 86 86 86 18 18 18
62692- 2 2 6 34 34 34 10 10 10 6 6 6
62693-210 210 210 253 253 253 253 253 253 253 253 253
62694-253 253 253 253 253 253 253 253 253 253 253 253
62695-253 253 253 253 253 253 234 234 234 242 242 242
62696-253 253 253 253 253 253 253 253 253 253 253 253
62697-253 253 253 253 253 253 253 253 253 253 253 253
62698-253 253 253 253 253 253 253 253 253 253 253 253
62699-253 253 253 253 253 253 253 253 253 253 253 253
62700-253 253 253 253 253 253 221 221 221 6 6 6
62701- 2 2 6 2 2 6 6 6 6 30 30 30
62702- 2 2 6 2 2 6 2 2 6 2 2 6
62703- 2 2 6 82 82 82 54 54 54 18 18 18
62704- 6 6 6 0 0 0 0 0 0 0 0 0
62705- 0 0 0 0 0 0 0 0 0 0 0 0
62706- 0 0 0 0 0 0 0 0 0 0 0 0
62707- 0 0 0 0 0 0 0 0 0 0 0 0
62708- 0 0 0 0 0 0 0 0 0 0 0 0
62709- 0 0 0 0 0 0 0 0 0 0 0 0
62710- 0 0 0 0 0 0 0 0 0 10 10 10
62711- 26 26 26 66 66 66 62 62 62 2 2 6
62712- 2 2 6 38 38 38 10 10 10 26 26 26
62713-238 238 238 253 253 253 253 253 253 253 253 253
62714-253 253 253 253 253 253 253 253 253 253 253 253
62715-253 253 253 253 253 253 231 231 231 238 238 238
62716-253 253 253 253 253 253 253 253 253 253 253 253
62717-253 253 253 253 253 253 253 253 253 253 253 253
62718-253 253 253 253 253 253 253 253 253 253 253 253
62719-253 253 253 253 253 253 253 253 253 253 253 253
62720-253 253 253 253 253 253 231 231 231 6 6 6
62721- 2 2 6 2 2 6 10 10 10 30 30 30
62722- 2 2 6 2 2 6 2 2 6 2 2 6
62723- 2 2 6 66 66 66 58 58 58 22 22 22
62724- 6 6 6 0 0 0 0 0 0 0 0 0
62725- 0 0 0 0 0 0 0 0 0 0 0 0
62726- 0 0 0 0 0 0 0 0 0 0 0 0
62727- 0 0 0 0 0 0 0 0 0 0 0 0
62728- 0 0 0 0 0 0 0 0 0 0 0 0
62729- 0 0 0 0 0 0 0 0 0 0 0 0
62730- 0 0 0 0 0 0 0 0 0 10 10 10
62731- 38 38 38 78 78 78 6 6 6 2 2 6
62732- 2 2 6 46 46 46 14 14 14 42 42 42
62733-246 246 246 253 253 253 253 253 253 253 253 253
62734-253 253 253 253 253 253 253 253 253 253 253 253
62735-253 253 253 253 253 253 231 231 231 242 242 242
62736-253 253 253 253 253 253 253 253 253 253 253 253
62737-253 253 253 253 253 253 253 253 253 253 253 253
62738-253 253 253 253 253 253 253 253 253 253 253 253
62739-253 253 253 253 253 253 253 253 253 253 253 253
62740-253 253 253 253 253 253 234 234 234 10 10 10
62741- 2 2 6 2 2 6 22 22 22 14 14 14
62742- 2 2 6 2 2 6 2 2 6 2 2 6
62743- 2 2 6 66 66 66 62 62 62 22 22 22
62744- 6 6 6 0 0 0 0 0 0 0 0 0
62745- 0 0 0 0 0 0 0 0 0 0 0 0
62746- 0 0 0 0 0 0 0 0 0 0 0 0
62747- 0 0 0 0 0 0 0 0 0 0 0 0
62748- 0 0 0 0 0 0 0 0 0 0 0 0
62749- 0 0 0 0 0 0 0 0 0 0 0 0
62750- 0 0 0 0 0 0 6 6 6 18 18 18
62751- 50 50 50 74 74 74 2 2 6 2 2 6
62752- 14 14 14 70 70 70 34 34 34 62 62 62
62753-250 250 250 253 253 253 253 253 253 253 253 253
62754-253 253 253 253 253 253 253 253 253 253 253 253
62755-253 253 253 253 253 253 231 231 231 246 246 246
62756-253 253 253 253 253 253 253 253 253 253 253 253
62757-253 253 253 253 253 253 253 253 253 253 253 253
62758-253 253 253 253 253 253 253 253 253 253 253 253
62759-253 253 253 253 253 253 253 253 253 253 253 253
62760-253 253 253 253 253 253 234 234 234 14 14 14
62761- 2 2 6 2 2 6 30 30 30 2 2 6
62762- 2 2 6 2 2 6 2 2 6 2 2 6
62763- 2 2 6 66 66 66 62 62 62 22 22 22
62764- 6 6 6 0 0 0 0 0 0 0 0 0
62765- 0 0 0 0 0 0 0 0 0 0 0 0
62766- 0 0 0 0 0 0 0 0 0 0 0 0
62767- 0 0 0 0 0 0 0 0 0 0 0 0
62768- 0 0 0 0 0 0 0 0 0 0 0 0
62769- 0 0 0 0 0 0 0 0 0 0 0 0
62770- 0 0 0 0 0 0 6 6 6 18 18 18
62771- 54 54 54 62 62 62 2 2 6 2 2 6
62772- 2 2 6 30 30 30 46 46 46 70 70 70
62773-250 250 250 253 253 253 253 253 253 253 253 253
62774-253 253 253 253 253 253 253 253 253 253 253 253
62775-253 253 253 253 253 253 231 231 231 246 246 246
62776-253 253 253 253 253 253 253 253 253 253 253 253
62777-253 253 253 253 253 253 253 253 253 253 253 253
62778-253 253 253 253 253 253 253 253 253 253 253 253
62779-253 253 253 253 253 253 253 253 253 253 253 253
62780-253 253 253 253 253 253 226 226 226 10 10 10
62781- 2 2 6 6 6 6 30 30 30 2 2 6
62782- 2 2 6 2 2 6 2 2 6 2 2 6
62783- 2 2 6 66 66 66 58 58 58 22 22 22
62784- 6 6 6 0 0 0 0 0 0 0 0 0
62785- 0 0 0 0 0 0 0 0 0 0 0 0
62786- 0 0 0 0 0 0 0 0 0 0 0 0
62787- 0 0 0 0 0 0 0 0 0 0 0 0
62788- 0 0 0 0 0 0 0 0 0 0 0 0
62789- 0 0 0 0 0 0 0 0 0 0 0 0
62790- 0 0 0 0 0 0 6 6 6 22 22 22
62791- 58 58 58 62 62 62 2 2 6 2 2 6
62792- 2 2 6 2 2 6 30 30 30 78 78 78
62793-250 250 250 253 253 253 253 253 253 253 253 253
62794-253 253 253 253 253 253 253 253 253 253 253 253
62795-253 253 253 253 253 253 231 231 231 246 246 246
62796-253 253 253 253 253 253 253 253 253 253 253 253
62797-253 253 253 253 253 253 253 253 253 253 253 253
62798-253 253 253 253 253 253 253 253 253 253 253 253
62799-253 253 253 253 253 253 253 253 253 253 253 253
62800-253 253 253 253 253 253 206 206 206 2 2 6
62801- 22 22 22 34 34 34 18 14 6 22 22 22
62802- 26 26 26 18 18 18 6 6 6 2 2 6
62803- 2 2 6 82 82 82 54 54 54 18 18 18
62804- 6 6 6 0 0 0 0 0 0 0 0 0
62805- 0 0 0 0 0 0 0 0 0 0 0 0
62806- 0 0 0 0 0 0 0 0 0 0 0 0
62807- 0 0 0 0 0 0 0 0 0 0 0 0
62808- 0 0 0 0 0 0 0 0 0 0 0 0
62809- 0 0 0 0 0 0 0 0 0 0 0 0
62810- 0 0 0 0 0 0 6 6 6 26 26 26
62811- 62 62 62 106 106 106 74 54 14 185 133 11
62812-210 162 10 121 92 8 6 6 6 62 62 62
62813-238 238 238 253 253 253 253 253 253 253 253 253
62814-253 253 253 253 253 253 253 253 253 253 253 253
62815-253 253 253 253 253 253 231 231 231 246 246 246
62816-253 253 253 253 253 253 253 253 253 253 253 253
62817-253 253 253 253 253 253 253 253 253 253 253 253
62818-253 253 253 253 253 253 253 253 253 253 253 253
62819-253 253 253 253 253 253 253 253 253 253 253 253
62820-253 253 253 253 253 253 158 158 158 18 18 18
62821- 14 14 14 2 2 6 2 2 6 2 2 6
62822- 6 6 6 18 18 18 66 66 66 38 38 38
62823- 6 6 6 94 94 94 50 50 50 18 18 18
62824- 6 6 6 0 0 0 0 0 0 0 0 0
62825- 0 0 0 0 0 0 0 0 0 0 0 0
62826- 0 0 0 0 0 0 0 0 0 0 0 0
62827- 0 0 0 0 0 0 0 0 0 0 0 0
62828- 0 0 0 0 0 0 0 0 0 0 0 0
62829- 0 0 0 0 0 0 0 0 0 6 6 6
62830- 10 10 10 10 10 10 18 18 18 38 38 38
62831- 78 78 78 142 134 106 216 158 10 242 186 14
62832-246 190 14 246 190 14 156 118 10 10 10 10
62833- 90 90 90 238 238 238 253 253 253 253 253 253
62834-253 253 253 253 253 253 253 253 253 253 253 253
62835-253 253 253 253 253 253 231 231 231 250 250 250
62836-253 253 253 253 253 253 253 253 253 253 253 253
62837-253 253 253 253 253 253 253 253 253 253 253 253
62838-253 253 253 253 253 253 253 253 253 253 253 253
62839-253 253 253 253 253 253 253 253 253 246 230 190
62840-238 204 91 238 204 91 181 142 44 37 26 9
62841- 2 2 6 2 2 6 2 2 6 2 2 6
62842- 2 2 6 2 2 6 38 38 38 46 46 46
62843- 26 26 26 106 106 106 54 54 54 18 18 18
62844- 6 6 6 0 0 0 0 0 0 0 0 0
62845- 0 0 0 0 0 0 0 0 0 0 0 0
62846- 0 0 0 0 0 0 0 0 0 0 0 0
62847- 0 0 0 0 0 0 0 0 0 0 0 0
62848- 0 0 0 0 0 0 0 0 0 0 0 0
62849- 0 0 0 6 6 6 14 14 14 22 22 22
62850- 30 30 30 38 38 38 50 50 50 70 70 70
62851-106 106 106 190 142 34 226 170 11 242 186 14
62852-246 190 14 246 190 14 246 190 14 154 114 10
62853- 6 6 6 74 74 74 226 226 226 253 253 253
62854-253 253 253 253 253 253 253 253 253 253 253 253
62855-253 253 253 253 253 253 231 231 231 250 250 250
62856-253 253 253 253 253 253 253 253 253 253 253 253
62857-253 253 253 253 253 253 253 253 253 253 253 253
62858-253 253 253 253 253 253 253 253 253 253 253 253
62859-253 253 253 253 253 253 253 253 253 228 184 62
62860-241 196 14 241 208 19 232 195 16 38 30 10
62861- 2 2 6 2 2 6 2 2 6 2 2 6
62862- 2 2 6 6 6 6 30 30 30 26 26 26
62863-203 166 17 154 142 90 66 66 66 26 26 26
62864- 6 6 6 0 0 0 0 0 0 0 0 0
62865- 0 0 0 0 0 0 0 0 0 0 0 0
62866- 0 0 0 0 0 0 0 0 0 0 0 0
62867- 0 0 0 0 0 0 0 0 0 0 0 0
62868- 0 0 0 0 0 0 0 0 0 0 0 0
62869- 6 6 6 18 18 18 38 38 38 58 58 58
62870- 78 78 78 86 86 86 101 101 101 123 123 123
62871-175 146 61 210 150 10 234 174 13 246 186 14
62872-246 190 14 246 190 14 246 190 14 238 190 10
62873-102 78 10 2 2 6 46 46 46 198 198 198
62874-253 253 253 253 253 253 253 253 253 253 253 253
62875-253 253 253 253 253 253 234 234 234 242 242 242
62876-253 253 253 253 253 253 253 253 253 253 253 253
62877-253 253 253 253 253 253 253 253 253 253 253 253
62878-253 253 253 253 253 253 253 253 253 253 253 253
62879-253 253 253 253 253 253 253 253 253 224 178 62
62880-242 186 14 241 196 14 210 166 10 22 18 6
62881- 2 2 6 2 2 6 2 2 6 2 2 6
62882- 2 2 6 2 2 6 6 6 6 121 92 8
62883-238 202 15 232 195 16 82 82 82 34 34 34
62884- 10 10 10 0 0 0 0 0 0 0 0 0
62885- 0 0 0 0 0 0 0 0 0 0 0 0
62886- 0 0 0 0 0 0 0 0 0 0 0 0
62887- 0 0 0 0 0 0 0 0 0 0 0 0
62888- 0 0 0 0 0 0 0 0 0 0 0 0
62889- 14 14 14 38 38 38 70 70 70 154 122 46
62890-190 142 34 200 144 11 197 138 11 197 138 11
62891-213 154 11 226 170 11 242 186 14 246 190 14
62892-246 190 14 246 190 14 246 190 14 246 190 14
62893-225 175 15 46 32 6 2 2 6 22 22 22
62894-158 158 158 250 250 250 253 253 253 253 253 253
62895-253 253 253 253 253 253 253 253 253 253 253 253
62896-253 253 253 253 253 253 253 253 253 253 253 253
62897-253 253 253 253 253 253 253 253 253 253 253 253
62898-253 253 253 253 253 253 253 253 253 253 253 253
62899-253 253 253 250 250 250 242 242 242 224 178 62
62900-239 182 13 236 186 11 213 154 11 46 32 6
62901- 2 2 6 2 2 6 2 2 6 2 2 6
62902- 2 2 6 2 2 6 61 42 6 225 175 15
62903-238 190 10 236 186 11 112 100 78 42 42 42
62904- 14 14 14 0 0 0 0 0 0 0 0 0
62905- 0 0 0 0 0 0 0 0 0 0 0 0
62906- 0 0 0 0 0 0 0 0 0 0 0 0
62907- 0 0 0 0 0 0 0 0 0 0 0 0
62908- 0 0 0 0 0 0 0 0 0 6 6 6
62909- 22 22 22 54 54 54 154 122 46 213 154 11
62910-226 170 11 230 174 11 226 170 11 226 170 11
62911-236 178 12 242 186 14 246 190 14 246 190 14
62912-246 190 14 246 190 14 246 190 14 246 190 14
62913-241 196 14 184 144 12 10 10 10 2 2 6
62914- 6 6 6 116 116 116 242 242 242 253 253 253
62915-253 253 253 253 253 253 253 253 253 253 253 253
62916-253 253 253 253 253 253 253 253 253 253 253 253
62917-253 253 253 253 253 253 253 253 253 253 253 253
62918-253 253 253 253 253 253 253 253 253 253 253 253
62919-253 253 253 231 231 231 198 198 198 214 170 54
62920-236 178 12 236 178 12 210 150 10 137 92 6
62921- 18 14 6 2 2 6 2 2 6 2 2 6
62922- 6 6 6 70 47 6 200 144 11 236 178 12
62923-239 182 13 239 182 13 124 112 88 58 58 58
62924- 22 22 22 6 6 6 0 0 0 0 0 0
62925- 0 0 0 0 0 0 0 0 0 0 0 0
62926- 0 0 0 0 0 0 0 0 0 0 0 0
62927- 0 0 0 0 0 0 0 0 0 0 0 0
62928- 0 0 0 0 0 0 0 0 0 10 10 10
62929- 30 30 30 70 70 70 180 133 36 226 170 11
62930-239 182 13 242 186 14 242 186 14 246 186 14
62931-246 190 14 246 190 14 246 190 14 246 190 14
62932-246 190 14 246 190 14 246 190 14 246 190 14
62933-246 190 14 232 195 16 98 70 6 2 2 6
62934- 2 2 6 2 2 6 66 66 66 221 221 221
62935-253 253 253 253 253 253 253 253 253 253 253 253
62936-253 253 253 253 253 253 253 253 253 253 253 253
62937-253 253 253 253 253 253 253 253 253 253 253 253
62938-253 253 253 253 253 253 253 253 253 253 253 253
62939-253 253 253 206 206 206 198 198 198 214 166 58
62940-230 174 11 230 174 11 216 158 10 192 133 9
62941-163 110 8 116 81 8 102 78 10 116 81 8
62942-167 114 7 197 138 11 226 170 11 239 182 13
62943-242 186 14 242 186 14 162 146 94 78 78 78
62944- 34 34 34 14 14 14 6 6 6 0 0 0
62945- 0 0 0 0 0 0 0 0 0 0 0 0
62946- 0 0 0 0 0 0 0 0 0 0 0 0
62947- 0 0 0 0 0 0 0 0 0 0 0 0
62948- 0 0 0 0 0 0 0 0 0 6 6 6
62949- 30 30 30 78 78 78 190 142 34 226 170 11
62950-239 182 13 246 190 14 246 190 14 246 190 14
62951-246 190 14 246 190 14 246 190 14 246 190 14
62952-246 190 14 246 190 14 246 190 14 246 190 14
62953-246 190 14 241 196 14 203 166 17 22 18 6
62954- 2 2 6 2 2 6 2 2 6 38 38 38
62955-218 218 218 253 253 253 253 253 253 253 253 253
62956-253 253 253 253 253 253 253 253 253 253 253 253
62957-253 253 253 253 253 253 253 253 253 253 253 253
62958-253 253 253 253 253 253 253 253 253 253 253 253
62959-250 250 250 206 206 206 198 198 198 202 162 69
62960-226 170 11 236 178 12 224 166 10 210 150 10
62961-200 144 11 197 138 11 192 133 9 197 138 11
62962-210 150 10 226 170 11 242 186 14 246 190 14
62963-246 190 14 246 186 14 225 175 15 124 112 88
62964- 62 62 62 30 30 30 14 14 14 6 6 6
62965- 0 0 0 0 0 0 0 0 0 0 0 0
62966- 0 0 0 0 0 0 0 0 0 0 0 0
62967- 0 0 0 0 0 0 0 0 0 0 0 0
62968- 0 0 0 0 0 0 0 0 0 10 10 10
62969- 30 30 30 78 78 78 174 135 50 224 166 10
62970-239 182 13 246 190 14 246 190 14 246 190 14
62971-246 190 14 246 190 14 246 190 14 246 190 14
62972-246 190 14 246 190 14 246 190 14 246 190 14
62973-246 190 14 246 190 14 241 196 14 139 102 15
62974- 2 2 6 2 2 6 2 2 6 2 2 6
62975- 78 78 78 250 250 250 253 253 253 253 253 253
62976-253 253 253 253 253 253 253 253 253 253 253 253
62977-253 253 253 253 253 253 253 253 253 253 253 253
62978-253 253 253 253 253 253 253 253 253 253 253 253
62979-250 250 250 214 214 214 198 198 198 190 150 46
62980-219 162 10 236 178 12 234 174 13 224 166 10
62981-216 158 10 213 154 11 213 154 11 216 158 10
62982-226 170 11 239 182 13 246 190 14 246 190 14
62983-246 190 14 246 190 14 242 186 14 206 162 42
62984-101 101 101 58 58 58 30 30 30 14 14 14
62985- 6 6 6 0 0 0 0 0 0 0 0 0
62986- 0 0 0 0 0 0 0 0 0 0 0 0
62987- 0 0 0 0 0 0 0 0 0 0 0 0
62988- 0 0 0 0 0 0 0 0 0 10 10 10
62989- 30 30 30 74 74 74 174 135 50 216 158 10
62990-236 178 12 246 190 14 246 190 14 246 190 14
62991-246 190 14 246 190 14 246 190 14 246 190 14
62992-246 190 14 246 190 14 246 190 14 246 190 14
62993-246 190 14 246 190 14 241 196 14 226 184 13
62994- 61 42 6 2 2 6 2 2 6 2 2 6
62995- 22 22 22 238 238 238 253 253 253 253 253 253
62996-253 253 253 253 253 253 253 253 253 253 253 253
62997-253 253 253 253 253 253 253 253 253 253 253 253
62998-253 253 253 253 253 253 253 253 253 253 253 253
62999-253 253 253 226 226 226 187 187 187 180 133 36
63000-216 158 10 236 178 12 239 182 13 236 178 12
63001-230 174 11 226 170 11 226 170 11 230 174 11
63002-236 178 12 242 186 14 246 190 14 246 190 14
63003-246 190 14 246 190 14 246 186 14 239 182 13
63004-206 162 42 106 106 106 66 66 66 34 34 34
63005- 14 14 14 6 6 6 0 0 0 0 0 0
63006- 0 0 0 0 0 0 0 0 0 0 0 0
63007- 0 0 0 0 0 0 0 0 0 0 0 0
63008- 0 0 0 0 0 0 0 0 0 6 6 6
63009- 26 26 26 70 70 70 163 133 67 213 154 11
63010-236 178 12 246 190 14 246 190 14 246 190 14
63011-246 190 14 246 190 14 246 190 14 246 190 14
63012-246 190 14 246 190 14 246 190 14 246 190 14
63013-246 190 14 246 190 14 246 190 14 241 196 14
63014-190 146 13 18 14 6 2 2 6 2 2 6
63015- 46 46 46 246 246 246 253 253 253 253 253 253
63016-253 253 253 253 253 253 253 253 253 253 253 253
63017-253 253 253 253 253 253 253 253 253 253 253 253
63018-253 253 253 253 253 253 253 253 253 253 253 253
63019-253 253 253 221 221 221 86 86 86 156 107 11
63020-216 158 10 236 178 12 242 186 14 246 186 14
63021-242 186 14 239 182 13 239 182 13 242 186 14
63022-242 186 14 246 186 14 246 190 14 246 190 14
63023-246 190 14 246 190 14 246 190 14 246 190 14
63024-242 186 14 225 175 15 142 122 72 66 66 66
63025- 30 30 30 10 10 10 0 0 0 0 0 0
63026- 0 0 0 0 0 0 0 0 0 0 0 0
63027- 0 0 0 0 0 0 0 0 0 0 0 0
63028- 0 0 0 0 0 0 0 0 0 6 6 6
63029- 26 26 26 70 70 70 163 133 67 210 150 10
63030-236 178 12 246 190 14 246 190 14 246 190 14
63031-246 190 14 246 190 14 246 190 14 246 190 14
63032-246 190 14 246 190 14 246 190 14 246 190 14
63033-246 190 14 246 190 14 246 190 14 246 190 14
63034-232 195 16 121 92 8 34 34 34 106 106 106
63035-221 221 221 253 253 253 253 253 253 253 253 253
63036-253 253 253 253 253 253 253 253 253 253 253 253
63037-253 253 253 253 253 253 253 253 253 253 253 253
63038-253 253 253 253 253 253 253 253 253 253 253 253
63039-242 242 242 82 82 82 18 14 6 163 110 8
63040-216 158 10 236 178 12 242 186 14 246 190 14
63041-246 190 14 246 190 14 246 190 14 246 190 14
63042-246 190 14 246 190 14 246 190 14 246 190 14
63043-246 190 14 246 190 14 246 190 14 246 190 14
63044-246 190 14 246 190 14 242 186 14 163 133 67
63045- 46 46 46 18 18 18 6 6 6 0 0 0
63046- 0 0 0 0 0 0 0 0 0 0 0 0
63047- 0 0 0 0 0 0 0 0 0 0 0 0
63048- 0 0 0 0 0 0 0 0 0 10 10 10
63049- 30 30 30 78 78 78 163 133 67 210 150 10
63050-236 178 12 246 186 14 246 190 14 246 190 14
63051-246 190 14 246 190 14 246 190 14 246 190 14
63052-246 190 14 246 190 14 246 190 14 246 190 14
63053-246 190 14 246 190 14 246 190 14 246 190 14
63054-241 196 14 215 174 15 190 178 144 253 253 253
63055-253 253 253 253 253 253 253 253 253 253 253 253
63056-253 253 253 253 253 253 253 253 253 253 253 253
63057-253 253 253 253 253 253 253 253 253 253 253 253
63058-253 253 253 253 253 253 253 253 253 218 218 218
63059- 58 58 58 2 2 6 22 18 6 167 114 7
63060-216 158 10 236 178 12 246 186 14 246 190 14
63061-246 190 14 246 190 14 246 190 14 246 190 14
63062-246 190 14 246 190 14 246 190 14 246 190 14
63063-246 190 14 246 190 14 246 190 14 246 190 14
63064-246 190 14 246 186 14 242 186 14 190 150 46
63065- 54 54 54 22 22 22 6 6 6 0 0 0
63066- 0 0 0 0 0 0 0 0 0 0 0 0
63067- 0 0 0 0 0 0 0 0 0 0 0 0
63068- 0 0 0 0 0 0 0 0 0 14 14 14
63069- 38 38 38 86 86 86 180 133 36 213 154 11
63070-236 178 12 246 186 14 246 190 14 246 190 14
63071-246 190 14 246 190 14 246 190 14 246 190 14
63072-246 190 14 246 190 14 246 190 14 246 190 14
63073-246 190 14 246 190 14 246 190 14 246 190 14
63074-246 190 14 232 195 16 190 146 13 214 214 214
63075-253 253 253 253 253 253 253 253 253 253 253 253
63076-253 253 253 253 253 253 253 253 253 253 253 253
63077-253 253 253 253 253 253 253 253 253 253 253 253
63078-253 253 253 250 250 250 170 170 170 26 26 26
63079- 2 2 6 2 2 6 37 26 9 163 110 8
63080-219 162 10 239 182 13 246 186 14 246 190 14
63081-246 190 14 246 190 14 246 190 14 246 190 14
63082-246 190 14 246 190 14 246 190 14 246 190 14
63083-246 190 14 246 190 14 246 190 14 246 190 14
63084-246 186 14 236 178 12 224 166 10 142 122 72
63085- 46 46 46 18 18 18 6 6 6 0 0 0
63086- 0 0 0 0 0 0 0 0 0 0 0 0
63087- 0 0 0 0 0 0 0 0 0 0 0 0
63088- 0 0 0 0 0 0 6 6 6 18 18 18
63089- 50 50 50 109 106 95 192 133 9 224 166 10
63090-242 186 14 246 190 14 246 190 14 246 190 14
63091-246 190 14 246 190 14 246 190 14 246 190 14
63092-246 190 14 246 190 14 246 190 14 246 190 14
63093-246 190 14 246 190 14 246 190 14 246 190 14
63094-242 186 14 226 184 13 210 162 10 142 110 46
63095-226 226 226 253 253 253 253 253 253 253 253 253
63096-253 253 253 253 253 253 253 253 253 253 253 253
63097-253 253 253 253 253 253 253 253 253 253 253 253
63098-198 198 198 66 66 66 2 2 6 2 2 6
63099- 2 2 6 2 2 6 50 34 6 156 107 11
63100-219 162 10 239 182 13 246 186 14 246 190 14
63101-246 190 14 246 190 14 246 190 14 246 190 14
63102-246 190 14 246 190 14 246 190 14 246 190 14
63103-246 190 14 246 190 14 246 190 14 242 186 14
63104-234 174 13 213 154 11 154 122 46 66 66 66
63105- 30 30 30 10 10 10 0 0 0 0 0 0
63106- 0 0 0 0 0 0 0 0 0 0 0 0
63107- 0 0 0 0 0 0 0 0 0 0 0 0
63108- 0 0 0 0 0 0 6 6 6 22 22 22
63109- 58 58 58 154 121 60 206 145 10 234 174 13
63110-242 186 14 246 186 14 246 190 14 246 190 14
63111-246 190 14 246 190 14 246 190 14 246 190 14
63112-246 190 14 246 190 14 246 190 14 246 190 14
63113-246 190 14 246 190 14 246 190 14 246 190 14
63114-246 186 14 236 178 12 210 162 10 163 110 8
63115- 61 42 6 138 138 138 218 218 218 250 250 250
63116-253 253 253 253 253 253 253 253 253 250 250 250
63117-242 242 242 210 210 210 144 144 144 66 66 66
63118- 6 6 6 2 2 6 2 2 6 2 2 6
63119- 2 2 6 2 2 6 61 42 6 163 110 8
63120-216 158 10 236 178 12 246 190 14 246 190 14
63121-246 190 14 246 190 14 246 190 14 246 190 14
63122-246 190 14 246 190 14 246 190 14 246 190 14
63123-246 190 14 239 182 13 230 174 11 216 158 10
63124-190 142 34 124 112 88 70 70 70 38 38 38
63125- 18 18 18 6 6 6 0 0 0 0 0 0
63126- 0 0 0 0 0 0 0 0 0 0 0 0
63127- 0 0 0 0 0 0 0 0 0 0 0 0
63128- 0 0 0 0 0 0 6 6 6 22 22 22
63129- 62 62 62 168 124 44 206 145 10 224 166 10
63130-236 178 12 239 182 13 242 186 14 242 186 14
63131-246 186 14 246 190 14 246 190 14 246 190 14
63132-246 190 14 246 190 14 246 190 14 246 190 14
63133-246 190 14 246 190 14 246 190 14 246 190 14
63134-246 190 14 236 178 12 216 158 10 175 118 6
63135- 80 54 7 2 2 6 6 6 6 30 30 30
63136- 54 54 54 62 62 62 50 50 50 38 38 38
63137- 14 14 14 2 2 6 2 2 6 2 2 6
63138- 2 2 6 2 2 6 2 2 6 2 2 6
63139- 2 2 6 6 6 6 80 54 7 167 114 7
63140-213 154 11 236 178 12 246 190 14 246 190 14
63141-246 190 14 246 190 14 246 190 14 246 190 14
63142-246 190 14 242 186 14 239 182 13 239 182 13
63143-230 174 11 210 150 10 174 135 50 124 112 88
63144- 82 82 82 54 54 54 34 34 34 18 18 18
63145- 6 6 6 0 0 0 0 0 0 0 0 0
63146- 0 0 0 0 0 0 0 0 0 0 0 0
63147- 0 0 0 0 0 0 0 0 0 0 0 0
63148- 0 0 0 0 0 0 6 6 6 18 18 18
63149- 50 50 50 158 118 36 192 133 9 200 144 11
63150-216 158 10 219 162 10 224 166 10 226 170 11
63151-230 174 11 236 178 12 239 182 13 239 182 13
63152-242 186 14 246 186 14 246 190 14 246 190 14
63153-246 190 14 246 190 14 246 190 14 246 190 14
63154-246 186 14 230 174 11 210 150 10 163 110 8
63155-104 69 6 10 10 10 2 2 6 2 2 6
63156- 2 2 6 2 2 6 2 2 6 2 2 6
63157- 2 2 6 2 2 6 2 2 6 2 2 6
63158- 2 2 6 2 2 6 2 2 6 2 2 6
63159- 2 2 6 6 6 6 91 60 6 167 114 7
63160-206 145 10 230 174 11 242 186 14 246 190 14
63161-246 190 14 246 190 14 246 186 14 242 186 14
63162-239 182 13 230 174 11 224 166 10 213 154 11
63163-180 133 36 124 112 88 86 86 86 58 58 58
63164- 38 38 38 22 22 22 10 10 10 6 6 6
63165- 0 0 0 0 0 0 0 0 0 0 0 0
63166- 0 0 0 0 0 0 0 0 0 0 0 0
63167- 0 0 0 0 0 0 0 0 0 0 0 0
63168- 0 0 0 0 0 0 0 0 0 14 14 14
63169- 34 34 34 70 70 70 138 110 50 158 118 36
63170-167 114 7 180 123 7 192 133 9 197 138 11
63171-200 144 11 206 145 10 213 154 11 219 162 10
63172-224 166 10 230 174 11 239 182 13 242 186 14
63173-246 186 14 246 186 14 246 186 14 246 186 14
63174-239 182 13 216 158 10 185 133 11 152 99 6
63175-104 69 6 18 14 6 2 2 6 2 2 6
63176- 2 2 6 2 2 6 2 2 6 2 2 6
63177- 2 2 6 2 2 6 2 2 6 2 2 6
63178- 2 2 6 2 2 6 2 2 6 2 2 6
63179- 2 2 6 6 6 6 80 54 7 152 99 6
63180-192 133 9 219 162 10 236 178 12 239 182 13
63181-246 186 14 242 186 14 239 182 13 236 178 12
63182-224 166 10 206 145 10 192 133 9 154 121 60
63183- 94 94 94 62 62 62 42 42 42 22 22 22
63184- 14 14 14 6 6 6 0 0 0 0 0 0
63185- 0 0 0 0 0 0 0 0 0 0 0 0
63186- 0 0 0 0 0 0 0 0 0 0 0 0
63187- 0 0 0 0 0 0 0 0 0 0 0 0
63188- 0 0 0 0 0 0 0 0 0 6 6 6
63189- 18 18 18 34 34 34 58 58 58 78 78 78
63190-101 98 89 124 112 88 142 110 46 156 107 11
63191-163 110 8 167 114 7 175 118 6 180 123 7
63192-185 133 11 197 138 11 210 150 10 219 162 10
63193-226 170 11 236 178 12 236 178 12 234 174 13
63194-219 162 10 197 138 11 163 110 8 130 83 6
63195- 91 60 6 10 10 10 2 2 6 2 2 6
63196- 18 18 18 38 38 38 38 38 38 38 38 38
63197- 38 38 38 38 38 38 38 38 38 38 38 38
63198- 38 38 38 38 38 38 26 26 26 2 2 6
63199- 2 2 6 6 6 6 70 47 6 137 92 6
63200-175 118 6 200 144 11 219 162 10 230 174 11
63201-234 174 13 230 174 11 219 162 10 210 150 10
63202-192 133 9 163 110 8 124 112 88 82 82 82
63203- 50 50 50 30 30 30 14 14 14 6 6 6
63204- 0 0 0 0 0 0 0 0 0 0 0 0
63205- 0 0 0 0 0 0 0 0 0 0 0 0
63206- 0 0 0 0 0 0 0 0 0 0 0 0
63207- 0 0 0 0 0 0 0 0 0 0 0 0
63208- 0 0 0 0 0 0 0 0 0 0 0 0
63209- 6 6 6 14 14 14 22 22 22 34 34 34
63210- 42 42 42 58 58 58 74 74 74 86 86 86
63211-101 98 89 122 102 70 130 98 46 121 87 25
63212-137 92 6 152 99 6 163 110 8 180 123 7
63213-185 133 11 197 138 11 206 145 10 200 144 11
63214-180 123 7 156 107 11 130 83 6 104 69 6
63215- 50 34 6 54 54 54 110 110 110 101 98 89
63216- 86 86 86 82 82 82 78 78 78 78 78 78
63217- 78 78 78 78 78 78 78 78 78 78 78 78
63218- 78 78 78 82 82 82 86 86 86 94 94 94
63219-106 106 106 101 101 101 86 66 34 124 80 6
63220-156 107 11 180 123 7 192 133 9 200 144 11
63221-206 145 10 200 144 11 192 133 9 175 118 6
63222-139 102 15 109 106 95 70 70 70 42 42 42
63223- 22 22 22 10 10 10 0 0 0 0 0 0
63224- 0 0 0 0 0 0 0 0 0 0 0 0
63225- 0 0 0 0 0 0 0 0 0 0 0 0
63226- 0 0 0 0 0 0 0 0 0 0 0 0
63227- 0 0 0 0 0 0 0 0 0 0 0 0
63228- 0 0 0 0 0 0 0 0 0 0 0 0
63229- 0 0 0 0 0 0 6 6 6 10 10 10
63230- 14 14 14 22 22 22 30 30 30 38 38 38
63231- 50 50 50 62 62 62 74 74 74 90 90 90
63232-101 98 89 112 100 78 121 87 25 124 80 6
63233-137 92 6 152 99 6 152 99 6 152 99 6
63234-138 86 6 124 80 6 98 70 6 86 66 30
63235-101 98 89 82 82 82 58 58 58 46 46 46
63236- 38 38 38 34 34 34 34 34 34 34 34 34
63237- 34 34 34 34 34 34 34 34 34 34 34 34
63238- 34 34 34 34 34 34 38 38 38 42 42 42
63239- 54 54 54 82 82 82 94 86 76 91 60 6
63240-134 86 6 156 107 11 167 114 7 175 118 6
63241-175 118 6 167 114 7 152 99 6 121 87 25
63242-101 98 89 62 62 62 34 34 34 18 18 18
63243- 6 6 6 0 0 0 0 0 0 0 0 0
63244- 0 0 0 0 0 0 0 0 0 0 0 0
63245- 0 0 0 0 0 0 0 0 0 0 0 0
63246- 0 0 0 0 0 0 0 0 0 0 0 0
63247- 0 0 0 0 0 0 0 0 0 0 0 0
63248- 0 0 0 0 0 0 0 0 0 0 0 0
63249- 0 0 0 0 0 0 0 0 0 0 0 0
63250- 0 0 0 6 6 6 6 6 6 10 10 10
63251- 18 18 18 22 22 22 30 30 30 42 42 42
63252- 50 50 50 66 66 66 86 86 86 101 98 89
63253-106 86 58 98 70 6 104 69 6 104 69 6
63254-104 69 6 91 60 6 82 62 34 90 90 90
63255- 62 62 62 38 38 38 22 22 22 14 14 14
63256- 10 10 10 10 10 10 10 10 10 10 10 10
63257- 10 10 10 10 10 10 6 6 6 10 10 10
63258- 10 10 10 10 10 10 10 10 10 14 14 14
63259- 22 22 22 42 42 42 70 70 70 89 81 66
63260- 80 54 7 104 69 6 124 80 6 137 92 6
63261-134 86 6 116 81 8 100 82 52 86 86 86
63262- 58 58 58 30 30 30 14 14 14 6 6 6
63263- 0 0 0 0 0 0 0 0 0 0 0 0
63264- 0 0 0 0 0 0 0 0 0 0 0 0
63265- 0 0 0 0 0 0 0 0 0 0 0 0
63266- 0 0 0 0 0 0 0 0 0 0 0 0
63267- 0 0 0 0 0 0 0 0 0 0 0 0
63268- 0 0 0 0 0 0 0 0 0 0 0 0
63269- 0 0 0 0 0 0 0 0 0 0 0 0
63270- 0 0 0 0 0 0 0 0 0 0 0 0
63271- 0 0 0 6 6 6 10 10 10 14 14 14
63272- 18 18 18 26 26 26 38 38 38 54 54 54
63273- 70 70 70 86 86 86 94 86 76 89 81 66
63274- 89 81 66 86 86 86 74 74 74 50 50 50
63275- 30 30 30 14 14 14 6 6 6 0 0 0
63276- 0 0 0 0 0 0 0 0 0 0 0 0
63277- 0 0 0 0 0 0 0 0 0 0 0 0
63278- 0 0 0 0 0 0 0 0 0 0 0 0
63279- 6 6 6 18 18 18 34 34 34 58 58 58
63280- 82 82 82 89 81 66 89 81 66 89 81 66
63281- 94 86 66 94 86 76 74 74 74 50 50 50
63282- 26 26 26 14 14 14 6 6 6 0 0 0
63283- 0 0 0 0 0 0 0 0 0 0 0 0
63284- 0 0 0 0 0 0 0 0 0 0 0 0
63285- 0 0 0 0 0 0 0 0 0 0 0 0
63286- 0 0 0 0 0 0 0 0 0 0 0 0
63287- 0 0 0 0 0 0 0 0 0 0 0 0
63288- 0 0 0 0 0 0 0 0 0 0 0 0
63289- 0 0 0 0 0 0 0 0 0 0 0 0
63290- 0 0 0 0 0 0 0 0 0 0 0 0
63291- 0 0 0 0 0 0 0 0 0 0 0 0
63292- 6 6 6 6 6 6 14 14 14 18 18 18
63293- 30 30 30 38 38 38 46 46 46 54 54 54
63294- 50 50 50 42 42 42 30 30 30 18 18 18
63295- 10 10 10 0 0 0 0 0 0 0 0 0
63296- 0 0 0 0 0 0 0 0 0 0 0 0
63297- 0 0 0 0 0 0 0 0 0 0 0 0
63298- 0 0 0 0 0 0 0 0 0 0 0 0
63299- 0 0 0 6 6 6 14 14 14 26 26 26
63300- 38 38 38 50 50 50 58 58 58 58 58 58
63301- 54 54 54 42 42 42 30 30 30 18 18 18
63302- 10 10 10 0 0 0 0 0 0 0 0 0
63303- 0 0 0 0 0 0 0 0 0 0 0 0
63304- 0 0 0 0 0 0 0 0 0 0 0 0
63305- 0 0 0 0 0 0 0 0 0 0 0 0
63306- 0 0 0 0 0 0 0 0 0 0 0 0
63307- 0 0 0 0 0 0 0 0 0 0 0 0
63308- 0 0 0 0 0 0 0 0 0 0 0 0
63309- 0 0 0 0 0 0 0 0 0 0 0 0
63310- 0 0 0 0 0 0 0 0 0 0 0 0
63311- 0 0 0 0 0 0 0 0 0 0 0 0
63312- 0 0 0 0 0 0 0 0 0 6 6 6
63313- 6 6 6 10 10 10 14 14 14 18 18 18
63314- 18 18 18 14 14 14 10 10 10 6 6 6
63315- 0 0 0 0 0 0 0 0 0 0 0 0
63316- 0 0 0 0 0 0 0 0 0 0 0 0
63317- 0 0 0 0 0 0 0 0 0 0 0 0
63318- 0 0 0 0 0 0 0 0 0 0 0 0
63319- 0 0 0 0 0 0 0 0 0 6 6 6
63320- 14 14 14 18 18 18 22 22 22 22 22 22
63321- 18 18 18 14 14 14 10 10 10 6 6 6
63322- 0 0 0 0 0 0 0 0 0 0 0 0
63323- 0 0 0 0 0 0 0 0 0 0 0 0
63324- 0 0 0 0 0 0 0 0 0 0 0 0
63325- 0 0 0 0 0 0 0 0 0 0 0 0
63326- 0 0 0 0 0 0 0 0 0 0 0 0
63327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63340+4 4 4 4 4 4
63341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63354+4 4 4 4 4 4
63355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63368+4 4 4 4 4 4
63369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63382+4 4 4 4 4 4
63383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63396+4 4 4 4 4 4
63397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63410+4 4 4 4 4 4
63411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63415+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
63416+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
63417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63420+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
63421+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63422+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
63423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63424+4 4 4 4 4 4
63425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63429+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
63430+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
63431+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63434+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
63435+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
63436+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
63437+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63438+4 4 4 4 4 4
63439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63443+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
63444+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
63445+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63448+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
63449+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
63450+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
63451+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
63452+4 4 4 4 4 4
63453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63456+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
63457+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
63458+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
63459+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
63460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63461+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63462+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
63463+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
63464+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
63465+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
63466+4 4 4 4 4 4
63467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63470+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
63471+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
63472+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
63473+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
63474+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63475+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
63476+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
63477+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
63478+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
63479+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
63480+4 4 4 4 4 4
63481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63484+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
63485+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
63486+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
63487+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
63488+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
63489+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
63490+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
63491+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
63492+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
63493+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
63494+4 4 4 4 4 4
63495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63497+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
63498+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
63499+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
63500+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
63501+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
63502+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
63503+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
63504+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
63505+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
63506+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
63507+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
63508+4 4 4 4 4 4
63509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63511+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
63512+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
63513+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
63514+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
63515+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
63516+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
63517+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
63518+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
63519+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
63520+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
63521+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
63522+4 4 4 4 4 4
63523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63525+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
63526+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
63527+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
63528+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
63529+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
63530+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
63531+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
63532+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
63533+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
63534+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
63535+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
63536+4 4 4 4 4 4
63537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63539+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
63540+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
63541+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
63542+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
63543+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
63544+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
63545+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
63546+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
63547+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
63548+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
63549+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
63550+4 4 4 4 4 4
63551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63552+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
63553+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
63554+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
63555+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
63556+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
63557+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
63558+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
63559+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
63560+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
63561+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
63562+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
63563+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
63564+4 4 4 4 4 4
63565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63566+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
63567+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
63568+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
63569+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
63570+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
63571+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
63572+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
63573+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
63574+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
63575+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
63576+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
63577+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
63578+0 0 0 4 4 4
63579+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63580+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
63581+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
63582+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
63583+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
63584+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
63585+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
63586+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
63587+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
63588+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
63589+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
63590+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
63591+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
63592+2 0 0 0 0 0
63593+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
63594+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
63595+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
63596+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
63597+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
63598+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
63599+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
63600+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
63601+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
63602+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
63603+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
63604+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
63605+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
63606+37 38 37 0 0 0
63607+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63608+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
63609+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
63610+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
63611+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
63612+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
63613+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
63614+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
63615+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
63616+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
63617+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
63618+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
63619+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
63620+85 115 134 4 0 0
63621+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
63622+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
63623+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
63624+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
63625+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
63626+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
63627+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
63628+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
63629+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
63630+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
63631+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
63632+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
63633+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
63634+60 73 81 4 0 0
63635+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
63636+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
63637+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
63638+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
63639+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
63640+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
63641+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
63642+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
63643+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
63644+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
63645+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
63646+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
63647+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
63648+16 19 21 4 0 0
63649+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
63650+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
63651+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
63652+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
63653+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
63654+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
63655+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
63656+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
63657+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
63658+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
63659+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
63660+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
63661+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
63662+4 0 0 4 3 3
63663+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
63664+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
63665+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
63666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
63667+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
63668+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
63669+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
63670+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
63671+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
63672+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
63673+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
63674+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
63675+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
63676+3 2 2 4 4 4
63677+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
63678+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
63679+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
63680+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63681+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
63682+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
63683+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
63684+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
63685+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
63686+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
63687+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
63688+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
63689+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
63690+4 4 4 4 4 4
63691+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
63692+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
63693+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
63694+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
63695+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
63696+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
63697+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
63698+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
63699+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
63700+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
63701+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
63702+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
63703+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
63704+4 4 4 4 4 4
63705+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
63706+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
63707+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
63708+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
63709+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
63710+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63711+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
63712+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
63713+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
63714+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
63715+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
63716+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
63717+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
63718+5 5 5 5 5 5
63719+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
63720+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
63721+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
63722+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
63723+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
63724+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63725+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
63726+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
63727+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
63728+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
63729+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
63730+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
63731+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
63732+5 5 5 4 4 4
63733+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
63734+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
63735+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
63736+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
63737+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63738+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
63739+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
63740+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
63741+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
63742+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
63743+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
63744+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
63745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63746+4 4 4 4 4 4
63747+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
63748+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
63749+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
63750+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
63751+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
63752+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63753+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63754+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
63755+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
63756+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
63757+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
63758+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
63759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63760+4 4 4 4 4 4
63761+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
63762+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
63763+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
63764+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
63765+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63766+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
63767+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
63768+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
63769+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
63770+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
63771+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
63772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63774+4 4 4 4 4 4
63775+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
63776+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
63777+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
63778+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
63779+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63780+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63781+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63782+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
63783+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
63784+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
63785+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
63786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63788+4 4 4 4 4 4
63789+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
63790+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
63791+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
63792+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
63793+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63794+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
63795+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63796+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
63797+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
63798+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
63799+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63802+4 4 4 4 4 4
63803+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
63804+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
63805+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
63806+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
63807+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63808+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
63809+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
63810+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
63811+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
63812+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
63813+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
63814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63816+4 4 4 4 4 4
63817+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
63818+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
63819+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
63820+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
63821+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63822+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
63823+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
63824+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
63825+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
63826+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
63827+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
63828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63830+4 4 4 4 4 4
63831+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
63832+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
63833+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
63834+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63835+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
63836+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
63837+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
63838+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
63839+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
63840+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
63841+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63844+4 4 4 4 4 4
63845+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
63846+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
63847+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
63848+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63849+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63850+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
63851+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
63852+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
63853+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
63854+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
63855+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63858+4 4 4 4 4 4
63859+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
63860+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
63861+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63862+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63863+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63864+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
63865+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
63866+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
63867+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
63868+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
63869+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63872+4 4 4 4 4 4
63873+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
63874+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
63875+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63876+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63877+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63878+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
63879+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
63880+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
63881+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63882+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63883+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63886+4 4 4 4 4 4
63887+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63888+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
63889+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63890+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
63891+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
63892+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
63893+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
63894+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
63895+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63896+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63897+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63900+4 4 4 4 4 4
63901+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63902+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
63903+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63904+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
63905+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63906+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
63907+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
63908+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
63909+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63910+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63911+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63914+4 4 4 4 4 4
63915+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
63916+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
63917+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63918+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
63919+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
63920+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
63921+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
63922+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
63923+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63924+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63925+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63928+4 4 4 4 4 4
63929+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
63930+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
63931+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63932+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
63933+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
63934+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
63935+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
63936+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
63937+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63938+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63939+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63942+4 4 4 4 4 4
63943+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
63944+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
63945+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63946+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
63947+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
63948+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
63949+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
63950+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
63951+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63952+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63953+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63956+4 4 4 4 4 4
63957+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
63958+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
63959+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63960+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
63961+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
63962+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
63963+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
63964+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
63965+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
63966+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63967+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63970+4 4 4 4 4 4
63971+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
63972+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
63973+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
63974+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
63975+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
63976+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
63977+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
63978+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
63979+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63980+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63981+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63984+4 4 4 4 4 4
63985+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
63986+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
63987+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63988+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
63989+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
63990+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
63991+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
63992+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
63993+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63994+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63995+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63998+4 4 4 4 4 4
63999+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64000+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
64001+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
64002+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
64003+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
64004+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
64005+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64006+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
64007+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
64008+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64009+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64012+4 4 4 4 4 4
64013+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64014+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
64015+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
64016+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64017+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
64018+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
64019+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
64020+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
64021+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
64022+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64023+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64026+4 4 4 4 4 4
64027+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
64028+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
64029+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
64030+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
64031+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
64032+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
64033+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
64034+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
64035+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
64036+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64037+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64040+4 4 4 4 4 4
64041+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64042+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
64043+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
64044+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
64045+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
64046+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
64047+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
64048+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
64049+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
64050+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64051+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64054+4 4 4 4 4 4
64055+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
64056+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
64057+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
64058+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
64059+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
64060+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
64061+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
64062+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
64063+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
64064+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64065+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64068+4 4 4 4 4 4
64069+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
64070+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
64071+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
64072+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
64073+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
64074+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
64075+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
64076+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
64077+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
64078+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
64079+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64082+4 4 4 4 4 4
64083+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
64084+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
64085+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
64086+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
64087+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
64088+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
64089+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
64090+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
64091+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
64092+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
64093+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64096+4 4 4 4 4 4
64097+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
64098+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64099+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
64100+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
64101+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
64102+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
64103+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
64104+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
64105+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
64106+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
64107+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64110+4 4 4 4 4 4
64111+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
64112+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
64113+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
64114+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
64115+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
64116+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
64117+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64118+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
64119+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
64120+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
64121+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64124+4 4 4 4 4 4
64125+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
64126+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
64127+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
64128+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
64129+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
64130+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
64131+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
64132+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
64133+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
64134+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
64135+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64138+4 4 4 4 4 4
64139+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
64140+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
64141+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64142+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
64143+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
64144+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
64145+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
64146+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
64147+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
64148+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
64149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64152+4 4 4 4 4 4
64153+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64154+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
64155+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
64156+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
64157+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
64158+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
64159+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
64160+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
64161+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
64162+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64166+4 4 4 4 4 4
64167+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
64168+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
64169+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
64170+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
64171+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
64172+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
64173+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
64174+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
64175+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
64176+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
64177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64180+4 4 4 4 4 4
64181+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
64182+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
64183+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
64184+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
64185+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
64186+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
64187+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
64188+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
64189+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
64190+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64194+4 4 4 4 4 4
64195+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
64196+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64197+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
64198+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
64199+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
64200+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
64201+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
64202+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
64203+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
64204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64208+4 4 4 4 4 4
64209+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
64210+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
64211+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
64212+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
64213+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
64214+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
64215+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
64216+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
64217+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
64218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64222+4 4 4 4 4 4
64223+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64224+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
64225+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
64226+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
64227+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
64228+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
64229+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
64230+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
64231+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64236+4 4 4 4 4 4
64237+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
64238+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
64239+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64240+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
64241+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
64242+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
64243+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
64244+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
64245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64250+4 4 4 4 4 4
64251+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64252+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
64253+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
64254+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
64255+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
64256+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
64257+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
64258+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
64259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64264+4 4 4 4 4 4
64265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64266+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
64267+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64268+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
64269+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
64270+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
64271+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
64272+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
64273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64278+4 4 4 4 4 4
64279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64280+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
64281+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
64282+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
64283+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
64284+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
64285+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
64286+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
64287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64292+4 4 4 4 4 4
64293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64294+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
64295+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
64296+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64297+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
64298+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
64299+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
64300+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64306+4 4 4 4 4 4
64307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64309+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64310+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
64311+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
64312+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
64313+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
64314+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
64315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64320+4 4 4 4 4 4
64321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64324+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64325+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
64326+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
64327+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
64328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64334+4 4 4 4 4 4
64335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64338+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
64339+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
64340+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
64341+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
64342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64348+4 4 4 4 4 4
64349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64352+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
64353+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
64354+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
64355+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
64356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64362+4 4 4 4 4 4
64363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64366+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
64367+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
64368+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
64369+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
64370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64376+4 4 4 4 4 4
64377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
64381+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
64382+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
64383+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
64384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64390+4 4 4 4 4 4
64391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64395+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
64396+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
64397+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
64398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64404+4 4 4 4 4 4
64405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64409+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
64410+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
64411+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64418+4 4 4 4 4 4
64419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64423+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
64424+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
64425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64432+4 4 4 4 4 4
64433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64437+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
64438+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
64439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
64446+4 4 4 4 4 4
64447diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
64448index 443e3c8..c443d6a 100644
64449--- a/drivers/video/nvidia/nv_backlight.c
64450+++ b/drivers/video/nvidia/nv_backlight.c
64451@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
64452 return bd->props.brightness;
64453 }
64454
64455-static struct backlight_ops nvidia_bl_ops = {
64456+static const struct backlight_ops nvidia_bl_ops = {
64457 .get_brightness = nvidia_bl_get_brightness,
64458 .update_status = nvidia_bl_update_status,
64459 };
64460diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
64461index d94c57f..912984c 100644
64462--- a/drivers/video/riva/fbdev.c
64463+++ b/drivers/video/riva/fbdev.c
64464@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
64465 return bd->props.brightness;
64466 }
64467
64468-static struct backlight_ops riva_bl_ops = {
64469+static const struct backlight_ops riva_bl_ops = {
64470 .get_brightness = riva_bl_get_brightness,
64471 .update_status = riva_bl_update_status,
64472 };
64473diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
64474index 54fbb29..2c108fc 100644
64475--- a/drivers/video/uvesafb.c
64476+++ b/drivers/video/uvesafb.c
64477@@ -18,6 +18,7 @@
64478 #include <linux/fb.h>
64479 #include <linux/io.h>
64480 #include <linux/mutex.h>
64481+#include <linux/moduleloader.h>
64482 #include <video/edid.h>
64483 #include <video/uvesafb.h>
64484 #ifdef CONFIG_X86
64485@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
64486 NULL,
64487 };
64488
64489- return call_usermodehelper(v86d_path, argv, envp, 1);
64490+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
64491 }
64492
64493 /*
64494@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
64495 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
64496 par->pmi_setpal = par->ypan = 0;
64497 } else {
64498+
64499+#ifdef CONFIG_PAX_KERNEXEC
64500+#ifdef CONFIG_MODULES
64501+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
64502+#endif
64503+ if (!par->pmi_code) {
64504+ par->pmi_setpal = par->ypan = 0;
64505+ return 0;
64506+ }
64507+#endif
64508+
64509 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
64510 + task->t.regs.edi);
64511+
64512+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64513+ pax_open_kernel();
64514+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
64515+ pax_close_kernel();
64516+
64517+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
64518+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
64519+#else
64520 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
64521 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
64522+#endif
64523+
64524 printk(KERN_INFO "uvesafb: protected mode interface info at "
64525 "%04x:%04x\n",
64526 (u16)task->t.regs.es, (u16)task->t.regs.edi);
64527@@ -1799,6 +1822,11 @@ out:
64528 if (par->vbe_modes)
64529 kfree(par->vbe_modes);
64530
64531+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64532+ if (par->pmi_code)
64533+ module_free_exec(NULL, par->pmi_code);
64534+#endif
64535+
64536 framebuffer_release(info);
64537 return err;
64538 }
64539@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
64540 kfree(par->vbe_state_orig);
64541 if (par->vbe_state_saved)
64542 kfree(par->vbe_state_saved);
64543+
64544+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64545+ if (par->pmi_code)
64546+ module_free_exec(NULL, par->pmi_code);
64547+#endif
64548+
64549 }
64550
64551 framebuffer_release(info);
64552diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
64553index bd37ee1..cb827e8 100644
64554--- a/drivers/video/vesafb.c
64555+++ b/drivers/video/vesafb.c
64556@@ -9,6 +9,7 @@
64557 */
64558
64559 #include <linux/module.h>
64560+#include <linux/moduleloader.h>
64561 #include <linux/kernel.h>
64562 #include <linux/errno.h>
64563 #include <linux/string.h>
64564@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
64565 static int vram_total __initdata; /* Set total amount of memory */
64566 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
64567 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
64568-static void (*pmi_start)(void) __read_mostly;
64569-static void (*pmi_pal) (void) __read_mostly;
64570+static void (*pmi_start)(void) __read_only;
64571+static void (*pmi_pal) (void) __read_only;
64572 static int depth __read_mostly;
64573 static int vga_compat __read_mostly;
64574 /* --------------------------------------------------------------------- */
64575@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
64576 unsigned int size_vmode;
64577 unsigned int size_remap;
64578 unsigned int size_total;
64579+ void *pmi_code = NULL;
64580
64581 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
64582 return -ENODEV;
64583@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
64584 size_remap = size_total;
64585 vesafb_fix.smem_len = size_remap;
64586
64587-#ifndef __i386__
64588- screen_info.vesapm_seg = 0;
64589-#endif
64590-
64591 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
64592 printk(KERN_WARNING
64593 "vesafb: cannot reserve video memory at 0x%lx\n",
64594@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
64595 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
64596 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
64597
64598+#ifdef __i386__
64599+
64600+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64601+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
64602+ if (!pmi_code)
64603+#elif !defined(CONFIG_PAX_KERNEXEC)
64604+ if (0)
64605+#endif
64606+
64607+#endif
64608+ screen_info.vesapm_seg = 0;
64609+
64610 if (screen_info.vesapm_seg) {
64611- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
64612- screen_info.vesapm_seg,screen_info.vesapm_off);
64613+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
64614+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
64615 }
64616
64617 if (screen_info.vesapm_seg < 0xc000)
64618@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
64619
64620 if (ypan || pmi_setpal) {
64621 unsigned short *pmi_base;
64622+
64623 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
64624- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
64625- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
64626+
64627+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64628+ pax_open_kernel();
64629+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
64630+#else
64631+ pmi_code = pmi_base;
64632+#endif
64633+
64634+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
64635+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
64636+
64637+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64638+ pmi_start = ktva_ktla(pmi_start);
64639+ pmi_pal = ktva_ktla(pmi_pal);
64640+ pax_close_kernel();
64641+#endif
64642+
64643 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
64644 if (pmi_base[3]) {
64645 printk(KERN_INFO "vesafb: pmi: ports = ");
64646@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
64647 info->node, info->fix.id);
64648 return 0;
64649 err:
64650+
64651+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64652+ module_free_exec(NULL, pmi_code);
64653+#endif
64654+
64655 if (info->screen_base)
64656 iounmap(info->screen_base);
64657 framebuffer_release(info);
64658diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
64659index 88a60e0..6783cc2 100644
64660--- a/drivers/xen/sys-hypervisor.c
64661+++ b/drivers/xen/sys-hypervisor.c
64662@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
64663 return 0;
64664 }
64665
64666-static struct sysfs_ops hyp_sysfs_ops = {
64667+static const struct sysfs_ops hyp_sysfs_ops = {
64668 .show = hyp_sysfs_show,
64669 .store = hyp_sysfs_store,
64670 };
64671diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
64672index 18f74ec..3227009 100644
64673--- a/fs/9p/vfs_inode.c
64674+++ b/fs/9p/vfs_inode.c
64675@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64676 static void
64677 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64678 {
64679- char *s = nd_get_link(nd);
64680+ const char *s = nd_get_link(nd);
64681
64682 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
64683 IS_ERR(s) ? "<error>" : s);
64684diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
64685index bb4cc5b..df5eaa0 100644
64686--- a/fs/Kconfig.binfmt
64687+++ b/fs/Kconfig.binfmt
64688@@ -86,7 +86,7 @@ config HAVE_AOUT
64689
64690 config BINFMT_AOUT
64691 tristate "Kernel support for a.out and ECOFF binaries"
64692- depends on HAVE_AOUT
64693+ depends on HAVE_AOUT && BROKEN
64694 ---help---
64695 A.out (Assembler.OUTput) is a set of formats for libraries and
64696 executables used in the earliest versions of UNIX. Linux used
64697diff --git a/fs/aio.c b/fs/aio.c
64698index 22a19ad..d484e5b 100644
64699--- a/fs/aio.c
64700+++ b/fs/aio.c
64701@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
64702 size += sizeof(struct io_event) * nr_events;
64703 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
64704
64705- if (nr_pages < 0)
64706+ if (nr_pages <= 0)
64707 return -EINVAL;
64708
64709 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
64710@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
64711 struct aio_timeout to;
64712 int retry = 0;
64713
64714+ pax_track_stack();
64715+
64716 /* needed to zero any padding within an entry (there shouldn't be
64717 * any, but C is fun!
64718 */
64719@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
64720 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
64721 {
64722 ssize_t ret;
64723+ struct iovec iovstack;
64724
64725 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
64726 kiocb->ki_nbytes, 1,
64727- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
64728+ &iovstack, &kiocb->ki_iovec);
64729 if (ret < 0)
64730 goto out;
64731
64732+ if (kiocb->ki_iovec == &iovstack) {
64733+ kiocb->ki_inline_vec = iovstack;
64734+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
64735+ }
64736 kiocb->ki_nr_segs = kiocb->ki_nbytes;
64737 kiocb->ki_cur_seg = 0;
64738 /* ki_nbytes/left now reflect bytes instead of segs */
64739diff --git a/fs/attr.c b/fs/attr.c
64740index 96d394b..33cf5b4 100644
64741--- a/fs/attr.c
64742+++ b/fs/attr.c
64743@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
64744 unsigned long limit;
64745
64746 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64747+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
64748 if (limit != RLIM_INFINITY && offset > limit)
64749 goto out_sig;
64750 if (offset > inode->i_sb->s_maxbytes)
64751diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
64752index b4ea829..e63ef18 100644
64753--- a/fs/autofs4/symlink.c
64754+++ b/fs/autofs4/symlink.c
64755@@ -15,7 +15,7 @@
64756 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
64757 {
64758 struct autofs_info *ino = autofs4_dentry_ino(dentry);
64759- nd_set_link(nd, (char *)ino->u.symlink);
64760+ nd_set_link(nd, ino->u.symlink);
64761 return NULL;
64762 }
64763
64764diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
64765index 136a0d6..a287331 100644
64766--- a/fs/autofs4/waitq.c
64767+++ b/fs/autofs4/waitq.c
64768@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
64769 {
64770 unsigned long sigpipe, flags;
64771 mm_segment_t fs;
64772- const char *data = (const char *)addr;
64773+ const char __user *data = (const char __force_user *)addr;
64774 ssize_t wr = 0;
64775
64776 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
64777diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
64778index 9158c07..3f06659 100644
64779--- a/fs/befs/linuxvfs.c
64780+++ b/fs/befs/linuxvfs.c
64781@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64782 {
64783 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
64784 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
64785- char *link = nd_get_link(nd);
64786+ const char *link = nd_get_link(nd);
64787 if (!IS_ERR(link))
64788 kfree(link);
64789 }
64790diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
64791index 0133b5a..3710d09 100644
64792--- a/fs/binfmt_aout.c
64793+++ b/fs/binfmt_aout.c
64794@@ -16,6 +16,7 @@
64795 #include <linux/string.h>
64796 #include <linux/fs.h>
64797 #include <linux/file.h>
64798+#include <linux/security.h>
64799 #include <linux/stat.h>
64800 #include <linux/fcntl.h>
64801 #include <linux/ptrace.h>
64802@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64803 #endif
64804 # define START_STACK(u) (u.start_stack)
64805
64806+ memset(&dump, 0, sizeof(dump));
64807+
64808 fs = get_fs();
64809 set_fs(KERNEL_DS);
64810 has_dumped = 1;
64811@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64812
64813 /* If the size of the dump file exceeds the rlimit, then see what would happen
64814 if we wrote the stack, but not the data area. */
64815+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
64816 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
64817 dump.u_dsize = 0;
64818
64819 /* Make sure we have enough room to write the stack and data areas. */
64820+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
64821 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
64822 dump.u_ssize = 0;
64823
64824@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64825 dump_size = dump.u_ssize << PAGE_SHIFT;
64826 DUMP_WRITE(dump_start,dump_size);
64827 }
64828-/* Finally dump the task struct. Not be used by gdb, but could be useful */
64829- set_fs(KERNEL_DS);
64830- DUMP_WRITE(current,sizeof(*current));
64831+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
64832 end_coredump:
64833 set_fs(fs);
64834 return has_dumped;
64835@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64836 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64837 if (rlim >= RLIM_INFINITY)
64838 rlim = ~0;
64839+
64840+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
64841 if (ex.a_data + ex.a_bss > rlim)
64842 return -ENOMEM;
64843
64844@@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64845 current->mm->free_area_cache = current->mm->mmap_base;
64846 current->mm->cached_hole_size = 0;
64847
64848+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64849+ if (retval < 0) {
64850+ /* Someone check-me: is this error path enough? */
64851+ send_sig(SIGKILL, current, 0);
64852+ return retval;
64853+ }
64854+
64855 install_exec_creds(bprm);
64856 current->flags &= ~PF_FORKNOEXEC;
64857
64858+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64859+ current->mm->pax_flags = 0UL;
64860+#endif
64861+
64862+#ifdef CONFIG_PAX_PAGEEXEC
64863+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
64864+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
64865+
64866+#ifdef CONFIG_PAX_EMUTRAMP
64867+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
64868+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
64869+#endif
64870+
64871+#ifdef CONFIG_PAX_MPROTECT
64872+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
64873+ current->mm->pax_flags |= MF_PAX_MPROTECT;
64874+#endif
64875+
64876+ }
64877+#endif
64878+
64879 if (N_MAGIC(ex) == OMAGIC) {
64880 unsigned long text_addr, map_size;
64881 loff_t pos;
64882@@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64883
64884 down_write(&current->mm->mmap_sem);
64885 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
64886- PROT_READ | PROT_WRITE | PROT_EXEC,
64887+ PROT_READ | PROT_WRITE,
64888 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
64889 fd_offset + ex.a_text);
64890 up_write(&current->mm->mmap_sem);
64891@@ -367,13 +400,6 @@ beyond_if:
64892 return retval;
64893 }
64894
64895- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64896- if (retval < 0) {
64897- /* Someone check-me: is this error path enough? */
64898- send_sig(SIGKILL, current, 0);
64899- return retval;
64900- }
64901-
64902 current->mm->start_stack =
64903 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
64904 #ifdef __alpha__
64905diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
64906index a64fde6..b6699eb 100644
64907--- a/fs/binfmt_elf.c
64908+++ b/fs/binfmt_elf.c
64909@@ -31,6 +31,7 @@
64910 #include <linux/random.h>
64911 #include <linux/elf.h>
64912 #include <linux/utsname.h>
64913+#include <linux/xattr.h>
64914 #include <asm/uaccess.h>
64915 #include <asm/param.h>
64916 #include <asm/page.h>
64917@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
64918 #define elf_core_dump NULL
64919 #endif
64920
64921+#ifdef CONFIG_PAX_MPROTECT
64922+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
64923+#endif
64924+
64925 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
64926 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
64927 #else
64928@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
64929 .load_binary = load_elf_binary,
64930 .load_shlib = load_elf_library,
64931 .core_dump = elf_core_dump,
64932+
64933+#ifdef CONFIG_PAX_MPROTECT
64934+ .handle_mprotect= elf_handle_mprotect,
64935+#endif
64936+
64937 .min_coredump = ELF_EXEC_PAGESIZE,
64938 .hasvdso = 1
64939 };
64940@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
64941
64942 static int set_brk(unsigned long start, unsigned long end)
64943 {
64944+ unsigned long e = end;
64945+
64946 start = ELF_PAGEALIGN(start);
64947 end = ELF_PAGEALIGN(end);
64948 if (end > start) {
64949@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
64950 if (BAD_ADDR(addr))
64951 return addr;
64952 }
64953- current->mm->start_brk = current->mm->brk = end;
64954+ current->mm->start_brk = current->mm->brk = e;
64955 return 0;
64956 }
64957
64958@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
64959 elf_addr_t __user *u_rand_bytes;
64960 const char *k_platform = ELF_PLATFORM;
64961 const char *k_base_platform = ELF_BASE_PLATFORM;
64962- unsigned char k_rand_bytes[16];
64963+ u32 k_rand_bytes[4];
64964 int items;
64965 elf_addr_t *elf_info;
64966 int ei_index = 0;
64967 const struct cred *cred = current_cred();
64968 struct vm_area_struct *vma;
64969+ unsigned long saved_auxv[AT_VECTOR_SIZE];
64970+
64971+ pax_track_stack();
64972
64973 /*
64974 * In some cases (e.g. Hyper-Threading), we want to avoid L1
64975@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
64976 * Generate 16 random bytes for userspace PRNG seeding.
64977 */
64978 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
64979- u_rand_bytes = (elf_addr_t __user *)
64980- STACK_ALLOC(p, sizeof(k_rand_bytes));
64981+ srandom32(k_rand_bytes[0] ^ random32());
64982+ srandom32(k_rand_bytes[1] ^ random32());
64983+ srandom32(k_rand_bytes[2] ^ random32());
64984+ srandom32(k_rand_bytes[3] ^ random32());
64985+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
64986+ u_rand_bytes = (elf_addr_t __user *) p;
64987 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
64988 return -EFAULT;
64989
64990@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
64991 return -EFAULT;
64992 current->mm->env_end = p;
64993
64994+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
64995+
64996 /* Put the elf_info on the stack in the right place. */
64997 sp = (elf_addr_t __user *)envp + 1;
64998- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
64999+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
65000 return -EFAULT;
65001 return 0;
65002 }
65003@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65004 {
65005 struct elf_phdr *elf_phdata;
65006 struct elf_phdr *eppnt;
65007- unsigned long load_addr = 0;
65008+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
65009 int load_addr_set = 0;
65010 unsigned long last_bss = 0, elf_bss = 0;
65011- unsigned long error = ~0UL;
65012+ unsigned long error = -EINVAL;
65013 unsigned long total_size;
65014 int retval, i, size;
65015
65016@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65017 goto out_close;
65018 }
65019
65020+#ifdef CONFIG_PAX_SEGMEXEC
65021+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
65022+ pax_task_size = SEGMEXEC_TASK_SIZE;
65023+#endif
65024+
65025 eppnt = elf_phdata;
65026 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
65027 if (eppnt->p_type == PT_LOAD) {
65028@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
65029 k = load_addr + eppnt->p_vaddr;
65030 if (BAD_ADDR(k) ||
65031 eppnt->p_filesz > eppnt->p_memsz ||
65032- eppnt->p_memsz > TASK_SIZE ||
65033- TASK_SIZE - eppnt->p_memsz < k) {
65034+ eppnt->p_memsz > pax_task_size ||
65035+ pax_task_size - eppnt->p_memsz < k) {
65036 error = -ENOMEM;
65037 goto out_close;
65038 }
65039@@ -532,6 +558,351 @@ out:
65040 return error;
65041 }
65042
65043+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65044+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
65045+{
65046+ unsigned long pax_flags = 0UL;
65047+
65048+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65049+
65050+#ifdef CONFIG_PAX_PAGEEXEC
65051+ if (elf_phdata->p_flags & PF_PAGEEXEC)
65052+ pax_flags |= MF_PAX_PAGEEXEC;
65053+#endif
65054+
65055+#ifdef CONFIG_PAX_SEGMEXEC
65056+ if (elf_phdata->p_flags & PF_SEGMEXEC)
65057+ pax_flags |= MF_PAX_SEGMEXEC;
65058+#endif
65059+
65060+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65061+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65062+ if (nx_enabled)
65063+ pax_flags &= ~MF_PAX_SEGMEXEC;
65064+ else
65065+ pax_flags &= ~MF_PAX_PAGEEXEC;
65066+ }
65067+#endif
65068+
65069+#ifdef CONFIG_PAX_EMUTRAMP
65070+ if (elf_phdata->p_flags & PF_EMUTRAMP)
65071+ pax_flags |= MF_PAX_EMUTRAMP;
65072+#endif
65073+
65074+#ifdef CONFIG_PAX_MPROTECT
65075+ if (elf_phdata->p_flags & PF_MPROTECT)
65076+ pax_flags |= MF_PAX_MPROTECT;
65077+#endif
65078+
65079+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65080+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
65081+ pax_flags |= MF_PAX_RANDMMAP;
65082+#endif
65083+
65084+#endif
65085+
65086+ return pax_flags;
65087+}
65088+
65089+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
65090+{
65091+ unsigned long pax_flags = 0UL;
65092+
65093+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65094+
65095+#ifdef CONFIG_PAX_PAGEEXEC
65096+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
65097+ pax_flags |= MF_PAX_PAGEEXEC;
65098+#endif
65099+
65100+#ifdef CONFIG_PAX_SEGMEXEC
65101+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
65102+ pax_flags |= MF_PAX_SEGMEXEC;
65103+#endif
65104+
65105+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65106+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65107+ if (nx_enabled)
65108+ pax_flags &= ~MF_PAX_SEGMEXEC;
65109+ else
65110+ pax_flags &= ~MF_PAX_PAGEEXEC;
65111+ }
65112+#endif
65113+
65114+#ifdef CONFIG_PAX_EMUTRAMP
65115+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
65116+ pax_flags |= MF_PAX_EMUTRAMP;
65117+#endif
65118+
65119+#ifdef CONFIG_PAX_MPROTECT
65120+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
65121+ pax_flags |= MF_PAX_MPROTECT;
65122+#endif
65123+
65124+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65125+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
65126+ pax_flags |= MF_PAX_RANDMMAP;
65127+#endif
65128+
65129+#endif
65130+
65131+ return pax_flags;
65132+}
65133+
65134+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
65135+{
65136+ unsigned long pax_flags = 0UL;
65137+
65138+#ifdef CONFIG_PAX_EI_PAX
65139+
65140+#ifdef CONFIG_PAX_PAGEEXEC
65141+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
65142+ pax_flags |= MF_PAX_PAGEEXEC;
65143+#endif
65144+
65145+#ifdef CONFIG_PAX_SEGMEXEC
65146+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
65147+ pax_flags |= MF_PAX_SEGMEXEC;
65148+#endif
65149+
65150+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65151+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65152+ if (nx_enabled)
65153+ pax_flags &= ~MF_PAX_SEGMEXEC;
65154+ else
65155+ pax_flags &= ~MF_PAX_PAGEEXEC;
65156+ }
65157+#endif
65158+
65159+#ifdef CONFIG_PAX_EMUTRAMP
65160+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
65161+ pax_flags |= MF_PAX_EMUTRAMP;
65162+#endif
65163+
65164+#ifdef CONFIG_PAX_MPROTECT
65165+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
65166+ pax_flags |= MF_PAX_MPROTECT;
65167+#endif
65168+
65169+#ifdef CONFIG_PAX_ASLR
65170+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
65171+ pax_flags |= MF_PAX_RANDMMAP;
65172+#endif
65173+
65174+#else
65175+
65176+#ifdef CONFIG_PAX_PAGEEXEC
65177+ pax_flags |= MF_PAX_PAGEEXEC;
65178+#endif
65179+
65180+#ifdef CONFIG_PAX_MPROTECT
65181+ pax_flags |= MF_PAX_MPROTECT;
65182+#endif
65183+
65184+#ifdef CONFIG_PAX_RANDMMAP
65185+ pax_flags |= MF_PAX_RANDMMAP;
65186+#endif
65187+
65188+#ifdef CONFIG_PAX_SEGMEXEC
65189+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
65190+ pax_flags &= ~MF_PAX_PAGEEXEC;
65191+ pax_flags |= MF_PAX_SEGMEXEC;
65192+ }
65193+#endif
65194+
65195+#endif
65196+
65197+ return pax_flags;
65198+}
65199+
65200+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
65201+{
65202+
65203+#ifdef CONFIG_PAX_PT_PAX_FLAGS
65204+ unsigned long i;
65205+
65206+ for (i = 0UL; i < elf_ex->e_phnum; i++)
65207+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
65208+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
65209+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
65210+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
65211+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
65212+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
65213+ return ~0UL;
65214+
65215+#ifdef CONFIG_PAX_SOFTMODE
65216+ if (pax_softmode)
65217+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
65218+ else
65219+#endif
65220+
65221+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
65222+ break;
65223+ }
65224+#endif
65225+
65226+ return ~0UL;
65227+}
65228+
65229+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65230+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
65231+{
65232+ unsigned long pax_flags = 0UL;
65233+
65234+#ifdef CONFIG_PAX_PAGEEXEC
65235+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
65236+ pax_flags |= MF_PAX_PAGEEXEC;
65237+#endif
65238+
65239+#ifdef CONFIG_PAX_SEGMEXEC
65240+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
65241+ pax_flags |= MF_PAX_SEGMEXEC;
65242+#endif
65243+
65244+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65245+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65246+ if ((__supported_pte_mask & _PAGE_NX))
65247+ pax_flags &= ~MF_PAX_SEGMEXEC;
65248+ else
65249+ pax_flags &= ~MF_PAX_PAGEEXEC;
65250+ }
65251+#endif
65252+
65253+#ifdef CONFIG_PAX_EMUTRAMP
65254+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
65255+ pax_flags |= MF_PAX_EMUTRAMP;
65256+#endif
65257+
65258+#ifdef CONFIG_PAX_MPROTECT
65259+ if (pax_flags_softmode & MF_PAX_MPROTECT)
65260+ pax_flags |= MF_PAX_MPROTECT;
65261+#endif
65262+
65263+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65264+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
65265+ pax_flags |= MF_PAX_RANDMMAP;
65266+#endif
65267+
65268+ return pax_flags;
65269+}
65270+
65271+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
65272+{
65273+ unsigned long pax_flags = 0UL;
65274+
65275+#ifdef CONFIG_PAX_PAGEEXEC
65276+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
65277+ pax_flags |= MF_PAX_PAGEEXEC;
65278+#endif
65279+
65280+#ifdef CONFIG_PAX_SEGMEXEC
65281+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
65282+ pax_flags |= MF_PAX_SEGMEXEC;
65283+#endif
65284+
65285+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
65286+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65287+ if ((__supported_pte_mask & _PAGE_NX))
65288+ pax_flags &= ~MF_PAX_SEGMEXEC;
65289+ else
65290+ pax_flags &= ~MF_PAX_PAGEEXEC;
65291+ }
65292+#endif
65293+
65294+#ifdef CONFIG_PAX_EMUTRAMP
65295+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
65296+ pax_flags |= MF_PAX_EMUTRAMP;
65297+#endif
65298+
65299+#ifdef CONFIG_PAX_MPROTECT
65300+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
65301+ pax_flags |= MF_PAX_MPROTECT;
65302+#endif
65303+
65304+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
65305+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
65306+ pax_flags |= MF_PAX_RANDMMAP;
65307+#endif
65308+
65309+ return pax_flags;
65310+}
65311+#endif
65312+
65313+static unsigned long pax_parse_xattr_pax(struct file * const file)
65314+{
65315+
65316+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65317+ ssize_t xattr_size, i;
65318+ unsigned char xattr_value[5];
65319+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
65320+
65321+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
65322+ if (xattr_size <= 0)
65323+ return ~0UL;
65324+
65325+ for (i = 0; i < xattr_size; i++)
65326+ switch (xattr_value[i]) {
65327+ default:
65328+ return ~0UL;
65329+
65330+#define parse_flag(option1, option2, flag) \
65331+ case option1: \
65332+ pax_flags_hardmode |= MF_PAX_##flag; \
65333+ break; \
65334+ case option2: \
65335+ pax_flags_softmode |= MF_PAX_##flag; \
65336+ break;
65337+
65338+ parse_flag('p', 'P', PAGEEXEC);
65339+ parse_flag('e', 'E', EMUTRAMP);
65340+ parse_flag('m', 'M', MPROTECT);
65341+ parse_flag('r', 'R', RANDMMAP);
65342+ parse_flag('s', 'S', SEGMEXEC);
65343+
65344+#undef parse_flag
65345+ }
65346+
65347+ if (pax_flags_hardmode & pax_flags_softmode)
65348+ return ~0UL;
65349+
65350+#ifdef CONFIG_PAX_SOFTMODE
65351+ if (pax_softmode)
65352+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
65353+ else
65354+#endif
65355+
65356+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
65357+#else
65358+ return ~0UL;
65359+#endif
65360+
65361+}
65362+
65363+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
65364+{
65365+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
65366+
65367+ pax_flags = pax_parse_ei_pax(elf_ex);
65368+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
65369+ xattr_pax_flags = pax_parse_xattr_pax(file);
65370+
65371+ if (pt_pax_flags == ~0UL)
65372+ pt_pax_flags = xattr_pax_flags;
65373+ else if (xattr_pax_flags == ~0UL)
65374+ xattr_pax_flags = pt_pax_flags;
65375+ if (pt_pax_flags != xattr_pax_flags)
65376+ return -EINVAL;
65377+ if (pt_pax_flags != ~0UL)
65378+ pax_flags = pt_pax_flags;
65379+
65380+ if (0 > pax_check_flags(&pax_flags))
65381+ return -EINVAL;
65382+
65383+ current->mm->pax_flags = pax_flags;
65384+ return 0;
65385+}
65386+#endif
65387+
65388 /*
65389 * These are the functions used to load ELF style executables and shared
65390 * libraries. There is no binary dependent code anywhere else.
65391@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
65392 {
65393 unsigned int random_variable = 0;
65394
65395+#ifdef CONFIG_PAX_RANDUSTACK
65396+ if (randomize_va_space)
65397+ return stack_top - current->mm->delta_stack;
65398+#endif
65399+
65400 if ((current->flags & PF_RANDOMIZE) &&
65401 !(current->personality & ADDR_NO_RANDOMIZE)) {
65402 random_variable = get_random_int() & STACK_RND_MASK;
65403@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65404 unsigned long load_addr = 0, load_bias = 0;
65405 int load_addr_set = 0;
65406 char * elf_interpreter = NULL;
65407- unsigned long error;
65408+ unsigned long error = 0;
65409 struct elf_phdr *elf_ppnt, *elf_phdata;
65410 unsigned long elf_bss, elf_brk;
65411 int retval, i;
65412@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65413 unsigned long start_code, end_code, start_data, end_data;
65414 unsigned long reloc_func_desc = 0;
65415 int executable_stack = EXSTACK_DEFAULT;
65416- unsigned long def_flags = 0;
65417 struct {
65418 struct elfhdr elf_ex;
65419 struct elfhdr interp_elf_ex;
65420 } *loc;
65421+ unsigned long pax_task_size = TASK_SIZE;
65422
65423 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
65424 if (!loc) {
65425@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65426
65427 /* OK, This is the point of no return */
65428 current->flags &= ~PF_FORKNOEXEC;
65429- current->mm->def_flags = def_flags;
65430+
65431+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65432+ current->mm->pax_flags = 0UL;
65433+#endif
65434+
65435+#ifdef CONFIG_PAX_DLRESOLVE
65436+ current->mm->call_dl_resolve = 0UL;
65437+#endif
65438+
65439+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
65440+ current->mm->call_syscall = 0UL;
65441+#endif
65442+
65443+#ifdef CONFIG_PAX_ASLR
65444+ current->mm->delta_mmap = 0UL;
65445+ current->mm->delta_stack = 0UL;
65446+#endif
65447+
65448+ current->mm->def_flags = 0;
65449+
65450+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
65451+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
65452+ send_sig(SIGKILL, current, 0);
65453+ goto out_free_dentry;
65454+ }
65455+#endif
65456+
65457+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
65458+ pax_set_initial_flags(bprm);
65459+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
65460+ if (pax_set_initial_flags_func)
65461+ (pax_set_initial_flags_func)(bprm);
65462+#endif
65463+
65464+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65465+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
65466+ current->mm->context.user_cs_limit = PAGE_SIZE;
65467+ current->mm->def_flags |= VM_PAGEEXEC;
65468+ }
65469+#endif
65470+
65471+#ifdef CONFIG_PAX_SEGMEXEC
65472+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65473+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
65474+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
65475+ pax_task_size = SEGMEXEC_TASK_SIZE;
65476+ }
65477+#endif
65478+
65479+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
65480+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65481+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
65482+ put_cpu();
65483+ }
65484+#endif
65485
65486 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
65487 may depend on the personality. */
65488 SET_PERSONALITY(loc->elf_ex);
65489+
65490+#ifdef CONFIG_PAX_ASLR
65491+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
65492+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
65493+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
65494+ }
65495+#endif
65496+
65497+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65498+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65499+ executable_stack = EXSTACK_DISABLE_X;
65500+ current->personality &= ~READ_IMPLIES_EXEC;
65501+ } else
65502+#endif
65503+
65504 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
65505 current->personality |= READ_IMPLIES_EXEC;
65506
65507@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65508 * might try to exec. This is because the brk will
65509 * follow the loader, and is not movable. */
65510 #ifdef CONFIG_X86
65511- load_bias = 0;
65512+ if (current->flags & PF_RANDOMIZE)
65513+ load_bias = 0;
65514+ else
65515+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65516 #else
65517 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
65518 #endif
65519+
65520+#ifdef CONFIG_PAX_RANDMMAP
65521+ /* PaX: randomize base address at the default exe base if requested */
65522+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
65523+#ifdef CONFIG_SPARC64
65524+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
65525+#else
65526+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
65527+#endif
65528+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
65529+ elf_flags |= MAP_FIXED;
65530+ }
65531+#endif
65532+
65533 }
65534
65535 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
65536@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65537 * allowed task size. Note that p_filesz must always be
65538 * <= p_memsz so it is only necessary to check p_memsz.
65539 */
65540- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65541- elf_ppnt->p_memsz > TASK_SIZE ||
65542- TASK_SIZE - elf_ppnt->p_memsz < k) {
65543+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65544+ elf_ppnt->p_memsz > pax_task_size ||
65545+ pax_task_size - elf_ppnt->p_memsz < k) {
65546 /* set_brk can never work. Avoid overflows. */
65547 send_sig(SIGKILL, current, 0);
65548 retval = -EINVAL;
65549@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65550 start_data += load_bias;
65551 end_data += load_bias;
65552
65553+#ifdef CONFIG_PAX_RANDMMAP
65554+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
65555+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
65556+#endif
65557+
65558 /* Calling set_brk effectively mmaps the pages that we need
65559 * for the bss and break sections. We must do this before
65560 * mapping in the interpreter, to make sure it doesn't wind
65561@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65562 goto out_free_dentry;
65563 }
65564 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
65565- send_sig(SIGSEGV, current, 0);
65566- retval = -EFAULT; /* Nobody gets to see this, but.. */
65567- goto out_free_dentry;
65568+ /*
65569+ * This bss-zeroing can fail if the ELF
65570+ * file specifies odd protections. So
65571+ * we don't check the return value
65572+ */
65573 }
65574
65575 if (elf_interpreter) {
65576@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
65577 unsigned long n = off;
65578 if (n > PAGE_SIZE)
65579 n = PAGE_SIZE;
65580- if (!dump_write(file, buf, n))
65581+ if (!dump_write(file, buf, n)) {
65582+ free_page((unsigned long)buf);
65583 return 0;
65584+ }
65585 off -= n;
65586 }
65587 free_page((unsigned long)buf);
65588@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
65589 * Decide what to dump of a segment, part, all or none.
65590 */
65591 static unsigned long vma_dump_size(struct vm_area_struct *vma,
65592- unsigned long mm_flags)
65593+ unsigned long mm_flags, long signr)
65594 {
65595 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
65596
65597@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
65598 if (vma->vm_file == NULL)
65599 return 0;
65600
65601- if (FILTER(MAPPED_PRIVATE))
65602+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
65603 goto whole;
65604
65605 /*
65606@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
65607 #undef DUMP_WRITE
65608
65609 #define DUMP_WRITE(addr, nr) \
65610+ do { \
65611+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
65612 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
65613- goto end_coredump;
65614+ goto end_coredump; \
65615+ } while (0);
65616
65617 static void fill_elf_header(struct elfhdr *elf, int segs,
65618 u16 machine, u32 flags, u8 osabi)
65619@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
65620 {
65621 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
65622 int i = 0;
65623- do
65624+ do {
65625 i += 2;
65626- while (auxv[i - 2] != AT_NULL);
65627+ } while (auxv[i - 2] != AT_NULL);
65628 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
65629 }
65630
65631@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65632 phdr.p_offset = offset;
65633 phdr.p_vaddr = vma->vm_start;
65634 phdr.p_paddr = 0;
65635- phdr.p_filesz = vma_dump_size(vma, mm_flags);
65636+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
65637 phdr.p_memsz = vma->vm_end - vma->vm_start;
65638 offset += phdr.p_filesz;
65639 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
65640@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65641 unsigned long addr;
65642 unsigned long end;
65643
65644- end = vma->vm_start + vma_dump_size(vma, mm_flags);
65645+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
65646
65647 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
65648 struct page *page;
65649@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65650 page = get_dump_page(addr);
65651 if (page) {
65652 void *kaddr = kmap(page);
65653+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
65654 stop = ((size += PAGE_SIZE) > limit) ||
65655 !dump_write(file, kaddr, PAGE_SIZE);
65656 kunmap(page);
65657@@ -2042,6 +2517,97 @@ out:
65658
65659 #endif /* USE_ELF_CORE_DUMP */
65660
65661+#ifdef CONFIG_PAX_MPROTECT
65662+/* PaX: non-PIC ELF libraries need relocations on their executable segments
65663+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
65664+ * we'll remove VM_MAYWRITE for good on RELRO segments.
65665+ *
65666+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
65667+ * basis because we want to allow the common case and not the special ones.
65668+ */
65669+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
65670+{
65671+ struct elfhdr elf_h;
65672+ struct elf_phdr elf_p;
65673+ unsigned long i;
65674+ unsigned long oldflags;
65675+ bool is_textrel_rw, is_textrel_rx, is_relro;
65676+
65677+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
65678+ return;
65679+
65680+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
65681+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
65682+
65683+#ifdef CONFIG_PAX_ELFRELOCS
65684+ /* possible TEXTREL */
65685+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
65686+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
65687+#else
65688+ is_textrel_rw = false;
65689+ is_textrel_rx = false;
65690+#endif
65691+
65692+ /* possible RELRO */
65693+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
65694+
65695+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
65696+ return;
65697+
65698+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
65699+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
65700+
65701+#ifdef CONFIG_PAX_ETEXECRELOCS
65702+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65703+#else
65704+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
65705+#endif
65706+
65707+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65708+ !elf_check_arch(&elf_h) ||
65709+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
65710+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
65711+ return;
65712+
65713+ for (i = 0UL; i < elf_h.e_phnum; i++) {
65714+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
65715+ return;
65716+ switch (elf_p.p_type) {
65717+ case PT_DYNAMIC:
65718+ if (!is_textrel_rw && !is_textrel_rx)
65719+ continue;
65720+ i = 0UL;
65721+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
65722+ elf_dyn dyn;
65723+
65724+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
65725+ return;
65726+ if (dyn.d_tag == DT_NULL)
65727+ return;
65728+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
65729+ gr_log_textrel(vma);
65730+ if (is_textrel_rw)
65731+ vma->vm_flags |= VM_MAYWRITE;
65732+ else
65733+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
65734+ vma->vm_flags &= ~VM_MAYWRITE;
65735+ return;
65736+ }
65737+ i++;
65738+ }
65739+ return;
65740+
65741+ case PT_GNU_RELRO:
65742+ if (!is_relro)
65743+ continue;
65744+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
65745+ vma->vm_flags &= ~VM_MAYWRITE;
65746+ return;
65747+ }
65748+ }
65749+}
65750+#endif
65751+
65752 static int __init init_elf_binfmt(void)
65753 {
65754 return register_binfmt(&elf_format);
65755diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
65756index ca88c46..f155a60 100644
65757--- a/fs/binfmt_flat.c
65758+++ b/fs/binfmt_flat.c
65759@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
65760 realdatastart = (unsigned long) -ENOMEM;
65761 printk("Unable to allocate RAM for process data, errno %d\n",
65762 (int)-realdatastart);
65763+ down_write(&current->mm->mmap_sem);
65764 do_munmap(current->mm, textpos, text_len);
65765+ up_write(&current->mm->mmap_sem);
65766 ret = realdatastart;
65767 goto err;
65768 }
65769@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65770 }
65771 if (IS_ERR_VALUE(result)) {
65772 printk("Unable to read data+bss, errno %d\n", (int)-result);
65773+ down_write(&current->mm->mmap_sem);
65774 do_munmap(current->mm, textpos, text_len);
65775 do_munmap(current->mm, realdatastart, data_len + extra);
65776+ up_write(&current->mm->mmap_sem);
65777 ret = result;
65778 goto err;
65779 }
65780@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65781 }
65782 if (IS_ERR_VALUE(result)) {
65783 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
65784+ down_write(&current->mm->mmap_sem);
65785 do_munmap(current->mm, textpos, text_len + data_len + extra +
65786 MAX_SHARED_LIBS * sizeof(unsigned long));
65787+ up_write(&current->mm->mmap_sem);
65788 ret = result;
65789 goto err;
65790 }
65791diff --git a/fs/bio.c b/fs/bio.c
65792index e696713..83de133 100644
65793--- a/fs/bio.c
65794+++ b/fs/bio.c
65795@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
65796
65797 i = 0;
65798 while (i < bio_slab_nr) {
65799- struct bio_slab *bslab = &bio_slabs[i];
65800+ bslab = &bio_slabs[i];
65801
65802 if (!bslab->slab && entry == -1)
65803 entry = i;
65804@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
65805 const int read = bio_data_dir(bio) == READ;
65806 struct bio_map_data *bmd = bio->bi_private;
65807 int i;
65808- char *p = bmd->sgvecs[0].iov_base;
65809+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
65810
65811 __bio_for_each_segment(bvec, bio, i, 0) {
65812 char *addr = page_address(bvec->bv_page);
65813diff --git a/fs/block_dev.c b/fs/block_dev.c
65814index e65efa2..04fae57 100644
65815--- a/fs/block_dev.c
65816+++ b/fs/block_dev.c
65817@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
65818 else if (bdev->bd_contains == bdev)
65819 res = 0; /* is a whole device which isn't held */
65820
65821- else if (bdev->bd_contains->bd_holder == bd_claim)
65822+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
65823 res = 0; /* is a partition of a device that is being partitioned */
65824 else if (bdev->bd_contains->bd_holder != NULL)
65825 res = -EBUSY; /* is a partition of a held device */
65826diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
65827index c4bc570..42acd8d 100644
65828--- a/fs/btrfs/ctree.c
65829+++ b/fs/btrfs/ctree.c
65830@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
65831 free_extent_buffer(buf);
65832 add_root_to_dirty_list(root);
65833 } else {
65834- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
65835- parent_start = parent->start;
65836- else
65837+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
65838+ if (parent)
65839+ parent_start = parent->start;
65840+ else
65841+ parent_start = 0;
65842+ } else
65843 parent_start = 0;
65844
65845 WARN_ON(trans->transid != btrfs_header_generation(parent));
65846@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
65847
65848 ret = 0;
65849 if (slot == 0) {
65850- struct btrfs_disk_key disk_key;
65851 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
65852 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
65853 }
65854diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
65855index f447188..59c17c5 100644
65856--- a/fs/btrfs/disk-io.c
65857+++ b/fs/btrfs/disk-io.c
65858@@ -39,7 +39,7 @@
65859 #include "tree-log.h"
65860 #include "free-space-cache.h"
65861
65862-static struct extent_io_ops btree_extent_io_ops;
65863+static const struct extent_io_ops btree_extent_io_ops;
65864 static void end_workqueue_fn(struct btrfs_work *work);
65865 static void free_fs_root(struct btrfs_root *root);
65866
65867@@ -2607,7 +2607,7 @@ out:
65868 return 0;
65869 }
65870
65871-static struct extent_io_ops btree_extent_io_ops = {
65872+static const struct extent_io_ops btree_extent_io_ops = {
65873 .write_cache_pages_lock_hook = btree_lock_page_hook,
65874 .readpage_end_io_hook = btree_readpage_end_io_hook,
65875 .submit_bio_hook = btree_submit_bio_hook,
65876diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
65877index 559f724..a026171 100644
65878--- a/fs/btrfs/extent-tree.c
65879+++ b/fs/btrfs/extent-tree.c
65880@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
65881 u64 group_start = group->key.objectid;
65882 new_extents = kmalloc(sizeof(*new_extents),
65883 GFP_NOFS);
65884+ if (!new_extents) {
65885+ ret = -ENOMEM;
65886+ goto out;
65887+ }
65888 nr_extents = 1;
65889 ret = get_new_locations(reloc_inode,
65890 extent_key,
65891diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
65892index 36de250..7ec75c7 100644
65893--- a/fs/btrfs/extent_io.h
65894+++ b/fs/btrfs/extent_io.h
65895@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
65896 struct bio *bio, int mirror_num,
65897 unsigned long bio_flags);
65898 struct extent_io_ops {
65899- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
65900+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
65901 u64 start, u64 end, int *page_started,
65902 unsigned long *nr_written);
65903- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
65904- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
65905+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
65906+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
65907 extent_submit_bio_hook_t *submit_bio_hook;
65908- int (*merge_bio_hook)(struct page *page, unsigned long offset,
65909+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
65910 size_t size, struct bio *bio,
65911 unsigned long bio_flags);
65912- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
65913- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
65914+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
65915+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
65916 u64 start, u64 end,
65917 struct extent_state *state);
65918- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
65919+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
65920 u64 start, u64 end,
65921 struct extent_state *state);
65922- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65923+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65924 struct extent_state *state);
65925- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65926+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65927 struct extent_state *state, int uptodate);
65928- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
65929+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
65930 unsigned long old, unsigned long bits);
65931- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
65932+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
65933 unsigned long bits);
65934- int (*merge_extent_hook)(struct inode *inode,
65935+ int (* const merge_extent_hook)(struct inode *inode,
65936 struct extent_state *new,
65937 struct extent_state *other);
65938- int (*split_extent_hook)(struct inode *inode,
65939+ int (* const split_extent_hook)(struct inode *inode,
65940 struct extent_state *orig, u64 split);
65941- int (*write_cache_pages_lock_hook)(struct page *page);
65942+ int (* const write_cache_pages_lock_hook)(struct page *page);
65943 };
65944
65945 struct extent_io_tree {
65946@@ -88,7 +88,7 @@ struct extent_io_tree {
65947 u64 dirty_bytes;
65948 spinlock_t lock;
65949 spinlock_t buffer_lock;
65950- struct extent_io_ops *ops;
65951+ const struct extent_io_ops *ops;
65952 };
65953
65954 struct extent_state {
65955diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
65956index cb2849f..3718fb4 100644
65957--- a/fs/btrfs/free-space-cache.c
65958+++ b/fs/btrfs/free-space-cache.c
65959@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
65960
65961 while(1) {
65962 if (entry->bytes < bytes || entry->offset < min_start) {
65963- struct rb_node *node;
65964-
65965 node = rb_next(&entry->offset_index);
65966 if (!node)
65967 break;
65968@@ -1226,7 +1224,7 @@ again:
65969 */
65970 while (entry->bitmap || found_bitmap ||
65971 (!entry->bitmap && entry->bytes < min_bytes)) {
65972- struct rb_node *node = rb_next(&entry->offset_index);
65973+ node = rb_next(&entry->offset_index);
65974
65975 if (entry->bitmap && entry->bytes > bytes + empty_size) {
65976 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
65977diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
65978index e03a836..323837e 100644
65979--- a/fs/btrfs/inode.c
65980+++ b/fs/btrfs/inode.c
65981@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
65982 static const struct address_space_operations btrfs_aops;
65983 static const struct address_space_operations btrfs_symlink_aops;
65984 static const struct file_operations btrfs_dir_file_operations;
65985-static struct extent_io_ops btrfs_extent_io_ops;
65986+static const struct extent_io_ops btrfs_extent_io_ops;
65987
65988 static struct kmem_cache *btrfs_inode_cachep;
65989 struct kmem_cache *btrfs_trans_handle_cachep;
65990@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
65991 1, 0, NULL, GFP_NOFS);
65992 while (start < end) {
65993 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
65994+ BUG_ON(!async_cow);
65995 async_cow->inode = inode;
65996 async_cow->root = root;
65997 async_cow->locked_page = locked_page;
65998@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
65999 inline_size = btrfs_file_extent_inline_item_len(leaf,
66000 btrfs_item_nr(leaf, path->slots[0]));
66001 tmp = kmalloc(inline_size, GFP_NOFS);
66002+ if (!tmp)
66003+ return -ENOMEM;
66004 ptr = btrfs_file_extent_inline_start(item);
66005
66006 read_extent_buffer(leaf, tmp, ptr, inline_size);
66007@@ -5410,7 +5413,7 @@ fail:
66008 return -ENOMEM;
66009 }
66010
66011-static int btrfs_getattr(struct vfsmount *mnt,
66012+int btrfs_getattr(struct vfsmount *mnt,
66013 struct dentry *dentry, struct kstat *stat)
66014 {
66015 struct inode *inode = dentry->d_inode;
66016@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
66017 return 0;
66018 }
66019
66020+EXPORT_SYMBOL(btrfs_getattr);
66021+
66022+dev_t get_btrfs_dev_from_inode(struct inode *inode)
66023+{
66024+ return BTRFS_I(inode)->root->anon_super.s_dev;
66025+}
66026+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
66027+
66028 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
66029 struct inode *new_dir, struct dentry *new_dentry)
66030 {
66031@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
66032 .fsync = btrfs_sync_file,
66033 };
66034
66035-static struct extent_io_ops btrfs_extent_io_ops = {
66036+static const struct extent_io_ops btrfs_extent_io_ops = {
66037 .fill_delalloc = run_delalloc_range,
66038 .submit_bio_hook = btrfs_submit_bio_hook,
66039 .merge_bio_hook = btrfs_merge_bio_hook,
66040diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
66041index ab7ab53..94e0781 100644
66042--- a/fs/btrfs/relocation.c
66043+++ b/fs/btrfs/relocation.c
66044@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
66045 }
66046 spin_unlock(&rc->reloc_root_tree.lock);
66047
66048- BUG_ON((struct btrfs_root *)node->data != root);
66049+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
66050
66051 if (!del) {
66052 spin_lock(&rc->reloc_root_tree.lock);
66053diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
66054index a240b6f..4ce16ef 100644
66055--- a/fs/btrfs/sysfs.c
66056+++ b/fs/btrfs/sysfs.c
66057@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
66058 complete(&root->kobj_unregister);
66059 }
66060
66061-static struct sysfs_ops btrfs_super_attr_ops = {
66062+static const struct sysfs_ops btrfs_super_attr_ops = {
66063 .show = btrfs_super_attr_show,
66064 .store = btrfs_super_attr_store,
66065 };
66066
66067-static struct sysfs_ops btrfs_root_attr_ops = {
66068+static const struct sysfs_ops btrfs_root_attr_ops = {
66069 .show = btrfs_root_attr_show,
66070 .store = btrfs_root_attr_store,
66071 };
66072diff --git a/fs/buffer.c b/fs/buffer.c
66073index 6fa5302..395d9f6 100644
66074--- a/fs/buffer.c
66075+++ b/fs/buffer.c
66076@@ -25,6 +25,7 @@
66077 #include <linux/percpu.h>
66078 #include <linux/slab.h>
66079 #include <linux/capability.h>
66080+#include <linux/security.h>
66081 #include <linux/blkdev.h>
66082 #include <linux/file.h>
66083 #include <linux/quotaops.h>
66084diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
66085index 3797e00..ce776f6 100644
66086--- a/fs/cachefiles/bind.c
66087+++ b/fs/cachefiles/bind.c
66088@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
66089 args);
66090
66091 /* start by checking things over */
66092- ASSERT(cache->fstop_percent >= 0 &&
66093- cache->fstop_percent < cache->fcull_percent &&
66094+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
66095 cache->fcull_percent < cache->frun_percent &&
66096 cache->frun_percent < 100);
66097
66098- ASSERT(cache->bstop_percent >= 0 &&
66099- cache->bstop_percent < cache->bcull_percent &&
66100+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
66101 cache->bcull_percent < cache->brun_percent &&
66102 cache->brun_percent < 100);
66103
66104diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
66105index 4618516..bb30d01 100644
66106--- a/fs/cachefiles/daemon.c
66107+++ b/fs/cachefiles/daemon.c
66108@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
66109 if (test_bit(CACHEFILES_DEAD, &cache->flags))
66110 return -EIO;
66111
66112- if (datalen < 0 || datalen > PAGE_SIZE - 1)
66113+ if (datalen > PAGE_SIZE - 1)
66114 return -EOPNOTSUPP;
66115
66116 /* drag the command string into the kernel so we can parse it */
66117@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
66118 if (args[0] != '%' || args[1] != '\0')
66119 return -EINVAL;
66120
66121- if (fstop < 0 || fstop >= cache->fcull_percent)
66122+ if (fstop >= cache->fcull_percent)
66123 return cachefiles_daemon_range_error(cache, args);
66124
66125 cache->fstop_percent = fstop;
66126@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
66127 if (args[0] != '%' || args[1] != '\0')
66128 return -EINVAL;
66129
66130- if (bstop < 0 || bstop >= cache->bcull_percent)
66131+ if (bstop >= cache->bcull_percent)
66132 return cachefiles_daemon_range_error(cache, args);
66133
66134 cache->bstop_percent = bstop;
66135diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
66136index f7c255f..fcd61de 100644
66137--- a/fs/cachefiles/internal.h
66138+++ b/fs/cachefiles/internal.h
66139@@ -56,7 +56,7 @@ struct cachefiles_cache {
66140 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
66141 struct rb_root active_nodes; /* active nodes (can't be culled) */
66142 rwlock_t active_lock; /* lock for active_nodes */
66143- atomic_t gravecounter; /* graveyard uniquifier */
66144+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
66145 unsigned frun_percent; /* when to stop culling (% files) */
66146 unsigned fcull_percent; /* when to start culling (% files) */
66147 unsigned fstop_percent; /* when to stop allocating (% files) */
66148@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
66149 * proc.c
66150 */
66151 #ifdef CONFIG_CACHEFILES_HISTOGRAM
66152-extern atomic_t cachefiles_lookup_histogram[HZ];
66153-extern atomic_t cachefiles_mkdir_histogram[HZ];
66154-extern atomic_t cachefiles_create_histogram[HZ];
66155+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66156+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66157+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
66158
66159 extern int __init cachefiles_proc_init(void);
66160 extern void cachefiles_proc_cleanup(void);
66161 static inline
66162-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
66163+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
66164 {
66165 unsigned long jif = jiffies - start_jif;
66166 if (jif >= HZ)
66167 jif = HZ - 1;
66168- atomic_inc(&histogram[jif]);
66169+ atomic_inc_unchecked(&histogram[jif]);
66170 }
66171
66172 #else
66173diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
66174index 14ac480..a62766c 100644
66175--- a/fs/cachefiles/namei.c
66176+++ b/fs/cachefiles/namei.c
66177@@ -250,7 +250,7 @@ try_again:
66178 /* first step is to make up a grave dentry in the graveyard */
66179 sprintf(nbuffer, "%08x%08x",
66180 (uint32_t) get_seconds(),
66181- (uint32_t) atomic_inc_return(&cache->gravecounter));
66182+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
66183
66184 /* do the multiway lock magic */
66185 trap = lock_rename(cache->graveyard, dir);
66186diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
66187index eccd339..4c1d995 100644
66188--- a/fs/cachefiles/proc.c
66189+++ b/fs/cachefiles/proc.c
66190@@ -14,9 +14,9 @@
66191 #include <linux/seq_file.h>
66192 #include "internal.h"
66193
66194-atomic_t cachefiles_lookup_histogram[HZ];
66195-atomic_t cachefiles_mkdir_histogram[HZ];
66196-atomic_t cachefiles_create_histogram[HZ];
66197+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
66198+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
66199+atomic_unchecked_t cachefiles_create_histogram[HZ];
66200
66201 /*
66202 * display the latency histogram
66203@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
66204 return 0;
66205 default:
66206 index = (unsigned long) v - 3;
66207- x = atomic_read(&cachefiles_lookup_histogram[index]);
66208- y = atomic_read(&cachefiles_mkdir_histogram[index]);
66209- z = atomic_read(&cachefiles_create_histogram[index]);
66210+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
66211+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
66212+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
66213 if (x == 0 && y == 0 && z == 0)
66214 return 0;
66215
66216diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
66217index a6c8c6f..5cf8517 100644
66218--- a/fs/cachefiles/rdwr.c
66219+++ b/fs/cachefiles/rdwr.c
66220@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
66221 old_fs = get_fs();
66222 set_fs(KERNEL_DS);
66223 ret = file->f_op->write(
66224- file, (const void __user *) data, len, &pos);
66225+ file, (const void __force_user *) data, len, &pos);
66226 set_fs(old_fs);
66227 kunmap(page);
66228 if (ret != len)
66229diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
66230index 20692fb..0098fb7 100644
66231--- a/fs/cifs/asn1.c
66232+++ b/fs/cifs/asn1.c
66233@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
66234
66235 static int
66236 asn1_oid_decode(struct asn1_ctx *ctx,
66237+ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
66238+static int
66239+asn1_oid_decode(struct asn1_ctx *ctx,
66240 unsigned char *eoc, unsigned long **oid, unsigned int *len)
66241 {
66242 unsigned long subid;
66243diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
66244index 42cec2a..2aba466 100644
66245--- a/fs/cifs/cifs_debug.c
66246+++ b/fs/cifs/cifs_debug.c
66247@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
66248 tcon = list_entry(tmp3,
66249 struct cifsTconInfo,
66250 tcon_list);
66251- atomic_set(&tcon->num_smbs_sent, 0);
66252- atomic_set(&tcon->num_writes, 0);
66253- atomic_set(&tcon->num_reads, 0);
66254- atomic_set(&tcon->num_oplock_brks, 0);
66255- atomic_set(&tcon->num_opens, 0);
66256- atomic_set(&tcon->num_posixopens, 0);
66257- atomic_set(&tcon->num_posixmkdirs, 0);
66258- atomic_set(&tcon->num_closes, 0);
66259- atomic_set(&tcon->num_deletes, 0);
66260- atomic_set(&tcon->num_mkdirs, 0);
66261- atomic_set(&tcon->num_rmdirs, 0);
66262- atomic_set(&tcon->num_renames, 0);
66263- atomic_set(&tcon->num_t2renames, 0);
66264- atomic_set(&tcon->num_ffirst, 0);
66265- atomic_set(&tcon->num_fnext, 0);
66266- atomic_set(&tcon->num_fclose, 0);
66267- atomic_set(&tcon->num_hardlinks, 0);
66268- atomic_set(&tcon->num_symlinks, 0);
66269- atomic_set(&tcon->num_locks, 0);
66270+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
66271+ atomic_set_unchecked(&tcon->num_writes, 0);
66272+ atomic_set_unchecked(&tcon->num_reads, 0);
66273+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
66274+ atomic_set_unchecked(&tcon->num_opens, 0);
66275+ atomic_set_unchecked(&tcon->num_posixopens, 0);
66276+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
66277+ atomic_set_unchecked(&tcon->num_closes, 0);
66278+ atomic_set_unchecked(&tcon->num_deletes, 0);
66279+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
66280+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
66281+ atomic_set_unchecked(&tcon->num_renames, 0);
66282+ atomic_set_unchecked(&tcon->num_t2renames, 0);
66283+ atomic_set_unchecked(&tcon->num_ffirst, 0);
66284+ atomic_set_unchecked(&tcon->num_fnext, 0);
66285+ atomic_set_unchecked(&tcon->num_fclose, 0);
66286+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
66287+ atomic_set_unchecked(&tcon->num_symlinks, 0);
66288+ atomic_set_unchecked(&tcon->num_locks, 0);
66289 }
66290 }
66291 }
66292@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
66293 if (tcon->need_reconnect)
66294 seq_puts(m, "\tDISCONNECTED ");
66295 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
66296- atomic_read(&tcon->num_smbs_sent),
66297- atomic_read(&tcon->num_oplock_brks));
66298+ atomic_read_unchecked(&tcon->num_smbs_sent),
66299+ atomic_read_unchecked(&tcon->num_oplock_brks));
66300 seq_printf(m, "\nReads: %d Bytes: %lld",
66301- atomic_read(&tcon->num_reads),
66302+ atomic_read_unchecked(&tcon->num_reads),
66303 (long long)(tcon->bytes_read));
66304 seq_printf(m, "\nWrites: %d Bytes: %lld",
66305- atomic_read(&tcon->num_writes),
66306+ atomic_read_unchecked(&tcon->num_writes),
66307 (long long)(tcon->bytes_written));
66308 seq_printf(m, "\nFlushes: %d",
66309- atomic_read(&tcon->num_flushes));
66310+ atomic_read_unchecked(&tcon->num_flushes));
66311 seq_printf(m, "\nLocks: %d HardLinks: %d "
66312 "Symlinks: %d",
66313- atomic_read(&tcon->num_locks),
66314- atomic_read(&tcon->num_hardlinks),
66315- atomic_read(&tcon->num_symlinks));
66316+ atomic_read_unchecked(&tcon->num_locks),
66317+ atomic_read_unchecked(&tcon->num_hardlinks),
66318+ atomic_read_unchecked(&tcon->num_symlinks));
66319 seq_printf(m, "\nOpens: %d Closes: %d "
66320 "Deletes: %d",
66321- atomic_read(&tcon->num_opens),
66322- atomic_read(&tcon->num_closes),
66323- atomic_read(&tcon->num_deletes));
66324+ atomic_read_unchecked(&tcon->num_opens),
66325+ atomic_read_unchecked(&tcon->num_closes),
66326+ atomic_read_unchecked(&tcon->num_deletes));
66327 seq_printf(m, "\nPosix Opens: %d "
66328 "Posix Mkdirs: %d",
66329- atomic_read(&tcon->num_posixopens),
66330- atomic_read(&tcon->num_posixmkdirs));
66331+ atomic_read_unchecked(&tcon->num_posixopens),
66332+ atomic_read_unchecked(&tcon->num_posixmkdirs));
66333 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
66334- atomic_read(&tcon->num_mkdirs),
66335- atomic_read(&tcon->num_rmdirs));
66336+ atomic_read_unchecked(&tcon->num_mkdirs),
66337+ atomic_read_unchecked(&tcon->num_rmdirs));
66338 seq_printf(m, "\nRenames: %d T2 Renames %d",
66339- atomic_read(&tcon->num_renames),
66340- atomic_read(&tcon->num_t2renames));
66341+ atomic_read_unchecked(&tcon->num_renames),
66342+ atomic_read_unchecked(&tcon->num_t2renames));
66343 seq_printf(m, "\nFindFirst: %d FNext %d "
66344 "FClose %d",
66345- atomic_read(&tcon->num_ffirst),
66346- atomic_read(&tcon->num_fnext),
66347- atomic_read(&tcon->num_fclose));
66348+ atomic_read_unchecked(&tcon->num_ffirst),
66349+ atomic_read_unchecked(&tcon->num_fnext),
66350+ atomic_read_unchecked(&tcon->num_fclose));
66351 }
66352 }
66353 }
66354diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
66355index 1445407..68cb0dc 100644
66356--- a/fs/cifs/cifsfs.c
66357+++ b/fs/cifs/cifsfs.c
66358@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
66359 cifs_req_cachep = kmem_cache_create("cifs_request",
66360 CIFSMaxBufSize +
66361 MAX_CIFS_HDR_SIZE, 0,
66362- SLAB_HWCACHE_ALIGN, NULL);
66363+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
66364 if (cifs_req_cachep == NULL)
66365 return -ENOMEM;
66366
66367@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
66368 efficient to alloc 1 per page off the slab compared to 17K (5page)
66369 alloc of large cifs buffers even when page debugging is on */
66370 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
66371- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
66372+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
66373 NULL);
66374 if (cifs_sm_req_cachep == NULL) {
66375 mempool_destroy(cifs_req_poolp);
66376@@ -991,8 +991,8 @@ init_cifs(void)
66377 atomic_set(&bufAllocCount, 0);
66378 atomic_set(&smBufAllocCount, 0);
66379 #ifdef CONFIG_CIFS_STATS2
66380- atomic_set(&totBufAllocCount, 0);
66381- atomic_set(&totSmBufAllocCount, 0);
66382+ atomic_set_unchecked(&totBufAllocCount, 0);
66383+ atomic_set_unchecked(&totSmBufAllocCount, 0);
66384 #endif /* CONFIG_CIFS_STATS2 */
66385
66386 atomic_set(&midCount, 0);
66387diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
66388index e29581e..1c22bab 100644
66389--- a/fs/cifs/cifsglob.h
66390+++ b/fs/cifs/cifsglob.h
66391@@ -252,28 +252,28 @@ struct cifsTconInfo {
66392 __u16 Flags; /* optional support bits */
66393 enum statusEnum tidStatus;
66394 #ifdef CONFIG_CIFS_STATS
66395- atomic_t num_smbs_sent;
66396- atomic_t num_writes;
66397- atomic_t num_reads;
66398- atomic_t num_flushes;
66399- atomic_t num_oplock_brks;
66400- atomic_t num_opens;
66401- atomic_t num_closes;
66402- atomic_t num_deletes;
66403- atomic_t num_mkdirs;
66404- atomic_t num_posixopens;
66405- atomic_t num_posixmkdirs;
66406- atomic_t num_rmdirs;
66407- atomic_t num_renames;
66408- atomic_t num_t2renames;
66409- atomic_t num_ffirst;
66410- atomic_t num_fnext;
66411- atomic_t num_fclose;
66412- atomic_t num_hardlinks;
66413- atomic_t num_symlinks;
66414- atomic_t num_locks;
66415- atomic_t num_acl_get;
66416- atomic_t num_acl_set;
66417+ atomic_unchecked_t num_smbs_sent;
66418+ atomic_unchecked_t num_writes;
66419+ atomic_unchecked_t num_reads;
66420+ atomic_unchecked_t num_flushes;
66421+ atomic_unchecked_t num_oplock_brks;
66422+ atomic_unchecked_t num_opens;
66423+ atomic_unchecked_t num_closes;
66424+ atomic_unchecked_t num_deletes;
66425+ atomic_unchecked_t num_mkdirs;
66426+ atomic_unchecked_t num_posixopens;
66427+ atomic_unchecked_t num_posixmkdirs;
66428+ atomic_unchecked_t num_rmdirs;
66429+ atomic_unchecked_t num_renames;
66430+ atomic_unchecked_t num_t2renames;
66431+ atomic_unchecked_t num_ffirst;
66432+ atomic_unchecked_t num_fnext;
66433+ atomic_unchecked_t num_fclose;
66434+ atomic_unchecked_t num_hardlinks;
66435+ atomic_unchecked_t num_symlinks;
66436+ atomic_unchecked_t num_locks;
66437+ atomic_unchecked_t num_acl_get;
66438+ atomic_unchecked_t num_acl_set;
66439 #ifdef CONFIG_CIFS_STATS2
66440 unsigned long long time_writes;
66441 unsigned long long time_reads;
66442@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
66443 }
66444
66445 #ifdef CONFIG_CIFS_STATS
66446-#define cifs_stats_inc atomic_inc
66447+#define cifs_stats_inc atomic_inc_unchecked
66448
66449 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
66450 unsigned int bytes)
66451@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
66452 /* Various Debug counters */
66453 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
66454 #ifdef CONFIG_CIFS_STATS2
66455-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
66456-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
66457+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
66458+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
66459 #endif
66460 GLOBAL_EXTERN atomic_t smBufAllocCount;
66461 GLOBAL_EXTERN atomic_t midCount;
66462diff --git a/fs/cifs/link.c b/fs/cifs/link.c
66463index fc1e048..28b3441 100644
66464--- a/fs/cifs/link.c
66465+++ b/fs/cifs/link.c
66466@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
66467
66468 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
66469 {
66470- char *p = nd_get_link(nd);
66471+ const char *p = nd_get_link(nd);
66472 if (!IS_ERR(p))
66473 kfree(p);
66474 }
66475diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
66476index 95b82e8..12a538d 100644
66477--- a/fs/cifs/misc.c
66478+++ b/fs/cifs/misc.c
66479@@ -155,7 +155,7 @@ cifs_buf_get(void)
66480 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
66481 atomic_inc(&bufAllocCount);
66482 #ifdef CONFIG_CIFS_STATS2
66483- atomic_inc(&totBufAllocCount);
66484+ atomic_inc_unchecked(&totBufAllocCount);
66485 #endif /* CONFIG_CIFS_STATS2 */
66486 }
66487
66488@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
66489 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
66490 atomic_inc(&smBufAllocCount);
66491 #ifdef CONFIG_CIFS_STATS2
66492- atomic_inc(&totSmBufAllocCount);
66493+ atomic_inc_unchecked(&totSmBufAllocCount);
66494 #endif /* CONFIG_CIFS_STATS2 */
66495
66496 }
66497diff --git a/fs/coda/cache.c b/fs/coda/cache.c
66498index a5bf577..6d19845 100644
66499--- a/fs/coda/cache.c
66500+++ b/fs/coda/cache.c
66501@@ -24,14 +24,14 @@
66502 #include <linux/coda_fs_i.h>
66503 #include <linux/coda_cache.h>
66504
66505-static atomic_t permission_epoch = ATOMIC_INIT(0);
66506+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
66507
66508 /* replace or extend an acl cache hit */
66509 void coda_cache_enter(struct inode *inode, int mask)
66510 {
66511 struct coda_inode_info *cii = ITOC(inode);
66512
66513- cii->c_cached_epoch = atomic_read(&permission_epoch);
66514+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
66515 if (cii->c_uid != current_fsuid()) {
66516 cii->c_uid = current_fsuid();
66517 cii->c_cached_perm = mask;
66518@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
66519 void coda_cache_clear_inode(struct inode *inode)
66520 {
66521 struct coda_inode_info *cii = ITOC(inode);
66522- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
66523+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
66524 }
66525
66526 /* remove all acl caches */
66527 void coda_cache_clear_all(struct super_block *sb)
66528 {
66529- atomic_inc(&permission_epoch);
66530+ atomic_inc_unchecked(&permission_epoch);
66531 }
66532
66533
66534@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
66535
66536 hit = (mask & cii->c_cached_perm) == mask &&
66537 cii->c_uid == current_fsuid() &&
66538- cii->c_cached_epoch == atomic_read(&permission_epoch);
66539+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
66540
66541 return hit;
66542 }
66543diff --git a/fs/compat.c b/fs/compat.c
66544index d1e2411..9a958d2 100644
66545--- a/fs/compat.c
66546+++ b/fs/compat.c
66547@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
66548 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
66549 {
66550 compat_ino_t ino = stat->ino;
66551- typeof(ubuf->st_uid) uid = 0;
66552- typeof(ubuf->st_gid) gid = 0;
66553+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
66554+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
66555 int err;
66556
66557 SET_UID(uid, stat->uid);
66558@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
66559
66560 set_fs(KERNEL_DS);
66561 /* The __user pointer cast is valid because of the set_fs() */
66562- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
66563+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
66564 set_fs(oldfs);
66565 /* truncating is ok because it's a user address */
66566 if (!ret)
66567@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
66568
66569 struct compat_readdir_callback {
66570 struct compat_old_linux_dirent __user *dirent;
66571+ struct file * file;
66572 int result;
66573 };
66574
66575@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
66576 buf->result = -EOVERFLOW;
66577 return -EOVERFLOW;
66578 }
66579+
66580+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66581+ return 0;
66582+
66583 buf->result++;
66584 dirent = buf->dirent;
66585 if (!access_ok(VERIFY_WRITE, dirent,
66586@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
66587
66588 buf.result = 0;
66589 buf.dirent = dirent;
66590+ buf.file = file;
66591
66592 error = vfs_readdir(file, compat_fillonedir, &buf);
66593 if (buf.result)
66594@@ -899,6 +905,7 @@ struct compat_linux_dirent {
66595 struct compat_getdents_callback {
66596 struct compat_linux_dirent __user *current_dir;
66597 struct compat_linux_dirent __user *previous;
66598+ struct file * file;
66599 int count;
66600 int error;
66601 };
66602@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
66603 buf->error = -EOVERFLOW;
66604 return -EOVERFLOW;
66605 }
66606+
66607+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66608+ return 0;
66609+
66610 dirent = buf->previous;
66611 if (dirent) {
66612 if (__put_user(offset, &dirent->d_off))
66613@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
66614 buf.previous = NULL;
66615 buf.count = count;
66616 buf.error = 0;
66617+ buf.file = file;
66618
66619 error = vfs_readdir(file, compat_filldir, &buf);
66620 if (error >= 0)
66621@@ -987,6 +999,7 @@ out:
66622 struct compat_getdents_callback64 {
66623 struct linux_dirent64 __user *current_dir;
66624 struct linux_dirent64 __user *previous;
66625+ struct file * file;
66626 int count;
66627 int error;
66628 };
66629@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
66630 buf->error = -EINVAL; /* only used if we fail.. */
66631 if (reclen > buf->count)
66632 return -EINVAL;
66633+
66634+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66635+ return 0;
66636+
66637 dirent = buf->previous;
66638
66639 if (dirent) {
66640@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
66641 buf.previous = NULL;
66642 buf.count = count;
66643 buf.error = 0;
66644+ buf.file = file;
66645
66646 error = vfs_readdir(file, compat_filldir64, &buf);
66647 if (error >= 0)
66648 error = buf.error;
66649 lastdirent = buf.previous;
66650 if (lastdirent) {
66651- typeof(lastdirent->d_off) d_off = file->f_pos;
66652+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
66653 if (__put_user_unaligned(d_off, &lastdirent->d_off))
66654 error = -EFAULT;
66655 else
66656@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
66657 * verify all the pointers
66658 */
66659 ret = -EINVAL;
66660- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
66661+ if (nr_segs > UIO_MAXIOV)
66662 goto out;
66663 if (!file->f_op)
66664 goto out;
66665@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
66666 compat_uptr_t __user *envp,
66667 struct pt_regs * regs)
66668 {
66669+#ifdef CONFIG_GRKERNSEC
66670+ struct file *old_exec_file;
66671+ struct acl_subject_label *old_acl;
66672+ struct rlimit old_rlim[RLIM_NLIMITS];
66673+#endif
66674 struct linux_binprm *bprm;
66675 struct file *file;
66676 struct files_struct *displaced;
66677 bool clear_in_exec;
66678 int retval;
66679+ const struct cred *cred = current_cred();
66680+
66681+ /*
66682+ * We move the actual failure in case of RLIMIT_NPROC excess from
66683+ * set*uid() to execve() because too many poorly written programs
66684+ * don't check setuid() return code. Here we additionally recheck
66685+ * whether NPROC limit is still exceeded.
66686+ */
66687+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
66688+
66689+ if ((current->flags & PF_NPROC_EXCEEDED) &&
66690+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66691+ retval = -EAGAIN;
66692+ goto out_ret;
66693+ }
66694+
66695+ /* We're below the limit (still or again), so we don't want to make
66696+ * further execve() calls fail. */
66697+ current->flags &= ~PF_NPROC_EXCEEDED;
66698
66699 retval = unshare_files(&displaced);
66700 if (retval)
66701@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
66702 if (IS_ERR(file))
66703 goto out_unmark;
66704
66705+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
66706+ retval = -EPERM;
66707+ goto out_file;
66708+ }
66709+
66710 sched_exec();
66711
66712 bprm->file = file;
66713 bprm->filename = filename;
66714 bprm->interp = filename;
66715
66716+ if (gr_process_user_ban()) {
66717+ retval = -EPERM;
66718+ goto out_file;
66719+ }
66720+
66721+ retval = -EACCES;
66722+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
66723+ goto out_file;
66724+
66725 retval = bprm_mm_init(bprm);
66726 if (retval)
66727 goto out_file;
66728@@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename,
66729 if (retval < 0)
66730 goto out;
66731
66732+#ifdef CONFIG_GRKERNSEC
66733+ old_acl = current->acl;
66734+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
66735+ old_exec_file = current->exec_file;
66736+ get_file(file);
66737+ current->exec_file = file;
66738+#endif
66739+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66740+ /* limit suid stack to 8MB
66741+ we saved the old limits above and will restore them if this exec fails
66742+ */
66743+ if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid()))
66744+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
66745+#endif
66746+
66747+ if (!gr_tpe_allow(file)) {
66748+ retval = -EACCES;
66749+ goto out_fail;
66750+ }
66751+
66752+ if (gr_check_crash_exec(file)) {
66753+ retval = -EACCES;
66754+ goto out_fail;
66755+ }
66756+
66757+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
66758+ bprm->unsafe);
66759+ if (retval < 0)
66760+ goto out_fail;
66761+
66762 retval = copy_strings_kernel(1, &bprm->filename, bprm);
66763 if (retval < 0)
66764- goto out;
66765+ goto out_fail;
66766
66767 bprm->exec = bprm->p;
66768 retval = compat_copy_strings(bprm->envc, envp, bprm);
66769 if (retval < 0)
66770- goto out;
66771+ goto out_fail;
66772
66773 retval = compat_copy_strings(bprm->argc, argv, bprm);
66774 if (retval < 0)
66775- goto out;
66776+ goto out_fail;
66777+
66778+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
66779+
66780+ gr_handle_exec_args_compat(bprm, argv);
66781
66782 retval = search_binary_handler(bprm, regs);
66783 if (retval < 0)
66784- goto out;
66785+ goto out_fail;
66786+#ifdef CONFIG_GRKERNSEC
66787+ if (old_exec_file)
66788+ fput(old_exec_file);
66789+#endif
66790
66791 /* execve succeeded */
66792+ increment_exec_counter();
66793 current->fs->in_exec = 0;
66794 current->in_execve = 0;
66795 acct_update_integrals(current);
66796@@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename,
66797 put_files_struct(displaced);
66798 return retval;
66799
66800+out_fail:
66801+#ifdef CONFIG_GRKERNSEC
66802+ current->acl = old_acl;
66803+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
66804+ fput(current->exec_file);
66805+ current->exec_file = old_exec_file;
66806+#endif
66807+
66808 out:
66809 if (bprm->mm) {
66810 acct_arg_size(bprm, 0);
66811@@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
66812 struct fdtable *fdt;
66813 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
66814
66815+ pax_track_stack();
66816+
66817 if (n < 0)
66818 goto out_nofds;
66819
66820@@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
66821 oldfs = get_fs();
66822 set_fs(KERNEL_DS);
66823 /* The __user pointer casts are valid because of the set_fs() */
66824- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
66825+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
66826 set_fs(oldfs);
66827
66828 if (err)
66829diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
66830index 0adced2..bbb1b0d 100644
66831--- a/fs/compat_binfmt_elf.c
66832+++ b/fs/compat_binfmt_elf.c
66833@@ -29,10 +29,12 @@
66834 #undef elfhdr
66835 #undef elf_phdr
66836 #undef elf_note
66837+#undef elf_dyn
66838 #undef elf_addr_t
66839 #define elfhdr elf32_hdr
66840 #define elf_phdr elf32_phdr
66841 #define elf_note elf32_note
66842+#define elf_dyn Elf32_Dyn
66843 #define elf_addr_t Elf32_Addr
66844
66845 /*
66846diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
66847index d84e705..d8c364c 100644
66848--- a/fs/compat_ioctl.c
66849+++ b/fs/compat_ioctl.c
66850@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
66851 up = (struct compat_video_spu_palette __user *) arg;
66852 err = get_user(palp, &up->palette);
66853 err |= get_user(length, &up->length);
66854+ if (err)
66855+ return -EFAULT;
66856
66857 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
66858 err = put_user(compat_ptr(palp), &up_native->palette);
66859@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
66860 return -EFAULT;
66861 if (__get_user(udata, &ss32->iomem_base))
66862 return -EFAULT;
66863- ss.iomem_base = compat_ptr(udata);
66864+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
66865 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
66866 __get_user(ss.port_high, &ss32->port_high))
66867 return -EFAULT;
66868@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
66869 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
66870 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
66871 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
66872- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66873+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66874 return -EFAULT;
66875
66876 return ioctl_preallocate(file, p);
66877diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
66878index 8e48b52..f01ed91 100644
66879--- a/fs/configfs/dir.c
66880+++ b/fs/configfs/dir.c
66881@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66882 }
66883 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
66884 struct configfs_dirent *next;
66885- const char * name;
66886+ const unsigned char * name;
66887+ char d_name[sizeof(next->s_dentry->d_iname)];
66888 int len;
66889
66890 next = list_entry(p, struct configfs_dirent,
66891@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66892 continue;
66893
66894 name = configfs_get_name(next);
66895- len = strlen(name);
66896+ if (next->s_dentry && name == next->s_dentry->d_iname) {
66897+ len = next->s_dentry->d_name.len;
66898+ memcpy(d_name, name, len);
66899+ name = d_name;
66900+ } else
66901+ len = strlen(name);
66902 if (next->s_dentry)
66903 ino = next->s_dentry->d_inode->i_ino;
66904 else
66905diff --git a/fs/dcache.c b/fs/dcache.c
66906index 44c0aea..2529092 100644
66907--- a/fs/dcache.c
66908+++ b/fs/dcache.c
66909@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
66910
66911 static struct kmem_cache *dentry_cache __read_mostly;
66912
66913-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66914-
66915 /*
66916 * This is the single most critical data structure when it comes
66917 * to the dcache: the hashtable for lookups. Somebody should try
66918@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
66919 mempages -= reserve;
66920
66921 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
66922- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
66923+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
66924
66925 dcache_init();
66926 inode_init();
66927diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
66928index 39c6ee8..dcee0f1 100644
66929--- a/fs/debugfs/inode.c
66930+++ b/fs/debugfs/inode.c
66931@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
66932 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
66933 {
66934 return debugfs_create_file(name,
66935+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
66936+ S_IFDIR | S_IRWXU,
66937+#else
66938 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
66939+#endif
66940 parent, NULL, NULL);
66941 }
66942 EXPORT_SYMBOL_GPL(debugfs_create_dir);
66943diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
66944index c010ecf..a8d8c59 100644
66945--- a/fs/dlm/lockspace.c
66946+++ b/fs/dlm/lockspace.c
66947@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
66948 kfree(ls);
66949 }
66950
66951-static struct sysfs_ops dlm_attr_ops = {
66952+static const struct sysfs_ops dlm_attr_ops = {
66953 .show = dlm_attr_show,
66954 .store = dlm_attr_store,
66955 };
66956diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
66957index 7e164bb..62fa913 100644
66958--- a/fs/ecryptfs/crypto.c
66959+++ b/fs/ecryptfs/crypto.c
66960@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
66961 rc);
66962 goto out;
66963 }
66964- if (unlikely(ecryptfs_verbosity > 0)) {
66965- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
66966- "with iv:\n");
66967- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
66968- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
66969- "encryption:\n");
66970- ecryptfs_dump_hex((char *)
66971- (page_address(page)
66972- + (extent_offset * crypt_stat->extent_size)),
66973- 8);
66974- }
66975 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
66976 page, (extent_offset
66977 * crypt_stat->extent_size),
66978@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
66979 goto out;
66980 }
66981 rc = 0;
66982- if (unlikely(ecryptfs_verbosity > 0)) {
66983- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
66984- "rc = [%d]\n", (extent_base + extent_offset),
66985- rc);
66986- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
66987- "encryption:\n");
66988- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
66989- }
66990 out:
66991 return rc;
66992 }
66993@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
66994 rc);
66995 goto out;
66996 }
66997- if (unlikely(ecryptfs_verbosity > 0)) {
66998- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
66999- "with iv:\n");
67000- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
67001- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
67002- "decryption:\n");
67003- ecryptfs_dump_hex((char *)
67004- (page_address(enc_extent_page)
67005- + (extent_offset * crypt_stat->extent_size)),
67006- 8);
67007- }
67008 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
67009 (extent_offset
67010 * crypt_stat->extent_size),
67011@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
67012 goto out;
67013 }
67014 rc = 0;
67015- if (unlikely(ecryptfs_verbosity > 0)) {
67016- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
67017- "rc = [%d]\n", (extent_base + extent_offset),
67018- rc);
67019- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
67020- "decryption:\n");
67021- ecryptfs_dump_hex((char *)(page_address(page)
67022- + (extent_offset
67023- * crypt_stat->extent_size)), 8);
67024- }
67025 out:
67026 return rc;
67027 }
67028diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
67029index 502b09f..49129f4 100644
67030--- a/fs/ecryptfs/file.c
67031+++ b/fs/ecryptfs/file.c
67032@@ -348,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
67033 #ifdef CONFIG_COMPAT
67034 .compat_ioctl = ecryptfs_compat_ioctl,
67035 #endif
67036- .mmap = generic_file_mmap,
67037 .open = ecryptfs_open,
67038 .flush = ecryptfs_flush,
67039 .release = ecryptfs_release,
67040diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
67041index 90a6087..fa05803 100644
67042--- a/fs/ecryptfs/inode.c
67043+++ b/fs/ecryptfs/inode.c
67044@@ -647,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
67045 old_fs = get_fs();
67046 set_fs(get_ds());
67047 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
67048- (char __user *)lower_buf,
67049+ (char __force_user *)lower_buf,
67050 lower_bufsiz);
67051 set_fs(old_fs);
67052 if (rc < 0)
67053@@ -693,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
67054 }
67055 old_fs = get_fs();
67056 set_fs(get_ds());
67057- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
67058+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
67059 set_fs(old_fs);
67060 if (rc < 0)
67061 goto out_free;
67062diff --git a/fs/exec.c b/fs/exec.c
67063index 86fafc6..6a109b9 100644
67064--- a/fs/exec.c
67065+++ b/fs/exec.c
67066@@ -56,12 +56,28 @@
67067 #include <linux/fsnotify.h>
67068 #include <linux/fs_struct.h>
67069 #include <linux/pipe_fs_i.h>
67070+#include <linux/random.h>
67071+#include <linux/seq_file.h>
67072+
67073+#ifdef CONFIG_PAX_REFCOUNT
67074+#include <linux/kallsyms.h>
67075+#include <linux/kdebug.h>
67076+#endif
67077
67078 #include <asm/uaccess.h>
67079 #include <asm/mmu_context.h>
67080 #include <asm/tlb.h>
67081 #include "internal.h"
67082
67083+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
67084+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
67085+#endif
67086+
67087+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
67088+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
67089+EXPORT_SYMBOL(pax_set_initial_flags_func);
67090+#endif
67091+
67092 int core_uses_pid;
67093 char core_pattern[CORENAME_MAX_SIZE] = "core";
67094 unsigned int core_pipe_limit;
67095@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67096 int write)
67097 {
67098 struct page *page;
67099- int ret;
67100
67101-#ifdef CONFIG_STACK_GROWSUP
67102- if (write) {
67103- ret = expand_stack_downwards(bprm->vma, pos);
67104- if (ret < 0)
67105- return NULL;
67106- }
67107-#endif
67108- ret = get_user_pages(current, bprm->mm, pos,
67109- 1, write, 1, &page, NULL);
67110- if (ret <= 0)
67111+ if (0 > expand_stack_downwards(bprm->vma, pos))
67112+ return NULL;
67113+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
67114 return NULL;
67115
67116 if (write) {
67117@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
67118 if (size <= ARG_MAX)
67119 return page;
67120
67121+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67122+ // only allow 512KB for argv+env on suid/sgid binaries
67123+ // to prevent easy ASLR exhaustion
67124+ if (((bprm->cred->euid != current_euid()) ||
67125+ (bprm->cred->egid != current_egid())) &&
67126+ (size > (512 * 1024))) {
67127+ put_page(page);
67128+ return NULL;
67129+ }
67130+#endif
67131+
67132 /*
67133 * Limit to 1/4-th the stack size for the argv+env strings.
67134 * This ensures that:
67135@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67136 vma->vm_end = STACK_TOP_MAX;
67137 vma->vm_start = vma->vm_end - PAGE_SIZE;
67138 vma->vm_flags = VM_STACK_FLAGS;
67139+
67140+#ifdef CONFIG_PAX_SEGMEXEC
67141+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
67142+#endif
67143+
67144 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
67145
67146 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
67147@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
67148 mm->stack_vm = mm->total_vm = 1;
67149 up_write(&mm->mmap_sem);
67150 bprm->p = vma->vm_end - sizeof(void *);
67151+
67152+#ifdef CONFIG_PAX_RANDUSTACK
67153+ if (randomize_va_space)
67154+ bprm->p ^= random32() & ~PAGE_MASK;
67155+#endif
67156+
67157 return 0;
67158 err:
67159 up_write(&mm->mmap_sem);
67160@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
67161 int r;
67162 mm_segment_t oldfs = get_fs();
67163 set_fs(KERNEL_DS);
67164- r = copy_strings(argc, (char __user * __user *)argv, bprm);
67165+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
67166 set_fs(oldfs);
67167 return r;
67168 }
67169@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67170 unsigned long new_end = old_end - shift;
67171 struct mmu_gather *tlb;
67172
67173- BUG_ON(new_start > new_end);
67174+ if (new_start >= new_end || new_start < mmap_min_addr)
67175+ return -ENOMEM;
67176
67177 /*
67178 * ensure there are no vmas between where we want to go
67179@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
67180 if (vma != find_vma(mm, new_start))
67181 return -EFAULT;
67182
67183+#ifdef CONFIG_PAX_SEGMEXEC
67184+ BUG_ON(pax_find_mirror_vma(vma));
67185+#endif
67186+
67187 /*
67188 * cover the whole range: [new_start, old_end)
67189 */
67190@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
67191 stack_top = arch_align_stack(stack_top);
67192 stack_top = PAGE_ALIGN(stack_top);
67193
67194- if (unlikely(stack_top < mmap_min_addr) ||
67195- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
67196- return -ENOMEM;
67197-
67198 stack_shift = vma->vm_end - stack_top;
67199
67200 bprm->p -= stack_shift;
67201@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
67202 bprm->exec -= stack_shift;
67203
67204 down_write(&mm->mmap_sem);
67205+
67206+ /* Move stack pages down in memory. */
67207+ if (stack_shift) {
67208+ ret = shift_arg_pages(vma, stack_shift);
67209+ if (ret)
67210+ goto out_unlock;
67211+ }
67212+
67213 vm_flags = VM_STACK_FLAGS;
67214
67215 /*
67216@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
67217 vm_flags &= ~VM_EXEC;
67218 vm_flags |= mm->def_flags;
67219
67220+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67221+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
67222+ vm_flags &= ~VM_EXEC;
67223+
67224+#ifdef CONFIG_PAX_MPROTECT
67225+ if (mm->pax_flags & MF_PAX_MPROTECT)
67226+ vm_flags &= ~VM_MAYEXEC;
67227+#endif
67228+
67229+ }
67230+#endif
67231+
67232 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
67233 vm_flags);
67234 if (ret)
67235 goto out_unlock;
67236 BUG_ON(prev != vma);
67237
67238- /* Move stack pages down in memory. */
67239- if (stack_shift) {
67240- ret = shift_arg_pages(vma, stack_shift);
67241- if (ret)
67242- goto out_unlock;
67243- }
67244-
67245 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
67246 stack_size = vma->vm_end - vma->vm_start;
67247 /*
67248@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
67249 old_fs = get_fs();
67250 set_fs(get_ds());
67251 /* The cast to a user pointer is valid due to the set_fs() */
67252- result = vfs_read(file, (void __user *)addr, count, &pos);
67253+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
67254 set_fs(old_fs);
67255 return result;
67256 }
67257@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
67258 perf_event_comm(tsk);
67259 }
67260
67261+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
67262+{
67263+ int i, ch;
67264+
67265+ /* Copies the binary name from after last slash */
67266+ for (i = 0; (ch = *(fn++)) != '\0';) {
67267+ if (ch == '/')
67268+ i = 0; /* overwrite what we wrote */
67269+ else
67270+ if (i < len - 1)
67271+ tcomm[i++] = ch;
67272+ }
67273+ tcomm[i] = '\0';
67274+}
67275+
67276 int flush_old_exec(struct linux_binprm * bprm)
67277 {
67278 int retval;
67279@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
67280
67281 set_mm_exe_file(bprm->mm, bprm->file);
67282
67283+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
67284 /*
67285 * Release all of the old mmap stuff
67286 */
67287@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
67288
67289 void setup_new_exec(struct linux_binprm * bprm)
67290 {
67291- int i, ch;
67292- char * name;
67293- char tcomm[sizeof(current->comm)];
67294-
67295 arch_pick_mmap_layout(current->mm);
67296
67297 /* This is the point of no return */
67298@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
67299 else
67300 set_dumpable(current->mm, suid_dumpable);
67301
67302- name = bprm->filename;
67303-
67304- /* Copies the binary name from after last slash */
67305- for (i=0; (ch = *(name++)) != '\0';) {
67306- if (ch == '/')
67307- i = 0; /* overwrite what we wrote */
67308- else
67309- if (i < (sizeof(tcomm) - 1))
67310- tcomm[i++] = ch;
67311- }
67312- tcomm[i] = '\0';
67313- set_task_comm(current, tcomm);
67314+ set_task_comm(current, bprm->tcomm);
67315
67316 /* Set the new mm task size. We have to do that late because it may
67317 * depend on TIF_32BIT which is only updated in flush_thread() on
67318@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
67319 }
67320 rcu_read_unlock();
67321
67322- if (p->fs->users > n_fs) {
67323+ if (atomic_read(&p->fs->users) > n_fs) {
67324 bprm->unsafe |= LSM_UNSAFE_SHARE;
67325 } else {
67326 res = -EAGAIN;
67327@@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
67328
67329 EXPORT_SYMBOL(search_binary_handler);
67330
67331+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67332+DEFINE_PER_CPU(u64, exec_counter);
67333+static int __init init_exec_counters(void)
67334+{
67335+ unsigned int cpu;
67336+
67337+ for_each_possible_cpu(cpu) {
67338+ per_cpu(exec_counter, cpu) = (u64)cpu;
67339+ }
67340+
67341+ return 0;
67342+}
67343+early_initcall(init_exec_counters);
67344+#endif
67345+
67346 /*
67347 * sys_execve() executes a new program.
67348 */
67349@@ -1347,11 +1407,35 @@ int do_execve(char * filename,
67350 char __user *__user *envp,
67351 struct pt_regs * regs)
67352 {
67353+#ifdef CONFIG_GRKERNSEC
67354+ struct file *old_exec_file;
67355+ struct acl_subject_label *old_acl;
67356+ struct rlimit old_rlim[RLIM_NLIMITS];
67357+#endif
67358 struct linux_binprm *bprm;
67359 struct file *file;
67360 struct files_struct *displaced;
67361 bool clear_in_exec;
67362 int retval;
67363+ const struct cred *cred = current_cred();
67364+
67365+ /*
67366+ * We move the actual failure in case of RLIMIT_NPROC excess from
67367+ * set*uid() to execve() because too many poorly written programs
67368+ * don't check setuid() return code. Here we additionally recheck
67369+ * whether NPROC limit is still exceeded.
67370+ */
67371+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
67372+
67373+ if ((current->flags & PF_NPROC_EXCEEDED) &&
67374+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
67375+ retval = -EAGAIN;
67376+ goto out_ret;
67377+ }
67378+
67379+ /* We're below the limit (still or again), so we don't want to make
67380+ * further execve() calls fail. */
67381+ current->flags &= ~PF_NPROC_EXCEEDED;
67382
67383 retval = unshare_files(&displaced);
67384 if (retval)
67385@@ -1377,12 +1461,27 @@ int do_execve(char * filename,
67386 if (IS_ERR(file))
67387 goto out_unmark;
67388
67389+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
67390+ retval = -EPERM;
67391+ goto out_file;
67392+ }
67393+
67394 sched_exec();
67395
67396 bprm->file = file;
67397 bprm->filename = filename;
67398 bprm->interp = filename;
67399
67400+ if (gr_process_user_ban()) {
67401+ retval = -EPERM;
67402+ goto out_file;
67403+ }
67404+
67405+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
67406+ retval = -EACCES;
67407+ goto out_file;
67408+ }
67409+
67410 retval = bprm_mm_init(bprm);
67411 if (retval)
67412 goto out_file;
67413@@ -1399,25 +1498,66 @@ int do_execve(char * filename,
67414 if (retval < 0)
67415 goto out;
67416
67417+#ifdef CONFIG_GRKERNSEC
67418+ old_acl = current->acl;
67419+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
67420+ old_exec_file = current->exec_file;
67421+ get_file(file);
67422+ current->exec_file = file;
67423+#endif
67424+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67425+ /* limit suid stack to 8MB
67426+ we saved the old limits above and will restore them if this exec fails
67427+ */
67428+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
67429+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
67430+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
67431+#endif
67432+
67433+ if (!gr_tpe_allow(file)) {
67434+ retval = -EACCES;
67435+ goto out_fail;
67436+ }
67437+
67438+ if (gr_check_crash_exec(file)) {
67439+ retval = -EACCES;
67440+ goto out_fail;
67441+ }
67442+
67443+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
67444+ bprm->unsafe);
67445+ if (retval < 0)
67446+ goto out_fail;
67447+
67448 retval = copy_strings_kernel(1, &bprm->filename, bprm);
67449 if (retval < 0)
67450- goto out;
67451+ goto out_fail;
67452
67453 bprm->exec = bprm->p;
67454 retval = copy_strings(bprm->envc, envp, bprm);
67455 if (retval < 0)
67456- goto out;
67457+ goto out_fail;
67458
67459 retval = copy_strings(bprm->argc, argv, bprm);
67460 if (retval < 0)
67461- goto out;
67462+ goto out_fail;
67463+
67464+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
67465+
67466+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
67467
67468 current->flags &= ~PF_KTHREAD;
67469 retval = search_binary_handler(bprm,regs);
67470 if (retval < 0)
67471- goto out;
67472+ goto out_fail;
67473+#ifdef CONFIG_GRKERNSEC
67474+ if (old_exec_file)
67475+ fput(old_exec_file);
67476+#endif
67477
67478 /* execve succeeded */
67479+
67480+ increment_exec_counter();
67481 current->fs->in_exec = 0;
67482 current->in_execve = 0;
67483 acct_update_integrals(current);
67484@@ -1426,6 +1566,14 @@ int do_execve(char * filename,
67485 put_files_struct(displaced);
67486 return retval;
67487
67488+out_fail:
67489+#ifdef CONFIG_GRKERNSEC
67490+ current->acl = old_acl;
67491+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
67492+ fput(current->exec_file);
67493+ current->exec_file = old_exec_file;
67494+#endif
67495+
67496 out:
67497 if (bprm->mm) {
67498 acct_arg_size(bprm, 0);
67499@@ -1591,6 +1739,229 @@ out:
67500 return ispipe;
67501 }
67502
67503+int pax_check_flags(unsigned long *flags)
67504+{
67505+ int retval = 0;
67506+
67507+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
67508+ if (*flags & MF_PAX_SEGMEXEC)
67509+ {
67510+ *flags &= ~MF_PAX_SEGMEXEC;
67511+ retval = -EINVAL;
67512+ }
67513+#endif
67514+
67515+ if ((*flags & MF_PAX_PAGEEXEC)
67516+
67517+#ifdef CONFIG_PAX_PAGEEXEC
67518+ && (*flags & MF_PAX_SEGMEXEC)
67519+#endif
67520+
67521+ )
67522+ {
67523+ *flags &= ~MF_PAX_PAGEEXEC;
67524+ retval = -EINVAL;
67525+ }
67526+
67527+ if ((*flags & MF_PAX_MPROTECT)
67528+
67529+#ifdef CONFIG_PAX_MPROTECT
67530+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67531+#endif
67532+
67533+ )
67534+ {
67535+ *flags &= ~MF_PAX_MPROTECT;
67536+ retval = -EINVAL;
67537+ }
67538+
67539+ if ((*flags & MF_PAX_EMUTRAMP)
67540+
67541+#ifdef CONFIG_PAX_EMUTRAMP
67542+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67543+#endif
67544+
67545+ )
67546+ {
67547+ *flags &= ~MF_PAX_EMUTRAMP;
67548+ retval = -EINVAL;
67549+ }
67550+
67551+ return retval;
67552+}
67553+
67554+EXPORT_SYMBOL(pax_check_flags);
67555+
67556+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67557+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
67558+{
67559+ struct task_struct *tsk = current;
67560+ struct mm_struct *mm = current->mm;
67561+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
67562+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
67563+ char *path_exec = NULL;
67564+ char *path_fault = NULL;
67565+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
67566+
67567+ if (buffer_exec && buffer_fault) {
67568+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
67569+
67570+ down_read(&mm->mmap_sem);
67571+ vma = mm->mmap;
67572+ while (vma && (!vma_exec || !vma_fault)) {
67573+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
67574+ vma_exec = vma;
67575+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
67576+ vma_fault = vma;
67577+ vma = vma->vm_next;
67578+ }
67579+ if (vma_exec) {
67580+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
67581+ if (IS_ERR(path_exec))
67582+ path_exec = "<path too long>";
67583+ else {
67584+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
67585+ if (path_exec) {
67586+ *path_exec = 0;
67587+ path_exec = buffer_exec;
67588+ } else
67589+ path_exec = "<path too long>";
67590+ }
67591+ }
67592+ if (vma_fault) {
67593+ start = vma_fault->vm_start;
67594+ end = vma_fault->vm_end;
67595+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
67596+ if (vma_fault->vm_file) {
67597+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
67598+ if (IS_ERR(path_fault))
67599+ path_fault = "<path too long>";
67600+ else {
67601+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
67602+ if (path_fault) {
67603+ *path_fault = 0;
67604+ path_fault = buffer_fault;
67605+ } else
67606+ path_fault = "<path too long>";
67607+ }
67608+ } else
67609+ path_fault = "<anonymous mapping>";
67610+ }
67611+ up_read(&mm->mmap_sem);
67612+ }
67613+ if (tsk->signal->curr_ip)
67614+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
67615+ else
67616+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
67617+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
67618+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
67619+ task_uid(tsk), task_euid(tsk), pc, sp);
67620+ free_page((unsigned long)buffer_exec);
67621+ free_page((unsigned long)buffer_fault);
67622+ pax_report_insns(regs, pc, sp);
67623+ do_coredump(SIGKILL, SIGKILL, regs);
67624+}
67625+#endif
67626+
67627+#ifdef CONFIG_PAX_REFCOUNT
67628+void pax_report_refcount_overflow(struct pt_regs *regs)
67629+{
67630+ if (current->signal->curr_ip)
67631+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67632+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
67633+ else
67634+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67635+ current->comm, task_pid_nr(current), current_uid(), current_euid());
67636+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
67637+ show_regs(regs);
67638+ force_sig_specific(SIGKILL, current);
67639+}
67640+#endif
67641+
67642+#ifdef CONFIG_PAX_USERCOPY
67643+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
67644+int object_is_on_stack(const void *obj, unsigned long len)
67645+{
67646+ const void * const stack = task_stack_page(current);
67647+ const void * const stackend = stack + THREAD_SIZE;
67648+
67649+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67650+ const void *frame = NULL;
67651+ const void *oldframe;
67652+#endif
67653+
67654+ if (obj + len < obj)
67655+ return -1;
67656+
67657+ if (obj + len <= stack || stackend <= obj)
67658+ return 0;
67659+
67660+ if (obj < stack || stackend < obj + len)
67661+ return -1;
67662+
67663+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67664+ oldframe = __builtin_frame_address(1);
67665+ if (oldframe)
67666+ frame = __builtin_frame_address(2);
67667+ /*
67668+ low ----------------------------------------------> high
67669+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
67670+ ^----------------^
67671+ allow copies only within here
67672+ */
67673+ while (stack <= frame && frame < stackend) {
67674+ /* if obj + len extends past the last frame, this
67675+ check won't pass and the next frame will be 0,
67676+ causing us to bail out and correctly report
67677+ the copy as invalid
67678+ */
67679+ if (obj + len <= frame)
67680+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
67681+ oldframe = frame;
67682+ frame = *(const void * const *)frame;
67683+ }
67684+ return -1;
67685+#else
67686+ return 1;
67687+#endif
67688+}
67689+
67690+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
67691+{
67692+ if (current->signal->curr_ip)
67693+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67694+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67695+ else
67696+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67697+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67698+
67699+ dump_stack();
67700+ gr_handle_kernel_exploit();
67701+ do_group_exit(SIGKILL);
67702+}
67703+#endif
67704+
67705+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
67706+void pax_track_stack(void)
67707+{
67708+ unsigned long sp = (unsigned long)&sp;
67709+ if (sp < current_thread_info()->lowest_stack &&
67710+ sp > (unsigned long)task_stack_page(current))
67711+ current_thread_info()->lowest_stack = sp;
67712+}
67713+EXPORT_SYMBOL(pax_track_stack);
67714+#endif
67715+
67716+#ifdef CONFIG_PAX_SIZE_OVERFLOW
67717+void report_size_overflow(const char *file, unsigned int line, const char *func)
67718+{
67719+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
67720+ dump_stack();
67721+ do_group_exit(SIGKILL);
67722+}
67723+EXPORT_SYMBOL(report_size_overflow);
67724+#endif
67725+
67726 static int zap_process(struct task_struct *start)
67727 {
67728 struct task_struct *t;
67729@@ -1793,17 +2164,17 @@ static void wait_for_dump_helpers(struct file *file)
67730 pipe = file->f_path.dentry->d_inode->i_pipe;
67731
67732 pipe_lock(pipe);
67733- pipe->readers++;
67734- pipe->writers--;
67735+ atomic_inc(&pipe->readers);
67736+ atomic_dec(&pipe->writers);
67737
67738- while ((pipe->readers > 1) && (!signal_pending(current))) {
67739+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
67740 wake_up_interruptible_sync(&pipe->wait);
67741 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
67742 pipe_wait(pipe);
67743 }
67744
67745- pipe->readers--;
67746- pipe->writers++;
67747+ atomic_dec(&pipe->readers);
67748+ atomic_inc(&pipe->writers);
67749 pipe_unlock(pipe);
67750
67751 }
67752@@ -1826,10 +2197,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67753 char **helper_argv = NULL;
67754 int helper_argc = 0;
67755 int dump_count = 0;
67756- static atomic_t core_dump_count = ATOMIC_INIT(0);
67757+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
67758
67759 audit_core_dumps(signr);
67760
67761+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
67762+ gr_handle_brute_attach(current, mm->flags);
67763+
67764 binfmt = mm->binfmt;
67765 if (!binfmt || !binfmt->core_dump)
67766 goto fail;
67767@@ -1874,6 +2248,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67768 */
67769 clear_thread_flag(TIF_SIGPENDING);
67770
67771+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
67772+
67773 /*
67774 * lock_kernel() because format_corename() is controlled by sysctl, which
67775 * uses lock_kernel()
67776@@ -1908,7 +2284,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67777 goto fail_unlock;
67778 }
67779
67780- dump_count = atomic_inc_return(&core_dump_count);
67781+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
67782 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
67783 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
67784 task_tgid_vnr(current), current->comm);
67785@@ -1972,7 +2348,7 @@ close_fail:
67786 filp_close(file, NULL);
67787 fail_dropcount:
67788 if (dump_count)
67789- atomic_dec(&core_dump_count);
67790+ atomic_dec_unchecked(&core_dump_count);
67791 fail_unlock:
67792 if (helper_argv)
67793 argv_free(helper_argv);
67794diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
67795index 7f8d2e5..a1abdbb 100644
67796--- a/fs/ext2/balloc.c
67797+++ b/fs/ext2/balloc.c
67798@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
67799
67800 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67801 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67802- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67803+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67804 sbi->s_resuid != current_fsuid() &&
67805 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67806 return 0;
67807diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
67808index 27967f9..9f2a5fb 100644
67809--- a/fs/ext3/balloc.c
67810+++ b/fs/ext3/balloc.c
67811@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
67812
67813 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67814 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67815- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67816+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67817 sbi->s_resuid != current_fsuid() &&
67818 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67819 return 0;
67820diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
67821index e85b63c..80398e6 100644
67822--- a/fs/ext4/balloc.c
67823+++ b/fs/ext4/balloc.c
67824@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
67825 /* Hm, nope. Are (enough) root reserved blocks available? */
67826 if (sbi->s_resuid == current_fsuid() ||
67827 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
67828- capable(CAP_SYS_RESOURCE)) {
67829+ capable_nolog(CAP_SYS_RESOURCE)) {
67830 if (free_blocks >= (nblocks + dirty_blocks))
67831 return 1;
67832 }
67833diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
67834index 67c46ed..1f237e5 100644
67835--- a/fs/ext4/ext4.h
67836+++ b/fs/ext4/ext4.h
67837@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
67838
67839 /* stats for buddy allocator */
67840 spinlock_t s_mb_pa_lock;
67841- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
67842- atomic_t s_bal_success; /* we found long enough chunks */
67843- atomic_t s_bal_allocated; /* in blocks */
67844- atomic_t s_bal_ex_scanned; /* total extents scanned */
67845- atomic_t s_bal_goals; /* goal hits */
67846- atomic_t s_bal_breaks; /* too long searches */
67847- atomic_t s_bal_2orders; /* 2^order hits */
67848+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
67849+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
67850+ atomic_unchecked_t s_bal_allocated; /* in blocks */
67851+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
67852+ atomic_unchecked_t s_bal_goals; /* goal hits */
67853+ atomic_unchecked_t s_bal_breaks; /* too long searches */
67854+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
67855 spinlock_t s_bal_lock;
67856 unsigned long s_mb_buddies_generated;
67857 unsigned long long s_mb_generation_time;
67858- atomic_t s_mb_lost_chunks;
67859- atomic_t s_mb_preallocated;
67860- atomic_t s_mb_discarded;
67861+ atomic_unchecked_t s_mb_lost_chunks;
67862+ atomic_unchecked_t s_mb_preallocated;
67863+ atomic_unchecked_t s_mb_discarded;
67864 atomic_t s_lock_busy;
67865
67866 /* locality groups */
67867diff --git a/fs/ext4/file.c b/fs/ext4/file.c
67868index 2a60541..7439d61 100644
67869--- a/fs/ext4/file.c
67870+++ b/fs/ext4/file.c
67871@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
67872 cp = d_path(&path, buf, sizeof(buf));
67873 path_put(&path);
67874 if (!IS_ERR(cp)) {
67875- memcpy(sbi->s_es->s_last_mounted, cp,
67876- sizeof(sbi->s_es->s_last_mounted));
67877+ strlcpy(sbi->s_es->s_last_mounted, cp,
67878+ sizeof(sbi->s_es->s_last_mounted));
67879 sb->s_dirt = 1;
67880 }
67881 }
67882diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
67883index 42bac1b..0aab9d8 100644
67884--- a/fs/ext4/mballoc.c
67885+++ b/fs/ext4/mballoc.c
67886@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
67887 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
67888
67889 if (EXT4_SB(sb)->s_mb_stats)
67890- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
67891+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
67892
67893 break;
67894 }
67895@@ -2131,7 +2131,7 @@ repeat:
67896 ac->ac_status = AC_STATUS_CONTINUE;
67897 ac->ac_flags |= EXT4_MB_HINT_FIRST;
67898 cr = 3;
67899- atomic_inc(&sbi->s_mb_lost_chunks);
67900+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
67901 goto repeat;
67902 }
67903 }
67904@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
67905 ext4_grpblk_t counters[16];
67906 } sg;
67907
67908+ pax_track_stack();
67909+
67910 group--;
67911 if (group == 0)
67912 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
67913@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
67914 if (sbi->s_mb_stats) {
67915 printk(KERN_INFO
67916 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
67917- atomic_read(&sbi->s_bal_allocated),
67918- atomic_read(&sbi->s_bal_reqs),
67919- atomic_read(&sbi->s_bal_success));
67920+ atomic_read_unchecked(&sbi->s_bal_allocated),
67921+ atomic_read_unchecked(&sbi->s_bal_reqs),
67922+ atomic_read_unchecked(&sbi->s_bal_success));
67923 printk(KERN_INFO
67924 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
67925 "%u 2^N hits, %u breaks, %u lost\n",
67926- atomic_read(&sbi->s_bal_ex_scanned),
67927- atomic_read(&sbi->s_bal_goals),
67928- atomic_read(&sbi->s_bal_2orders),
67929- atomic_read(&sbi->s_bal_breaks),
67930- atomic_read(&sbi->s_mb_lost_chunks));
67931+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
67932+ atomic_read_unchecked(&sbi->s_bal_goals),
67933+ atomic_read_unchecked(&sbi->s_bal_2orders),
67934+ atomic_read_unchecked(&sbi->s_bal_breaks),
67935+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
67936 printk(KERN_INFO
67937 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
67938 sbi->s_mb_buddies_generated++,
67939 sbi->s_mb_generation_time);
67940 printk(KERN_INFO
67941 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
67942- atomic_read(&sbi->s_mb_preallocated),
67943- atomic_read(&sbi->s_mb_discarded));
67944+ atomic_read_unchecked(&sbi->s_mb_preallocated),
67945+ atomic_read_unchecked(&sbi->s_mb_discarded));
67946 }
67947
67948 free_percpu(sbi->s_locality_groups);
67949@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
67950 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
67951
67952 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
67953- atomic_inc(&sbi->s_bal_reqs);
67954- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
67955+ atomic_inc_unchecked(&sbi->s_bal_reqs);
67956+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
67957 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
67958- atomic_inc(&sbi->s_bal_success);
67959- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
67960+ atomic_inc_unchecked(&sbi->s_bal_success);
67961+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
67962 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
67963 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
67964- atomic_inc(&sbi->s_bal_goals);
67965+ atomic_inc_unchecked(&sbi->s_bal_goals);
67966 if (ac->ac_found > sbi->s_mb_max_to_scan)
67967- atomic_inc(&sbi->s_bal_breaks);
67968+ atomic_inc_unchecked(&sbi->s_bal_breaks);
67969 }
67970
67971 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
67972@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
67973 trace_ext4_mb_new_inode_pa(ac, pa);
67974
67975 ext4_mb_use_inode_pa(ac, pa);
67976- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67977+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67978
67979 ei = EXT4_I(ac->ac_inode);
67980 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
67981@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
67982 trace_ext4_mb_new_group_pa(ac, pa);
67983
67984 ext4_mb_use_group_pa(ac, pa);
67985- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67986+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67987
67988 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
67989 lg = ac->ac_lg;
67990@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
67991 * from the bitmap and continue.
67992 */
67993 }
67994- atomic_add(free, &sbi->s_mb_discarded);
67995+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
67996
67997 return err;
67998 }
67999@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
68000 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
68001 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
68002 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
68003- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68004+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
68005
68006 if (ac) {
68007 ac->ac_sb = sb;
68008diff --git a/fs/ext4/super.c b/fs/ext4/super.c
68009index f1e7077..edd86b2 100644
68010--- a/fs/ext4/super.c
68011+++ b/fs/ext4/super.c
68012@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
68013 }
68014
68015
68016-static struct sysfs_ops ext4_attr_ops = {
68017+static const struct sysfs_ops ext4_attr_ops = {
68018 .show = ext4_attr_show,
68019 .store = ext4_attr_store,
68020 };
68021diff --git a/fs/fcntl.c b/fs/fcntl.c
68022index 97e01dc..e9aab2d 100644
68023--- a/fs/fcntl.c
68024+++ b/fs/fcntl.c
68025@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
68026 if (err)
68027 return err;
68028
68029+ if (gr_handle_chroot_fowner(pid, type))
68030+ return -ENOENT;
68031+ if (gr_check_protected_task_fowner(pid, type))
68032+ return -EACCES;
68033+
68034 f_modown(filp, pid, type, force);
68035 return 0;
68036 }
68037@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
68038
68039 static int f_setown_ex(struct file *filp, unsigned long arg)
68040 {
68041- struct f_owner_ex * __user owner_p = (void * __user)arg;
68042+ struct f_owner_ex __user *owner_p = (void __user *)arg;
68043 struct f_owner_ex owner;
68044 struct pid *pid;
68045 int type;
68046@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
68047
68048 static int f_getown_ex(struct file *filp, unsigned long arg)
68049 {
68050- struct f_owner_ex * __user owner_p = (void * __user)arg;
68051+ struct f_owner_ex __user *owner_p = (void __user *)arg;
68052 struct f_owner_ex owner;
68053 int ret = 0;
68054
68055@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
68056 switch (cmd) {
68057 case F_DUPFD:
68058 case F_DUPFD_CLOEXEC:
68059+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
68060 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68061 break;
68062 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
68063diff --git a/fs/fifo.c b/fs/fifo.c
68064index f8f97b8..b1f2259 100644
68065--- a/fs/fifo.c
68066+++ b/fs/fifo.c
68067@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
68068 */
68069 filp->f_op = &read_pipefifo_fops;
68070 pipe->r_counter++;
68071- if (pipe->readers++ == 0)
68072+ if (atomic_inc_return(&pipe->readers) == 1)
68073 wake_up_partner(inode);
68074
68075- if (!pipe->writers) {
68076+ if (!atomic_read(&pipe->writers)) {
68077 if ((filp->f_flags & O_NONBLOCK)) {
68078 /* suppress POLLHUP until we have
68079 * seen a writer */
68080@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
68081 * errno=ENXIO when there is no process reading the FIFO.
68082 */
68083 ret = -ENXIO;
68084- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
68085+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
68086 goto err;
68087
68088 filp->f_op = &write_pipefifo_fops;
68089 pipe->w_counter++;
68090- if (!pipe->writers++)
68091+ if (atomic_inc_return(&pipe->writers) == 1)
68092 wake_up_partner(inode);
68093
68094- if (!pipe->readers) {
68095+ if (!atomic_read(&pipe->readers)) {
68096 wait_for_partner(inode, &pipe->r_counter);
68097 if (signal_pending(current))
68098 goto err_wr;
68099@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
68100 */
68101 filp->f_op = &rdwr_pipefifo_fops;
68102
68103- pipe->readers++;
68104- pipe->writers++;
68105+ atomic_inc(&pipe->readers);
68106+ atomic_inc(&pipe->writers);
68107 pipe->r_counter++;
68108 pipe->w_counter++;
68109- if (pipe->readers == 1 || pipe->writers == 1)
68110+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
68111 wake_up_partner(inode);
68112 break;
68113
68114@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
68115 return 0;
68116
68117 err_rd:
68118- if (!--pipe->readers)
68119+ if (atomic_dec_and_test(&pipe->readers))
68120 wake_up_interruptible(&pipe->wait);
68121 ret = -ERESTARTSYS;
68122 goto err;
68123
68124 err_wr:
68125- if (!--pipe->writers)
68126+ if (atomic_dec_and_test(&pipe->writers))
68127 wake_up_interruptible(&pipe->wait);
68128 ret = -ERESTARTSYS;
68129 goto err;
68130
68131 err:
68132- if (!pipe->readers && !pipe->writers)
68133+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
68134 free_pipe_info(inode);
68135
68136 err_nocleanup:
68137diff --git a/fs/file.c b/fs/file.c
68138index 87e1290..a930cc4 100644
68139--- a/fs/file.c
68140+++ b/fs/file.c
68141@@ -14,6 +14,7 @@
68142 #include <linux/slab.h>
68143 #include <linux/vmalloc.h>
68144 #include <linux/file.h>
68145+#include <linux/security.h>
68146 #include <linux/fdtable.h>
68147 #include <linux/bitops.h>
68148 #include <linux/interrupt.h>
68149@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
68150 * N.B. For clone tasks sharing a files structure, this test
68151 * will limit the total number of files that can be opened.
68152 */
68153+
68154+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
68155 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
68156 return -EMFILE;
68157
68158diff --git a/fs/filesystems.c b/fs/filesystems.c
68159index a24c58e..53f91ee 100644
68160--- a/fs/filesystems.c
68161+++ b/fs/filesystems.c
68162@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
68163 int len = dot ? dot - name : strlen(name);
68164
68165 fs = __get_fs_type(name, len);
68166+
68167+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68168+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
68169+#else
68170 if (!fs && (request_module("%.*s", len, name) == 0))
68171+#endif
68172 fs = __get_fs_type(name, len);
68173
68174 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
68175diff --git a/fs/fs_struct.c b/fs/fs_struct.c
68176index eee0590..1181166 100644
68177--- a/fs/fs_struct.c
68178+++ b/fs/fs_struct.c
68179@@ -4,6 +4,7 @@
68180 #include <linux/path.h>
68181 #include <linux/slab.h>
68182 #include <linux/fs_struct.h>
68183+#include <linux/grsecurity.h>
68184
68185 /*
68186 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
68187@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
68188 old_root = fs->root;
68189 fs->root = *path;
68190 path_get(path);
68191+ gr_set_chroot_entries(current, path);
68192 write_unlock(&fs->lock);
68193 if (old_root.dentry)
68194 path_put(&old_root);
68195@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
68196 && fs->root.mnt == old_root->mnt) {
68197 path_get(new_root);
68198 fs->root = *new_root;
68199+ gr_set_chroot_entries(p, new_root);
68200 count++;
68201 }
68202 if (fs->pwd.dentry == old_root->dentry
68203@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
68204 task_lock(tsk);
68205 write_lock(&fs->lock);
68206 tsk->fs = NULL;
68207- kill = !--fs->users;
68208+ gr_clear_chroot_entries(tsk);
68209+ kill = !atomic_dec_return(&fs->users);
68210 write_unlock(&fs->lock);
68211 task_unlock(tsk);
68212 if (kill)
68213@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
68214 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
68215 /* We don't need to lock fs - think why ;-) */
68216 if (fs) {
68217- fs->users = 1;
68218+ atomic_set(&fs->users, 1);
68219 fs->in_exec = 0;
68220 rwlock_init(&fs->lock);
68221 fs->umask = old->umask;
68222@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
68223
68224 task_lock(current);
68225 write_lock(&fs->lock);
68226- kill = !--fs->users;
68227+ kill = !atomic_dec_return(&fs->users);
68228 current->fs = new_fs;
68229+ gr_set_chroot_entries(current, &new_fs->root);
68230 write_unlock(&fs->lock);
68231 task_unlock(current);
68232
68233@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
68234
68235 int current_umask(void)
68236 {
68237- return current->fs->umask;
68238+ return current->fs->umask | gr_acl_umask();
68239 }
68240 EXPORT_SYMBOL(current_umask);
68241
68242 /* to be mentioned only in INIT_TASK */
68243 struct fs_struct init_fs = {
68244- .users = 1,
68245+ .users = ATOMIC_INIT(1),
68246 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
68247 .umask = 0022,
68248 };
68249@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
68250 task_lock(current);
68251
68252 write_lock(&init_fs.lock);
68253- init_fs.users++;
68254+ atomic_inc(&init_fs.users);
68255 write_unlock(&init_fs.lock);
68256
68257 write_lock(&fs->lock);
68258 current->fs = &init_fs;
68259- kill = !--fs->users;
68260+ gr_set_chroot_entries(current, &current->fs->root);
68261+ kill = !atomic_dec_return(&fs->users);
68262 write_unlock(&fs->lock);
68263
68264 task_unlock(current);
68265diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
68266index 9905350..02eaec4 100644
68267--- a/fs/fscache/cookie.c
68268+++ b/fs/fscache/cookie.c
68269@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
68270 parent ? (char *) parent->def->name : "<no-parent>",
68271 def->name, netfs_data);
68272
68273- fscache_stat(&fscache_n_acquires);
68274+ fscache_stat_unchecked(&fscache_n_acquires);
68275
68276 /* if there's no parent cookie, then we don't create one here either */
68277 if (!parent) {
68278- fscache_stat(&fscache_n_acquires_null);
68279+ fscache_stat_unchecked(&fscache_n_acquires_null);
68280 _leave(" [no parent]");
68281 return NULL;
68282 }
68283@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
68284 /* allocate and initialise a cookie */
68285 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
68286 if (!cookie) {
68287- fscache_stat(&fscache_n_acquires_oom);
68288+ fscache_stat_unchecked(&fscache_n_acquires_oom);
68289 _leave(" [ENOMEM]");
68290 return NULL;
68291 }
68292@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68293
68294 switch (cookie->def->type) {
68295 case FSCACHE_COOKIE_TYPE_INDEX:
68296- fscache_stat(&fscache_n_cookie_index);
68297+ fscache_stat_unchecked(&fscache_n_cookie_index);
68298 break;
68299 case FSCACHE_COOKIE_TYPE_DATAFILE:
68300- fscache_stat(&fscache_n_cookie_data);
68301+ fscache_stat_unchecked(&fscache_n_cookie_data);
68302 break;
68303 default:
68304- fscache_stat(&fscache_n_cookie_special);
68305+ fscache_stat_unchecked(&fscache_n_cookie_special);
68306 break;
68307 }
68308
68309@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
68310 if (fscache_acquire_non_index_cookie(cookie) < 0) {
68311 atomic_dec(&parent->n_children);
68312 __fscache_cookie_put(cookie);
68313- fscache_stat(&fscache_n_acquires_nobufs);
68314+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
68315 _leave(" = NULL");
68316 return NULL;
68317 }
68318 }
68319
68320- fscache_stat(&fscache_n_acquires_ok);
68321+ fscache_stat_unchecked(&fscache_n_acquires_ok);
68322 _leave(" = %p", cookie);
68323 return cookie;
68324 }
68325@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
68326 cache = fscache_select_cache_for_object(cookie->parent);
68327 if (!cache) {
68328 up_read(&fscache_addremove_sem);
68329- fscache_stat(&fscache_n_acquires_no_cache);
68330+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
68331 _leave(" = -ENOMEDIUM [no cache]");
68332 return -ENOMEDIUM;
68333 }
68334@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
68335 object = cache->ops->alloc_object(cache, cookie);
68336 fscache_stat_d(&fscache_n_cop_alloc_object);
68337 if (IS_ERR(object)) {
68338- fscache_stat(&fscache_n_object_no_alloc);
68339+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
68340 ret = PTR_ERR(object);
68341 goto error;
68342 }
68343
68344- fscache_stat(&fscache_n_object_alloc);
68345+ fscache_stat_unchecked(&fscache_n_object_alloc);
68346
68347 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
68348
68349@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
68350 struct fscache_object *object;
68351 struct hlist_node *_p;
68352
68353- fscache_stat(&fscache_n_updates);
68354+ fscache_stat_unchecked(&fscache_n_updates);
68355
68356 if (!cookie) {
68357- fscache_stat(&fscache_n_updates_null);
68358+ fscache_stat_unchecked(&fscache_n_updates_null);
68359 _leave(" [no cookie]");
68360 return;
68361 }
68362@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68363 struct fscache_object *object;
68364 unsigned long event;
68365
68366- fscache_stat(&fscache_n_relinquishes);
68367+ fscache_stat_unchecked(&fscache_n_relinquishes);
68368 if (retire)
68369- fscache_stat(&fscache_n_relinquishes_retire);
68370+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
68371
68372 if (!cookie) {
68373- fscache_stat(&fscache_n_relinquishes_null);
68374+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
68375 _leave(" [no cookie]");
68376 return;
68377 }
68378@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
68379
68380 /* wait for the cookie to finish being instantiated (or to fail) */
68381 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
68382- fscache_stat(&fscache_n_relinquishes_waitcrt);
68383+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
68384 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
68385 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
68386 }
68387diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
68388index edd7434..0725e66 100644
68389--- a/fs/fscache/internal.h
68390+++ b/fs/fscache/internal.h
68391@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
68392 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
68393 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
68394
68395-extern atomic_t fscache_n_op_pend;
68396-extern atomic_t fscache_n_op_run;
68397-extern atomic_t fscache_n_op_enqueue;
68398-extern atomic_t fscache_n_op_deferred_release;
68399-extern atomic_t fscache_n_op_release;
68400-extern atomic_t fscache_n_op_gc;
68401-extern atomic_t fscache_n_op_cancelled;
68402-extern atomic_t fscache_n_op_rejected;
68403+extern atomic_unchecked_t fscache_n_op_pend;
68404+extern atomic_unchecked_t fscache_n_op_run;
68405+extern atomic_unchecked_t fscache_n_op_enqueue;
68406+extern atomic_unchecked_t fscache_n_op_deferred_release;
68407+extern atomic_unchecked_t fscache_n_op_release;
68408+extern atomic_unchecked_t fscache_n_op_gc;
68409+extern atomic_unchecked_t fscache_n_op_cancelled;
68410+extern atomic_unchecked_t fscache_n_op_rejected;
68411
68412-extern atomic_t fscache_n_attr_changed;
68413-extern atomic_t fscache_n_attr_changed_ok;
68414-extern atomic_t fscache_n_attr_changed_nobufs;
68415-extern atomic_t fscache_n_attr_changed_nomem;
68416-extern atomic_t fscache_n_attr_changed_calls;
68417+extern atomic_unchecked_t fscache_n_attr_changed;
68418+extern atomic_unchecked_t fscache_n_attr_changed_ok;
68419+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
68420+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
68421+extern atomic_unchecked_t fscache_n_attr_changed_calls;
68422
68423-extern atomic_t fscache_n_allocs;
68424-extern atomic_t fscache_n_allocs_ok;
68425-extern atomic_t fscache_n_allocs_wait;
68426-extern atomic_t fscache_n_allocs_nobufs;
68427-extern atomic_t fscache_n_allocs_intr;
68428-extern atomic_t fscache_n_allocs_object_dead;
68429-extern atomic_t fscache_n_alloc_ops;
68430-extern atomic_t fscache_n_alloc_op_waits;
68431+extern atomic_unchecked_t fscache_n_allocs;
68432+extern atomic_unchecked_t fscache_n_allocs_ok;
68433+extern atomic_unchecked_t fscache_n_allocs_wait;
68434+extern atomic_unchecked_t fscache_n_allocs_nobufs;
68435+extern atomic_unchecked_t fscache_n_allocs_intr;
68436+extern atomic_unchecked_t fscache_n_allocs_object_dead;
68437+extern atomic_unchecked_t fscache_n_alloc_ops;
68438+extern atomic_unchecked_t fscache_n_alloc_op_waits;
68439
68440-extern atomic_t fscache_n_retrievals;
68441-extern atomic_t fscache_n_retrievals_ok;
68442-extern atomic_t fscache_n_retrievals_wait;
68443-extern atomic_t fscache_n_retrievals_nodata;
68444-extern atomic_t fscache_n_retrievals_nobufs;
68445-extern atomic_t fscache_n_retrievals_intr;
68446-extern atomic_t fscache_n_retrievals_nomem;
68447-extern atomic_t fscache_n_retrievals_object_dead;
68448-extern atomic_t fscache_n_retrieval_ops;
68449-extern atomic_t fscache_n_retrieval_op_waits;
68450+extern atomic_unchecked_t fscache_n_retrievals;
68451+extern atomic_unchecked_t fscache_n_retrievals_ok;
68452+extern atomic_unchecked_t fscache_n_retrievals_wait;
68453+extern atomic_unchecked_t fscache_n_retrievals_nodata;
68454+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
68455+extern atomic_unchecked_t fscache_n_retrievals_intr;
68456+extern atomic_unchecked_t fscache_n_retrievals_nomem;
68457+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
68458+extern atomic_unchecked_t fscache_n_retrieval_ops;
68459+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
68460
68461-extern atomic_t fscache_n_stores;
68462-extern atomic_t fscache_n_stores_ok;
68463-extern atomic_t fscache_n_stores_again;
68464-extern atomic_t fscache_n_stores_nobufs;
68465-extern atomic_t fscache_n_stores_oom;
68466-extern atomic_t fscache_n_store_ops;
68467-extern atomic_t fscache_n_store_calls;
68468-extern atomic_t fscache_n_store_pages;
68469-extern atomic_t fscache_n_store_radix_deletes;
68470-extern atomic_t fscache_n_store_pages_over_limit;
68471+extern atomic_unchecked_t fscache_n_stores;
68472+extern atomic_unchecked_t fscache_n_stores_ok;
68473+extern atomic_unchecked_t fscache_n_stores_again;
68474+extern atomic_unchecked_t fscache_n_stores_nobufs;
68475+extern atomic_unchecked_t fscache_n_stores_oom;
68476+extern atomic_unchecked_t fscache_n_store_ops;
68477+extern atomic_unchecked_t fscache_n_store_calls;
68478+extern atomic_unchecked_t fscache_n_store_pages;
68479+extern atomic_unchecked_t fscache_n_store_radix_deletes;
68480+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
68481
68482-extern atomic_t fscache_n_store_vmscan_not_storing;
68483-extern atomic_t fscache_n_store_vmscan_gone;
68484-extern atomic_t fscache_n_store_vmscan_busy;
68485-extern atomic_t fscache_n_store_vmscan_cancelled;
68486+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
68487+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
68488+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
68489+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
68490
68491-extern atomic_t fscache_n_marks;
68492-extern atomic_t fscache_n_uncaches;
68493+extern atomic_unchecked_t fscache_n_marks;
68494+extern atomic_unchecked_t fscache_n_uncaches;
68495
68496-extern atomic_t fscache_n_acquires;
68497-extern atomic_t fscache_n_acquires_null;
68498-extern atomic_t fscache_n_acquires_no_cache;
68499-extern atomic_t fscache_n_acquires_ok;
68500-extern atomic_t fscache_n_acquires_nobufs;
68501-extern atomic_t fscache_n_acquires_oom;
68502+extern atomic_unchecked_t fscache_n_acquires;
68503+extern atomic_unchecked_t fscache_n_acquires_null;
68504+extern atomic_unchecked_t fscache_n_acquires_no_cache;
68505+extern atomic_unchecked_t fscache_n_acquires_ok;
68506+extern atomic_unchecked_t fscache_n_acquires_nobufs;
68507+extern atomic_unchecked_t fscache_n_acquires_oom;
68508
68509-extern atomic_t fscache_n_updates;
68510-extern atomic_t fscache_n_updates_null;
68511-extern atomic_t fscache_n_updates_run;
68512+extern atomic_unchecked_t fscache_n_updates;
68513+extern atomic_unchecked_t fscache_n_updates_null;
68514+extern atomic_unchecked_t fscache_n_updates_run;
68515
68516-extern atomic_t fscache_n_relinquishes;
68517-extern atomic_t fscache_n_relinquishes_null;
68518-extern atomic_t fscache_n_relinquishes_waitcrt;
68519-extern atomic_t fscache_n_relinquishes_retire;
68520+extern atomic_unchecked_t fscache_n_relinquishes;
68521+extern atomic_unchecked_t fscache_n_relinquishes_null;
68522+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
68523+extern atomic_unchecked_t fscache_n_relinquishes_retire;
68524
68525-extern atomic_t fscache_n_cookie_index;
68526-extern atomic_t fscache_n_cookie_data;
68527-extern atomic_t fscache_n_cookie_special;
68528+extern atomic_unchecked_t fscache_n_cookie_index;
68529+extern atomic_unchecked_t fscache_n_cookie_data;
68530+extern atomic_unchecked_t fscache_n_cookie_special;
68531
68532-extern atomic_t fscache_n_object_alloc;
68533-extern atomic_t fscache_n_object_no_alloc;
68534-extern atomic_t fscache_n_object_lookups;
68535-extern atomic_t fscache_n_object_lookups_negative;
68536-extern atomic_t fscache_n_object_lookups_positive;
68537-extern atomic_t fscache_n_object_lookups_timed_out;
68538-extern atomic_t fscache_n_object_created;
68539-extern atomic_t fscache_n_object_avail;
68540-extern atomic_t fscache_n_object_dead;
68541+extern atomic_unchecked_t fscache_n_object_alloc;
68542+extern atomic_unchecked_t fscache_n_object_no_alloc;
68543+extern atomic_unchecked_t fscache_n_object_lookups;
68544+extern atomic_unchecked_t fscache_n_object_lookups_negative;
68545+extern atomic_unchecked_t fscache_n_object_lookups_positive;
68546+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
68547+extern atomic_unchecked_t fscache_n_object_created;
68548+extern atomic_unchecked_t fscache_n_object_avail;
68549+extern atomic_unchecked_t fscache_n_object_dead;
68550
68551-extern atomic_t fscache_n_checkaux_none;
68552-extern atomic_t fscache_n_checkaux_okay;
68553-extern atomic_t fscache_n_checkaux_update;
68554-extern atomic_t fscache_n_checkaux_obsolete;
68555+extern atomic_unchecked_t fscache_n_checkaux_none;
68556+extern atomic_unchecked_t fscache_n_checkaux_okay;
68557+extern atomic_unchecked_t fscache_n_checkaux_update;
68558+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
68559
68560 extern atomic_t fscache_n_cop_alloc_object;
68561 extern atomic_t fscache_n_cop_lookup_object;
68562@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
68563 atomic_inc(stat);
68564 }
68565
68566+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
68567+{
68568+ atomic_inc_unchecked(stat);
68569+}
68570+
68571 static inline void fscache_stat_d(atomic_t *stat)
68572 {
68573 atomic_dec(stat);
68574@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
68575
68576 #define __fscache_stat(stat) (NULL)
68577 #define fscache_stat(stat) do {} while (0)
68578+#define fscache_stat_unchecked(stat) do {} while (0)
68579 #define fscache_stat_d(stat) do {} while (0)
68580 #endif
68581
68582diff --git a/fs/fscache/object.c b/fs/fscache/object.c
68583index e513ac5..e888d34 100644
68584--- a/fs/fscache/object.c
68585+++ b/fs/fscache/object.c
68586@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68587 /* update the object metadata on disk */
68588 case FSCACHE_OBJECT_UPDATING:
68589 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
68590- fscache_stat(&fscache_n_updates_run);
68591+ fscache_stat_unchecked(&fscache_n_updates_run);
68592 fscache_stat(&fscache_n_cop_update_object);
68593 object->cache->ops->update_object(object);
68594 fscache_stat_d(&fscache_n_cop_update_object);
68595@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68596 spin_lock(&object->lock);
68597 object->state = FSCACHE_OBJECT_DEAD;
68598 spin_unlock(&object->lock);
68599- fscache_stat(&fscache_n_object_dead);
68600+ fscache_stat_unchecked(&fscache_n_object_dead);
68601 goto terminal_transit;
68602
68603 /* handle the parent cache of this object being withdrawn from
68604@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68605 spin_lock(&object->lock);
68606 object->state = FSCACHE_OBJECT_DEAD;
68607 spin_unlock(&object->lock);
68608- fscache_stat(&fscache_n_object_dead);
68609+ fscache_stat_unchecked(&fscache_n_object_dead);
68610 goto terminal_transit;
68611
68612 /* complain about the object being woken up once it is
68613@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68614 parent->cookie->def->name, cookie->def->name,
68615 object->cache->tag->name);
68616
68617- fscache_stat(&fscache_n_object_lookups);
68618+ fscache_stat_unchecked(&fscache_n_object_lookups);
68619 fscache_stat(&fscache_n_cop_lookup_object);
68620 ret = object->cache->ops->lookup_object(object);
68621 fscache_stat_d(&fscache_n_cop_lookup_object);
68622@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68623 if (ret == -ETIMEDOUT) {
68624 /* probably stuck behind another object, so move this one to
68625 * the back of the queue */
68626- fscache_stat(&fscache_n_object_lookups_timed_out);
68627+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
68628 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68629 }
68630
68631@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
68632
68633 spin_lock(&object->lock);
68634 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68635- fscache_stat(&fscache_n_object_lookups_negative);
68636+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
68637
68638 /* transit here to allow write requests to begin stacking up
68639 * and read requests to begin returning ENODATA */
68640@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
68641 * result, in which case there may be data available */
68642 spin_lock(&object->lock);
68643 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68644- fscache_stat(&fscache_n_object_lookups_positive);
68645+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
68646
68647 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
68648
68649@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
68650 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68651 } else {
68652 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
68653- fscache_stat(&fscache_n_object_created);
68654+ fscache_stat_unchecked(&fscache_n_object_created);
68655
68656 object->state = FSCACHE_OBJECT_AVAILABLE;
68657 spin_unlock(&object->lock);
68658@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
68659 fscache_enqueue_dependents(object);
68660
68661 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
68662- fscache_stat(&fscache_n_object_avail);
68663+ fscache_stat_unchecked(&fscache_n_object_avail);
68664
68665 _leave("");
68666 }
68667@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68668 enum fscache_checkaux result;
68669
68670 if (!object->cookie->def->check_aux) {
68671- fscache_stat(&fscache_n_checkaux_none);
68672+ fscache_stat_unchecked(&fscache_n_checkaux_none);
68673 return FSCACHE_CHECKAUX_OKAY;
68674 }
68675
68676@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68677 switch (result) {
68678 /* entry okay as is */
68679 case FSCACHE_CHECKAUX_OKAY:
68680- fscache_stat(&fscache_n_checkaux_okay);
68681+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
68682 break;
68683
68684 /* entry requires update */
68685 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
68686- fscache_stat(&fscache_n_checkaux_update);
68687+ fscache_stat_unchecked(&fscache_n_checkaux_update);
68688 break;
68689
68690 /* entry requires deletion */
68691 case FSCACHE_CHECKAUX_OBSOLETE:
68692- fscache_stat(&fscache_n_checkaux_obsolete);
68693+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
68694 break;
68695
68696 default:
68697diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
68698index 313e79a..775240f 100644
68699--- a/fs/fscache/operation.c
68700+++ b/fs/fscache/operation.c
68701@@ -16,7 +16,7 @@
68702 #include <linux/seq_file.h>
68703 #include "internal.h"
68704
68705-atomic_t fscache_op_debug_id;
68706+atomic_unchecked_t fscache_op_debug_id;
68707 EXPORT_SYMBOL(fscache_op_debug_id);
68708
68709 /**
68710@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
68711 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
68712 ASSERTCMP(atomic_read(&op->usage), >, 0);
68713
68714- fscache_stat(&fscache_n_op_enqueue);
68715+ fscache_stat_unchecked(&fscache_n_op_enqueue);
68716 switch (op->flags & FSCACHE_OP_TYPE) {
68717 case FSCACHE_OP_FAST:
68718 _debug("queue fast");
68719@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
68720 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
68721 if (op->processor)
68722 fscache_enqueue_operation(op);
68723- fscache_stat(&fscache_n_op_run);
68724+ fscache_stat_unchecked(&fscache_n_op_run);
68725 }
68726
68727 /*
68728@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68729 if (object->n_ops > 0) {
68730 atomic_inc(&op->usage);
68731 list_add_tail(&op->pend_link, &object->pending_ops);
68732- fscache_stat(&fscache_n_op_pend);
68733+ fscache_stat_unchecked(&fscache_n_op_pend);
68734 } else if (!list_empty(&object->pending_ops)) {
68735 atomic_inc(&op->usage);
68736 list_add_tail(&op->pend_link, &object->pending_ops);
68737- fscache_stat(&fscache_n_op_pend);
68738+ fscache_stat_unchecked(&fscache_n_op_pend);
68739 fscache_start_operations(object);
68740 } else {
68741 ASSERTCMP(object->n_in_progress, ==, 0);
68742@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68743 object->n_exclusive++; /* reads and writes must wait */
68744 atomic_inc(&op->usage);
68745 list_add_tail(&op->pend_link, &object->pending_ops);
68746- fscache_stat(&fscache_n_op_pend);
68747+ fscache_stat_unchecked(&fscache_n_op_pend);
68748 ret = 0;
68749 } else {
68750 /* not allowed to submit ops in any other state */
68751@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
68752 if (object->n_exclusive > 0) {
68753 atomic_inc(&op->usage);
68754 list_add_tail(&op->pend_link, &object->pending_ops);
68755- fscache_stat(&fscache_n_op_pend);
68756+ fscache_stat_unchecked(&fscache_n_op_pend);
68757 } else if (!list_empty(&object->pending_ops)) {
68758 atomic_inc(&op->usage);
68759 list_add_tail(&op->pend_link, &object->pending_ops);
68760- fscache_stat(&fscache_n_op_pend);
68761+ fscache_stat_unchecked(&fscache_n_op_pend);
68762 fscache_start_operations(object);
68763 } else {
68764 ASSERTCMP(object->n_exclusive, ==, 0);
68765@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
68766 object->n_ops++;
68767 atomic_inc(&op->usage);
68768 list_add_tail(&op->pend_link, &object->pending_ops);
68769- fscache_stat(&fscache_n_op_pend);
68770+ fscache_stat_unchecked(&fscache_n_op_pend);
68771 ret = 0;
68772 } else if (object->state == FSCACHE_OBJECT_DYING ||
68773 object->state == FSCACHE_OBJECT_LC_DYING ||
68774 object->state == FSCACHE_OBJECT_WITHDRAWING) {
68775- fscache_stat(&fscache_n_op_rejected);
68776+ fscache_stat_unchecked(&fscache_n_op_rejected);
68777 ret = -ENOBUFS;
68778 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
68779 fscache_report_unexpected_submission(object, op, ostate);
68780@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
68781
68782 ret = -EBUSY;
68783 if (!list_empty(&op->pend_link)) {
68784- fscache_stat(&fscache_n_op_cancelled);
68785+ fscache_stat_unchecked(&fscache_n_op_cancelled);
68786 list_del_init(&op->pend_link);
68787 object->n_ops--;
68788 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
68789@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
68790 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
68791 BUG();
68792
68793- fscache_stat(&fscache_n_op_release);
68794+ fscache_stat_unchecked(&fscache_n_op_release);
68795
68796 if (op->release) {
68797 op->release(op);
68798@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
68799 * lock, and defer it otherwise */
68800 if (!spin_trylock(&object->lock)) {
68801 _debug("defer put");
68802- fscache_stat(&fscache_n_op_deferred_release);
68803+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
68804
68805 cache = object->cache;
68806 spin_lock(&cache->op_gc_list_lock);
68807@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
68808
68809 _debug("GC DEFERRED REL OBJ%x OP%x",
68810 object->debug_id, op->debug_id);
68811- fscache_stat(&fscache_n_op_gc);
68812+ fscache_stat_unchecked(&fscache_n_op_gc);
68813
68814 ASSERTCMP(atomic_read(&op->usage), ==, 0);
68815
68816diff --git a/fs/fscache/page.c b/fs/fscache/page.c
68817index c598ea4..6aac13e 100644
68818--- a/fs/fscache/page.c
68819+++ b/fs/fscache/page.c
68820@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68821 val = radix_tree_lookup(&cookie->stores, page->index);
68822 if (!val) {
68823 rcu_read_unlock();
68824- fscache_stat(&fscache_n_store_vmscan_not_storing);
68825+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
68826 __fscache_uncache_page(cookie, page);
68827 return true;
68828 }
68829@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68830 spin_unlock(&cookie->stores_lock);
68831
68832 if (xpage) {
68833- fscache_stat(&fscache_n_store_vmscan_cancelled);
68834- fscache_stat(&fscache_n_store_radix_deletes);
68835+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
68836+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68837 ASSERTCMP(xpage, ==, page);
68838 } else {
68839- fscache_stat(&fscache_n_store_vmscan_gone);
68840+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
68841 }
68842
68843 wake_up_bit(&cookie->flags, 0);
68844@@ -106,7 +106,7 @@ page_busy:
68845 /* we might want to wait here, but that could deadlock the allocator as
68846 * the slow-work threads writing to the cache may all end up sleeping
68847 * on memory allocation */
68848- fscache_stat(&fscache_n_store_vmscan_busy);
68849+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
68850 return false;
68851 }
68852 EXPORT_SYMBOL(__fscache_maybe_release_page);
68853@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
68854 FSCACHE_COOKIE_STORING_TAG);
68855 if (!radix_tree_tag_get(&cookie->stores, page->index,
68856 FSCACHE_COOKIE_PENDING_TAG)) {
68857- fscache_stat(&fscache_n_store_radix_deletes);
68858+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68859 xpage = radix_tree_delete(&cookie->stores, page->index);
68860 }
68861 spin_unlock(&cookie->stores_lock);
68862@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
68863
68864 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
68865
68866- fscache_stat(&fscache_n_attr_changed_calls);
68867+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
68868
68869 if (fscache_object_is_active(object)) {
68870 fscache_set_op_state(op, "CallFS");
68871@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68872
68873 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
68874
68875- fscache_stat(&fscache_n_attr_changed);
68876+ fscache_stat_unchecked(&fscache_n_attr_changed);
68877
68878 op = kzalloc(sizeof(*op), GFP_KERNEL);
68879 if (!op) {
68880- fscache_stat(&fscache_n_attr_changed_nomem);
68881+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
68882 _leave(" = -ENOMEM");
68883 return -ENOMEM;
68884 }
68885@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68886 if (fscache_submit_exclusive_op(object, op) < 0)
68887 goto nobufs;
68888 spin_unlock(&cookie->lock);
68889- fscache_stat(&fscache_n_attr_changed_ok);
68890+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
68891 fscache_put_operation(op);
68892 _leave(" = 0");
68893 return 0;
68894@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68895 nobufs:
68896 spin_unlock(&cookie->lock);
68897 kfree(op);
68898- fscache_stat(&fscache_n_attr_changed_nobufs);
68899+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
68900 _leave(" = %d", -ENOBUFS);
68901 return -ENOBUFS;
68902 }
68903@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
68904 /* allocate a retrieval operation and attempt to submit it */
68905 op = kzalloc(sizeof(*op), GFP_NOIO);
68906 if (!op) {
68907- fscache_stat(&fscache_n_retrievals_nomem);
68908+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68909 return NULL;
68910 }
68911
68912@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
68913 return 0;
68914 }
68915
68916- fscache_stat(&fscache_n_retrievals_wait);
68917+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
68918
68919 jif = jiffies;
68920 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
68921 fscache_wait_bit_interruptible,
68922 TASK_INTERRUPTIBLE) != 0) {
68923- fscache_stat(&fscache_n_retrievals_intr);
68924+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
68925 _leave(" = -ERESTARTSYS");
68926 return -ERESTARTSYS;
68927 }
68928@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
68929 */
68930 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
68931 struct fscache_retrieval *op,
68932- atomic_t *stat_op_waits,
68933- atomic_t *stat_object_dead)
68934+ atomic_unchecked_t *stat_op_waits,
68935+ atomic_unchecked_t *stat_object_dead)
68936 {
68937 int ret;
68938
68939@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
68940 goto check_if_dead;
68941
68942 _debug(">>> WT");
68943- fscache_stat(stat_op_waits);
68944+ fscache_stat_unchecked(stat_op_waits);
68945 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
68946 fscache_wait_bit_interruptible,
68947 TASK_INTERRUPTIBLE) < 0) {
68948@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
68949
68950 check_if_dead:
68951 if (unlikely(fscache_object_is_dead(object))) {
68952- fscache_stat(stat_object_dead);
68953+ fscache_stat_unchecked(stat_object_dead);
68954 return -ENOBUFS;
68955 }
68956 return 0;
68957@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
68958
68959 _enter("%p,%p,,,", cookie, page);
68960
68961- fscache_stat(&fscache_n_retrievals);
68962+ fscache_stat_unchecked(&fscache_n_retrievals);
68963
68964 if (hlist_empty(&cookie->backing_objects))
68965 goto nobufs;
68966@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
68967 goto nobufs_unlock;
68968 spin_unlock(&cookie->lock);
68969
68970- fscache_stat(&fscache_n_retrieval_ops);
68971+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
68972
68973 /* pin the netfs read context in case we need to do the actual netfs
68974 * read because we've encountered a cache read failure */
68975@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
68976
68977 error:
68978 if (ret == -ENOMEM)
68979- fscache_stat(&fscache_n_retrievals_nomem);
68980+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68981 else if (ret == -ERESTARTSYS)
68982- fscache_stat(&fscache_n_retrievals_intr);
68983+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
68984 else if (ret == -ENODATA)
68985- fscache_stat(&fscache_n_retrievals_nodata);
68986+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
68987 else if (ret < 0)
68988- fscache_stat(&fscache_n_retrievals_nobufs);
68989+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
68990 else
68991- fscache_stat(&fscache_n_retrievals_ok);
68992+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
68993
68994 fscache_put_retrieval(op);
68995 _leave(" = %d", ret);
68996@@ -453,7 +453,7 @@ nobufs_unlock:
68997 spin_unlock(&cookie->lock);
68998 kfree(op);
68999 nobufs:
69000- fscache_stat(&fscache_n_retrievals_nobufs);
69001+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69002 _leave(" = -ENOBUFS");
69003 return -ENOBUFS;
69004 }
69005@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69006
69007 _enter("%p,,%d,,,", cookie, *nr_pages);
69008
69009- fscache_stat(&fscache_n_retrievals);
69010+ fscache_stat_unchecked(&fscache_n_retrievals);
69011
69012 if (hlist_empty(&cookie->backing_objects))
69013 goto nobufs;
69014@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69015 goto nobufs_unlock;
69016 spin_unlock(&cookie->lock);
69017
69018- fscache_stat(&fscache_n_retrieval_ops);
69019+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
69020
69021 /* pin the netfs read context in case we need to do the actual netfs
69022 * read because we've encountered a cache read failure */
69023@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
69024
69025 error:
69026 if (ret == -ENOMEM)
69027- fscache_stat(&fscache_n_retrievals_nomem);
69028+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
69029 else if (ret == -ERESTARTSYS)
69030- fscache_stat(&fscache_n_retrievals_intr);
69031+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
69032 else if (ret == -ENODATA)
69033- fscache_stat(&fscache_n_retrievals_nodata);
69034+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
69035 else if (ret < 0)
69036- fscache_stat(&fscache_n_retrievals_nobufs);
69037+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69038 else
69039- fscache_stat(&fscache_n_retrievals_ok);
69040+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
69041
69042 fscache_put_retrieval(op);
69043 _leave(" = %d", ret);
69044@@ -570,7 +570,7 @@ nobufs_unlock:
69045 spin_unlock(&cookie->lock);
69046 kfree(op);
69047 nobufs:
69048- fscache_stat(&fscache_n_retrievals_nobufs);
69049+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
69050 _leave(" = -ENOBUFS");
69051 return -ENOBUFS;
69052 }
69053@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69054
69055 _enter("%p,%p,,,", cookie, page);
69056
69057- fscache_stat(&fscache_n_allocs);
69058+ fscache_stat_unchecked(&fscache_n_allocs);
69059
69060 if (hlist_empty(&cookie->backing_objects))
69061 goto nobufs;
69062@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69063 goto nobufs_unlock;
69064 spin_unlock(&cookie->lock);
69065
69066- fscache_stat(&fscache_n_alloc_ops);
69067+ fscache_stat_unchecked(&fscache_n_alloc_ops);
69068
69069 ret = fscache_wait_for_retrieval_activation(
69070 object, op,
69071@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
69072
69073 error:
69074 if (ret == -ERESTARTSYS)
69075- fscache_stat(&fscache_n_allocs_intr);
69076+ fscache_stat_unchecked(&fscache_n_allocs_intr);
69077 else if (ret < 0)
69078- fscache_stat(&fscache_n_allocs_nobufs);
69079+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69080 else
69081- fscache_stat(&fscache_n_allocs_ok);
69082+ fscache_stat_unchecked(&fscache_n_allocs_ok);
69083
69084 fscache_put_retrieval(op);
69085 _leave(" = %d", ret);
69086@@ -651,7 +651,7 @@ nobufs_unlock:
69087 spin_unlock(&cookie->lock);
69088 kfree(op);
69089 nobufs:
69090- fscache_stat(&fscache_n_allocs_nobufs);
69091+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
69092 _leave(" = -ENOBUFS");
69093 return -ENOBUFS;
69094 }
69095@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69096
69097 spin_lock(&cookie->stores_lock);
69098
69099- fscache_stat(&fscache_n_store_calls);
69100+ fscache_stat_unchecked(&fscache_n_store_calls);
69101
69102 /* find a page to store */
69103 page = NULL;
69104@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69105 page = results[0];
69106 _debug("gang %d [%lx]", n, page->index);
69107 if (page->index > op->store_limit) {
69108- fscache_stat(&fscache_n_store_pages_over_limit);
69109+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
69110 goto superseded;
69111 }
69112
69113@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
69114
69115 if (page) {
69116 fscache_set_op_state(&op->op, "Store");
69117- fscache_stat(&fscache_n_store_pages);
69118+ fscache_stat_unchecked(&fscache_n_store_pages);
69119 fscache_stat(&fscache_n_cop_write_page);
69120 ret = object->cache->ops->write_page(op, page);
69121 fscache_stat_d(&fscache_n_cop_write_page);
69122@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69123 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69124 ASSERT(PageFsCache(page));
69125
69126- fscache_stat(&fscache_n_stores);
69127+ fscache_stat_unchecked(&fscache_n_stores);
69128
69129 op = kzalloc(sizeof(*op), GFP_NOIO);
69130 if (!op)
69131@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69132 spin_unlock(&cookie->stores_lock);
69133 spin_unlock(&object->lock);
69134
69135- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
69136+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
69137 op->store_limit = object->store_limit;
69138
69139 if (fscache_submit_op(object, &op->op) < 0)
69140@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69141
69142 spin_unlock(&cookie->lock);
69143 radix_tree_preload_end();
69144- fscache_stat(&fscache_n_store_ops);
69145- fscache_stat(&fscache_n_stores_ok);
69146+ fscache_stat_unchecked(&fscache_n_store_ops);
69147+ fscache_stat_unchecked(&fscache_n_stores_ok);
69148
69149 /* the slow work queue now carries its own ref on the object */
69150 fscache_put_operation(&op->op);
69151@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
69152 return 0;
69153
69154 already_queued:
69155- fscache_stat(&fscache_n_stores_again);
69156+ fscache_stat_unchecked(&fscache_n_stores_again);
69157 already_pending:
69158 spin_unlock(&cookie->stores_lock);
69159 spin_unlock(&object->lock);
69160 spin_unlock(&cookie->lock);
69161 radix_tree_preload_end();
69162 kfree(op);
69163- fscache_stat(&fscache_n_stores_ok);
69164+ fscache_stat_unchecked(&fscache_n_stores_ok);
69165 _leave(" = 0");
69166 return 0;
69167
69168@@ -886,14 +886,14 @@ nobufs:
69169 spin_unlock(&cookie->lock);
69170 radix_tree_preload_end();
69171 kfree(op);
69172- fscache_stat(&fscache_n_stores_nobufs);
69173+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
69174 _leave(" = -ENOBUFS");
69175 return -ENOBUFS;
69176
69177 nomem_free:
69178 kfree(op);
69179 nomem:
69180- fscache_stat(&fscache_n_stores_oom);
69181+ fscache_stat_unchecked(&fscache_n_stores_oom);
69182 _leave(" = -ENOMEM");
69183 return -ENOMEM;
69184 }
69185@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
69186 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
69187 ASSERTCMP(page, !=, NULL);
69188
69189- fscache_stat(&fscache_n_uncaches);
69190+ fscache_stat_unchecked(&fscache_n_uncaches);
69191
69192 /* cache withdrawal may beat us to it */
69193 if (!PageFsCache(page))
69194@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
69195 unsigned long loop;
69196
69197 #ifdef CONFIG_FSCACHE_STATS
69198- atomic_add(pagevec->nr, &fscache_n_marks);
69199+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
69200 #endif
69201
69202 for (loop = 0; loop < pagevec->nr; loop++) {
69203diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
69204index 46435f3..8cddf18 100644
69205--- a/fs/fscache/stats.c
69206+++ b/fs/fscache/stats.c
69207@@ -18,95 +18,95 @@
69208 /*
69209 * operation counters
69210 */
69211-atomic_t fscache_n_op_pend;
69212-atomic_t fscache_n_op_run;
69213-atomic_t fscache_n_op_enqueue;
69214-atomic_t fscache_n_op_requeue;
69215-atomic_t fscache_n_op_deferred_release;
69216-atomic_t fscache_n_op_release;
69217-atomic_t fscache_n_op_gc;
69218-atomic_t fscache_n_op_cancelled;
69219-atomic_t fscache_n_op_rejected;
69220+atomic_unchecked_t fscache_n_op_pend;
69221+atomic_unchecked_t fscache_n_op_run;
69222+atomic_unchecked_t fscache_n_op_enqueue;
69223+atomic_unchecked_t fscache_n_op_requeue;
69224+atomic_unchecked_t fscache_n_op_deferred_release;
69225+atomic_unchecked_t fscache_n_op_release;
69226+atomic_unchecked_t fscache_n_op_gc;
69227+atomic_unchecked_t fscache_n_op_cancelled;
69228+atomic_unchecked_t fscache_n_op_rejected;
69229
69230-atomic_t fscache_n_attr_changed;
69231-atomic_t fscache_n_attr_changed_ok;
69232-atomic_t fscache_n_attr_changed_nobufs;
69233-atomic_t fscache_n_attr_changed_nomem;
69234-atomic_t fscache_n_attr_changed_calls;
69235+atomic_unchecked_t fscache_n_attr_changed;
69236+atomic_unchecked_t fscache_n_attr_changed_ok;
69237+atomic_unchecked_t fscache_n_attr_changed_nobufs;
69238+atomic_unchecked_t fscache_n_attr_changed_nomem;
69239+atomic_unchecked_t fscache_n_attr_changed_calls;
69240
69241-atomic_t fscache_n_allocs;
69242-atomic_t fscache_n_allocs_ok;
69243-atomic_t fscache_n_allocs_wait;
69244-atomic_t fscache_n_allocs_nobufs;
69245-atomic_t fscache_n_allocs_intr;
69246-atomic_t fscache_n_allocs_object_dead;
69247-atomic_t fscache_n_alloc_ops;
69248-atomic_t fscache_n_alloc_op_waits;
69249+atomic_unchecked_t fscache_n_allocs;
69250+atomic_unchecked_t fscache_n_allocs_ok;
69251+atomic_unchecked_t fscache_n_allocs_wait;
69252+atomic_unchecked_t fscache_n_allocs_nobufs;
69253+atomic_unchecked_t fscache_n_allocs_intr;
69254+atomic_unchecked_t fscache_n_allocs_object_dead;
69255+atomic_unchecked_t fscache_n_alloc_ops;
69256+atomic_unchecked_t fscache_n_alloc_op_waits;
69257
69258-atomic_t fscache_n_retrievals;
69259-atomic_t fscache_n_retrievals_ok;
69260-atomic_t fscache_n_retrievals_wait;
69261-atomic_t fscache_n_retrievals_nodata;
69262-atomic_t fscache_n_retrievals_nobufs;
69263-atomic_t fscache_n_retrievals_intr;
69264-atomic_t fscache_n_retrievals_nomem;
69265-atomic_t fscache_n_retrievals_object_dead;
69266-atomic_t fscache_n_retrieval_ops;
69267-atomic_t fscache_n_retrieval_op_waits;
69268+atomic_unchecked_t fscache_n_retrievals;
69269+atomic_unchecked_t fscache_n_retrievals_ok;
69270+atomic_unchecked_t fscache_n_retrievals_wait;
69271+atomic_unchecked_t fscache_n_retrievals_nodata;
69272+atomic_unchecked_t fscache_n_retrievals_nobufs;
69273+atomic_unchecked_t fscache_n_retrievals_intr;
69274+atomic_unchecked_t fscache_n_retrievals_nomem;
69275+atomic_unchecked_t fscache_n_retrievals_object_dead;
69276+atomic_unchecked_t fscache_n_retrieval_ops;
69277+atomic_unchecked_t fscache_n_retrieval_op_waits;
69278
69279-atomic_t fscache_n_stores;
69280-atomic_t fscache_n_stores_ok;
69281-atomic_t fscache_n_stores_again;
69282-atomic_t fscache_n_stores_nobufs;
69283-atomic_t fscache_n_stores_oom;
69284-atomic_t fscache_n_store_ops;
69285-atomic_t fscache_n_store_calls;
69286-atomic_t fscache_n_store_pages;
69287-atomic_t fscache_n_store_radix_deletes;
69288-atomic_t fscache_n_store_pages_over_limit;
69289+atomic_unchecked_t fscache_n_stores;
69290+atomic_unchecked_t fscache_n_stores_ok;
69291+atomic_unchecked_t fscache_n_stores_again;
69292+atomic_unchecked_t fscache_n_stores_nobufs;
69293+atomic_unchecked_t fscache_n_stores_oom;
69294+atomic_unchecked_t fscache_n_store_ops;
69295+atomic_unchecked_t fscache_n_store_calls;
69296+atomic_unchecked_t fscache_n_store_pages;
69297+atomic_unchecked_t fscache_n_store_radix_deletes;
69298+atomic_unchecked_t fscache_n_store_pages_over_limit;
69299
69300-atomic_t fscache_n_store_vmscan_not_storing;
69301-atomic_t fscache_n_store_vmscan_gone;
69302-atomic_t fscache_n_store_vmscan_busy;
69303-atomic_t fscache_n_store_vmscan_cancelled;
69304+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
69305+atomic_unchecked_t fscache_n_store_vmscan_gone;
69306+atomic_unchecked_t fscache_n_store_vmscan_busy;
69307+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
69308
69309-atomic_t fscache_n_marks;
69310-atomic_t fscache_n_uncaches;
69311+atomic_unchecked_t fscache_n_marks;
69312+atomic_unchecked_t fscache_n_uncaches;
69313
69314-atomic_t fscache_n_acquires;
69315-atomic_t fscache_n_acquires_null;
69316-atomic_t fscache_n_acquires_no_cache;
69317-atomic_t fscache_n_acquires_ok;
69318-atomic_t fscache_n_acquires_nobufs;
69319-atomic_t fscache_n_acquires_oom;
69320+atomic_unchecked_t fscache_n_acquires;
69321+atomic_unchecked_t fscache_n_acquires_null;
69322+atomic_unchecked_t fscache_n_acquires_no_cache;
69323+atomic_unchecked_t fscache_n_acquires_ok;
69324+atomic_unchecked_t fscache_n_acquires_nobufs;
69325+atomic_unchecked_t fscache_n_acquires_oom;
69326
69327-atomic_t fscache_n_updates;
69328-atomic_t fscache_n_updates_null;
69329-atomic_t fscache_n_updates_run;
69330+atomic_unchecked_t fscache_n_updates;
69331+atomic_unchecked_t fscache_n_updates_null;
69332+atomic_unchecked_t fscache_n_updates_run;
69333
69334-atomic_t fscache_n_relinquishes;
69335-atomic_t fscache_n_relinquishes_null;
69336-atomic_t fscache_n_relinquishes_waitcrt;
69337-atomic_t fscache_n_relinquishes_retire;
69338+atomic_unchecked_t fscache_n_relinquishes;
69339+atomic_unchecked_t fscache_n_relinquishes_null;
69340+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
69341+atomic_unchecked_t fscache_n_relinquishes_retire;
69342
69343-atomic_t fscache_n_cookie_index;
69344-atomic_t fscache_n_cookie_data;
69345-atomic_t fscache_n_cookie_special;
69346+atomic_unchecked_t fscache_n_cookie_index;
69347+atomic_unchecked_t fscache_n_cookie_data;
69348+atomic_unchecked_t fscache_n_cookie_special;
69349
69350-atomic_t fscache_n_object_alloc;
69351-atomic_t fscache_n_object_no_alloc;
69352-atomic_t fscache_n_object_lookups;
69353-atomic_t fscache_n_object_lookups_negative;
69354-atomic_t fscache_n_object_lookups_positive;
69355-atomic_t fscache_n_object_lookups_timed_out;
69356-atomic_t fscache_n_object_created;
69357-atomic_t fscache_n_object_avail;
69358-atomic_t fscache_n_object_dead;
69359+atomic_unchecked_t fscache_n_object_alloc;
69360+atomic_unchecked_t fscache_n_object_no_alloc;
69361+atomic_unchecked_t fscache_n_object_lookups;
69362+atomic_unchecked_t fscache_n_object_lookups_negative;
69363+atomic_unchecked_t fscache_n_object_lookups_positive;
69364+atomic_unchecked_t fscache_n_object_lookups_timed_out;
69365+atomic_unchecked_t fscache_n_object_created;
69366+atomic_unchecked_t fscache_n_object_avail;
69367+atomic_unchecked_t fscache_n_object_dead;
69368
69369-atomic_t fscache_n_checkaux_none;
69370-atomic_t fscache_n_checkaux_okay;
69371-atomic_t fscache_n_checkaux_update;
69372-atomic_t fscache_n_checkaux_obsolete;
69373+atomic_unchecked_t fscache_n_checkaux_none;
69374+atomic_unchecked_t fscache_n_checkaux_okay;
69375+atomic_unchecked_t fscache_n_checkaux_update;
69376+atomic_unchecked_t fscache_n_checkaux_obsolete;
69377
69378 atomic_t fscache_n_cop_alloc_object;
69379 atomic_t fscache_n_cop_lookup_object;
69380@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
69381 seq_puts(m, "FS-Cache statistics\n");
69382
69383 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
69384- atomic_read(&fscache_n_cookie_index),
69385- atomic_read(&fscache_n_cookie_data),
69386- atomic_read(&fscache_n_cookie_special));
69387+ atomic_read_unchecked(&fscache_n_cookie_index),
69388+ atomic_read_unchecked(&fscache_n_cookie_data),
69389+ atomic_read_unchecked(&fscache_n_cookie_special));
69390
69391 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
69392- atomic_read(&fscache_n_object_alloc),
69393- atomic_read(&fscache_n_object_no_alloc),
69394- atomic_read(&fscache_n_object_avail),
69395- atomic_read(&fscache_n_object_dead));
69396+ atomic_read_unchecked(&fscache_n_object_alloc),
69397+ atomic_read_unchecked(&fscache_n_object_no_alloc),
69398+ atomic_read_unchecked(&fscache_n_object_avail),
69399+ atomic_read_unchecked(&fscache_n_object_dead));
69400 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
69401- atomic_read(&fscache_n_checkaux_none),
69402- atomic_read(&fscache_n_checkaux_okay),
69403- atomic_read(&fscache_n_checkaux_update),
69404- atomic_read(&fscache_n_checkaux_obsolete));
69405+ atomic_read_unchecked(&fscache_n_checkaux_none),
69406+ atomic_read_unchecked(&fscache_n_checkaux_okay),
69407+ atomic_read_unchecked(&fscache_n_checkaux_update),
69408+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
69409
69410 seq_printf(m, "Pages : mrk=%u unc=%u\n",
69411- atomic_read(&fscache_n_marks),
69412- atomic_read(&fscache_n_uncaches));
69413+ atomic_read_unchecked(&fscache_n_marks),
69414+ atomic_read_unchecked(&fscache_n_uncaches));
69415
69416 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
69417 " oom=%u\n",
69418- atomic_read(&fscache_n_acquires),
69419- atomic_read(&fscache_n_acquires_null),
69420- atomic_read(&fscache_n_acquires_no_cache),
69421- atomic_read(&fscache_n_acquires_ok),
69422- atomic_read(&fscache_n_acquires_nobufs),
69423- atomic_read(&fscache_n_acquires_oom));
69424+ atomic_read_unchecked(&fscache_n_acquires),
69425+ atomic_read_unchecked(&fscache_n_acquires_null),
69426+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
69427+ atomic_read_unchecked(&fscache_n_acquires_ok),
69428+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
69429+ atomic_read_unchecked(&fscache_n_acquires_oom));
69430
69431 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
69432- atomic_read(&fscache_n_object_lookups),
69433- atomic_read(&fscache_n_object_lookups_negative),
69434- atomic_read(&fscache_n_object_lookups_positive),
69435- atomic_read(&fscache_n_object_lookups_timed_out),
69436- atomic_read(&fscache_n_object_created));
69437+ atomic_read_unchecked(&fscache_n_object_lookups),
69438+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
69439+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
69440+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
69441+ atomic_read_unchecked(&fscache_n_object_created));
69442
69443 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
69444- atomic_read(&fscache_n_updates),
69445- atomic_read(&fscache_n_updates_null),
69446- atomic_read(&fscache_n_updates_run));
69447+ atomic_read_unchecked(&fscache_n_updates),
69448+ atomic_read_unchecked(&fscache_n_updates_null),
69449+ atomic_read_unchecked(&fscache_n_updates_run));
69450
69451 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
69452- atomic_read(&fscache_n_relinquishes),
69453- atomic_read(&fscache_n_relinquishes_null),
69454- atomic_read(&fscache_n_relinquishes_waitcrt),
69455- atomic_read(&fscache_n_relinquishes_retire));
69456+ atomic_read_unchecked(&fscache_n_relinquishes),
69457+ atomic_read_unchecked(&fscache_n_relinquishes_null),
69458+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
69459+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
69460
69461 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
69462- atomic_read(&fscache_n_attr_changed),
69463- atomic_read(&fscache_n_attr_changed_ok),
69464- atomic_read(&fscache_n_attr_changed_nobufs),
69465- atomic_read(&fscache_n_attr_changed_nomem),
69466- atomic_read(&fscache_n_attr_changed_calls));
69467+ atomic_read_unchecked(&fscache_n_attr_changed),
69468+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
69469+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
69470+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
69471+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
69472
69473 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
69474- atomic_read(&fscache_n_allocs),
69475- atomic_read(&fscache_n_allocs_ok),
69476- atomic_read(&fscache_n_allocs_wait),
69477- atomic_read(&fscache_n_allocs_nobufs),
69478- atomic_read(&fscache_n_allocs_intr));
69479+ atomic_read_unchecked(&fscache_n_allocs),
69480+ atomic_read_unchecked(&fscache_n_allocs_ok),
69481+ atomic_read_unchecked(&fscache_n_allocs_wait),
69482+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
69483+ atomic_read_unchecked(&fscache_n_allocs_intr));
69484 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
69485- atomic_read(&fscache_n_alloc_ops),
69486- atomic_read(&fscache_n_alloc_op_waits),
69487- atomic_read(&fscache_n_allocs_object_dead));
69488+ atomic_read_unchecked(&fscache_n_alloc_ops),
69489+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
69490+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
69491
69492 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
69493 " int=%u oom=%u\n",
69494- atomic_read(&fscache_n_retrievals),
69495- atomic_read(&fscache_n_retrievals_ok),
69496- atomic_read(&fscache_n_retrievals_wait),
69497- atomic_read(&fscache_n_retrievals_nodata),
69498- atomic_read(&fscache_n_retrievals_nobufs),
69499- atomic_read(&fscache_n_retrievals_intr),
69500- atomic_read(&fscache_n_retrievals_nomem));
69501+ atomic_read_unchecked(&fscache_n_retrievals),
69502+ atomic_read_unchecked(&fscache_n_retrievals_ok),
69503+ atomic_read_unchecked(&fscache_n_retrievals_wait),
69504+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
69505+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
69506+ atomic_read_unchecked(&fscache_n_retrievals_intr),
69507+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
69508 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
69509- atomic_read(&fscache_n_retrieval_ops),
69510- atomic_read(&fscache_n_retrieval_op_waits),
69511- atomic_read(&fscache_n_retrievals_object_dead));
69512+ atomic_read_unchecked(&fscache_n_retrieval_ops),
69513+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
69514+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
69515
69516 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
69517- atomic_read(&fscache_n_stores),
69518- atomic_read(&fscache_n_stores_ok),
69519- atomic_read(&fscache_n_stores_again),
69520- atomic_read(&fscache_n_stores_nobufs),
69521- atomic_read(&fscache_n_stores_oom));
69522+ atomic_read_unchecked(&fscache_n_stores),
69523+ atomic_read_unchecked(&fscache_n_stores_ok),
69524+ atomic_read_unchecked(&fscache_n_stores_again),
69525+ atomic_read_unchecked(&fscache_n_stores_nobufs),
69526+ atomic_read_unchecked(&fscache_n_stores_oom));
69527 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
69528- atomic_read(&fscache_n_store_ops),
69529- atomic_read(&fscache_n_store_calls),
69530- atomic_read(&fscache_n_store_pages),
69531- atomic_read(&fscache_n_store_radix_deletes),
69532- atomic_read(&fscache_n_store_pages_over_limit));
69533+ atomic_read_unchecked(&fscache_n_store_ops),
69534+ atomic_read_unchecked(&fscache_n_store_calls),
69535+ atomic_read_unchecked(&fscache_n_store_pages),
69536+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
69537+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
69538
69539 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
69540- atomic_read(&fscache_n_store_vmscan_not_storing),
69541- atomic_read(&fscache_n_store_vmscan_gone),
69542- atomic_read(&fscache_n_store_vmscan_busy),
69543- atomic_read(&fscache_n_store_vmscan_cancelled));
69544+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
69545+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
69546+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
69547+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
69548
69549 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
69550- atomic_read(&fscache_n_op_pend),
69551- atomic_read(&fscache_n_op_run),
69552- atomic_read(&fscache_n_op_enqueue),
69553- atomic_read(&fscache_n_op_cancelled),
69554- atomic_read(&fscache_n_op_rejected));
69555+ atomic_read_unchecked(&fscache_n_op_pend),
69556+ atomic_read_unchecked(&fscache_n_op_run),
69557+ atomic_read_unchecked(&fscache_n_op_enqueue),
69558+ atomic_read_unchecked(&fscache_n_op_cancelled),
69559+ atomic_read_unchecked(&fscache_n_op_rejected));
69560 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
69561- atomic_read(&fscache_n_op_deferred_release),
69562- atomic_read(&fscache_n_op_release),
69563- atomic_read(&fscache_n_op_gc));
69564+ atomic_read_unchecked(&fscache_n_op_deferred_release),
69565+ atomic_read_unchecked(&fscache_n_op_release),
69566+ atomic_read_unchecked(&fscache_n_op_gc));
69567
69568 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
69569 atomic_read(&fscache_n_cop_alloc_object),
69570diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
69571index de792dc..448b532 100644
69572--- a/fs/fuse/cuse.c
69573+++ b/fs/fuse/cuse.c
69574@@ -576,10 +576,12 @@ static int __init cuse_init(void)
69575 INIT_LIST_HEAD(&cuse_conntbl[i]);
69576
69577 /* inherit and extend fuse_dev_operations */
69578- cuse_channel_fops = fuse_dev_operations;
69579- cuse_channel_fops.owner = THIS_MODULE;
69580- cuse_channel_fops.open = cuse_channel_open;
69581- cuse_channel_fops.release = cuse_channel_release;
69582+ pax_open_kernel();
69583+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
69584+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
69585+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
69586+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
69587+ pax_close_kernel();
69588
69589 cuse_class = class_create(THIS_MODULE, "cuse");
69590 if (IS_ERR(cuse_class))
69591diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
69592index 1facb39..7f48557 100644
69593--- a/fs/fuse/dev.c
69594+++ b/fs/fuse/dev.c
69595@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69596 {
69597 struct fuse_notify_inval_entry_out outarg;
69598 int err = -EINVAL;
69599- char buf[FUSE_NAME_MAX+1];
69600+ char *buf = NULL;
69601 struct qstr name;
69602
69603 if (size < sizeof(outarg))
69604@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69605 if (outarg.namelen > FUSE_NAME_MAX)
69606 goto err;
69607
69608+ err = -ENOMEM;
69609+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
69610+ if (!buf)
69611+ goto err;
69612+
69613 err = -EINVAL;
69614 if (size != sizeof(outarg) + outarg.namelen + 1)
69615 goto err;
69616@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69617
69618 down_read(&fc->killsb);
69619 err = -ENOENT;
69620- if (!fc->sb)
69621- goto err_unlock;
69622-
69623- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69624-
69625-err_unlock:
69626+ if (fc->sb)
69627+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69628 up_read(&fc->killsb);
69629+ kfree(buf);
69630 return err;
69631
69632 err:
69633 fuse_copy_finish(cs);
69634+ kfree(buf);
69635 return err;
69636 }
69637
69638diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
69639index 4787ae6..73efff7 100644
69640--- a/fs/fuse/dir.c
69641+++ b/fs/fuse/dir.c
69642@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
69643 return link;
69644 }
69645
69646-static void free_link(char *link)
69647+static void free_link(const char *link)
69648 {
69649 if (!IS_ERR(link))
69650 free_page((unsigned long) link);
69651diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
69652index 247436c..e650ccb 100644
69653--- a/fs/gfs2/ops_inode.c
69654+++ b/fs/gfs2/ops_inode.c
69655@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
69656 unsigned int x;
69657 int error;
69658
69659+ pax_track_stack();
69660+
69661 if (ndentry->d_inode) {
69662 nip = GFS2_I(ndentry->d_inode);
69663 if (ip == nip)
69664diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
69665index 4463297..4fed53b 100644
69666--- a/fs/gfs2/sys.c
69667+++ b/fs/gfs2/sys.c
69668@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
69669 return a->store ? a->store(sdp, buf, len) : len;
69670 }
69671
69672-static struct sysfs_ops gfs2_attr_ops = {
69673+static const struct sysfs_ops gfs2_attr_ops = {
69674 .show = gfs2_attr_show,
69675 .store = gfs2_attr_store,
69676 };
69677@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
69678 return 0;
69679 }
69680
69681-static struct kset_uevent_ops gfs2_uevent_ops = {
69682+static const struct kset_uevent_ops gfs2_uevent_ops = {
69683 .uevent = gfs2_uevent,
69684 };
69685
69686diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
69687index f6874ac..7cd98a8 100644
69688--- a/fs/hfsplus/catalog.c
69689+++ b/fs/hfsplus/catalog.c
69690@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
69691 int err;
69692 u16 type;
69693
69694+ pax_track_stack();
69695+
69696 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
69697 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
69698 if (err)
69699@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
69700 int entry_size;
69701 int err;
69702
69703+ pax_track_stack();
69704+
69705 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
69706 sb = dir->i_sb;
69707 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
69708@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
69709 int entry_size, type;
69710 int err = 0;
69711
69712+ pax_track_stack();
69713+
69714 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
69715 dst_dir->i_ino, dst_name->name);
69716 sb = src_dir->i_sb;
69717diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
69718index 5f40236..dac3421 100644
69719--- a/fs/hfsplus/dir.c
69720+++ b/fs/hfsplus/dir.c
69721@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
69722 struct hfsplus_readdir_data *rd;
69723 u16 type;
69724
69725+ pax_track_stack();
69726+
69727 if (filp->f_pos >= inode->i_size)
69728 return 0;
69729
69730diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
69731index 1bcf597..905a251 100644
69732--- a/fs/hfsplus/inode.c
69733+++ b/fs/hfsplus/inode.c
69734@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
69735 int res = 0;
69736 u16 type;
69737
69738+ pax_track_stack();
69739+
69740 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
69741
69742 HFSPLUS_I(inode).dev = 0;
69743@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
69744 struct hfs_find_data fd;
69745 hfsplus_cat_entry entry;
69746
69747+ pax_track_stack();
69748+
69749 if (HFSPLUS_IS_RSRC(inode))
69750 main_inode = HFSPLUS_I(inode).rsrc_inode;
69751
69752diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
69753index f457d2c..7ef4ad5 100644
69754--- a/fs/hfsplus/ioctl.c
69755+++ b/fs/hfsplus/ioctl.c
69756@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
69757 struct hfsplus_cat_file *file;
69758 int res;
69759
69760+ pax_track_stack();
69761+
69762 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69763 return -EOPNOTSUPP;
69764
69765@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
69766 struct hfsplus_cat_file *file;
69767 ssize_t res = 0;
69768
69769+ pax_track_stack();
69770+
69771 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69772 return -EOPNOTSUPP;
69773
69774diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
69775index 43022f3..7298079 100644
69776--- a/fs/hfsplus/super.c
69777+++ b/fs/hfsplus/super.c
69778@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
69779 struct nls_table *nls = NULL;
69780 int err = -EINVAL;
69781
69782+ pax_track_stack();
69783+
69784 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
69785 if (!sbi)
69786 return -ENOMEM;
69787diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
69788index 87a1258..5694d91 100644
69789--- a/fs/hugetlbfs/inode.c
69790+++ b/fs/hugetlbfs/inode.c
69791@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
69792 .kill_sb = kill_litter_super,
69793 };
69794
69795-static struct vfsmount *hugetlbfs_vfsmount;
69796+struct vfsmount *hugetlbfs_vfsmount;
69797
69798 static int can_do_hugetlb_shm(void)
69799 {
69800diff --git a/fs/ioctl.c b/fs/ioctl.c
69801index 6c75110..19d2c3c 100644
69802--- a/fs/ioctl.c
69803+++ b/fs/ioctl.c
69804@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
69805 u64 phys, u64 len, u32 flags)
69806 {
69807 struct fiemap_extent extent;
69808- struct fiemap_extent *dest = fieinfo->fi_extents_start;
69809+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
69810
69811 /* only count the extents */
69812 if (fieinfo->fi_extents_max == 0) {
69813@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69814
69815 fieinfo.fi_flags = fiemap.fm_flags;
69816 fieinfo.fi_extents_max = fiemap.fm_extent_count;
69817- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
69818+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
69819
69820 if (fiemap.fm_extent_count != 0 &&
69821 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
69822@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69823 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
69824 fiemap.fm_flags = fieinfo.fi_flags;
69825 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
69826- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
69827+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
69828 error = -EFAULT;
69829
69830 return error;
69831diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
69832index b0435dd..81ee0be 100644
69833--- a/fs/jbd/checkpoint.c
69834+++ b/fs/jbd/checkpoint.c
69835@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
69836 tid_t this_tid;
69837 int result;
69838
69839+ pax_track_stack();
69840+
69841 jbd_debug(1, "Start checkpoint\n");
69842
69843 /*
69844diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
69845index 546d153..736896c 100644
69846--- a/fs/jffs2/compr_rtime.c
69847+++ b/fs/jffs2/compr_rtime.c
69848@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
69849 int outpos = 0;
69850 int pos=0;
69851
69852+ pax_track_stack();
69853+
69854 memset(positions,0,sizeof(positions));
69855
69856 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
69857@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
69858 int outpos = 0;
69859 int pos=0;
69860
69861+ pax_track_stack();
69862+
69863 memset(positions,0,sizeof(positions));
69864
69865 while (outpos<destlen) {
69866diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
69867index 170d289..3254b98 100644
69868--- a/fs/jffs2/compr_rubin.c
69869+++ b/fs/jffs2/compr_rubin.c
69870@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
69871 int ret;
69872 uint32_t mysrclen, mydstlen;
69873
69874+ pax_track_stack();
69875+
69876 mysrclen = *sourcelen;
69877 mydstlen = *dstlen - 8;
69878
69879diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
69880index b47679b..00d65d3 100644
69881--- a/fs/jffs2/erase.c
69882+++ b/fs/jffs2/erase.c
69883@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
69884 struct jffs2_unknown_node marker = {
69885 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
69886 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69887- .totlen = cpu_to_je32(c->cleanmarker_size)
69888+ .totlen = cpu_to_je32(c->cleanmarker_size),
69889+ .hdr_crc = cpu_to_je32(0)
69890 };
69891
69892 jffs2_prealloc_raw_node_refs(c, jeb, 1);
69893diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
69894index 5ef7bac..4fd1e3c 100644
69895--- a/fs/jffs2/wbuf.c
69896+++ b/fs/jffs2/wbuf.c
69897@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
69898 {
69899 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
69900 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69901- .totlen = constant_cpu_to_je32(8)
69902+ .totlen = constant_cpu_to_je32(8),
69903+ .hdr_crc = constant_cpu_to_je32(0)
69904 };
69905
69906 /*
69907diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
69908index 082e844..52012a1 100644
69909--- a/fs/jffs2/xattr.c
69910+++ b/fs/jffs2/xattr.c
69911@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
69912
69913 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
69914
69915+ pax_track_stack();
69916+
69917 /* Phase.1 : Merge same xref */
69918 for (i=0; i < XREF_TMPHASH_SIZE; i++)
69919 xref_tmphash[i] = NULL;
69920diff --git a/fs/jfs/super.c b/fs/jfs/super.c
69921index 2234c73..f6e6e6b 100644
69922--- a/fs/jfs/super.c
69923+++ b/fs/jfs/super.c
69924@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
69925
69926 jfs_inode_cachep =
69927 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
69928- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
69929+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
69930 init_once);
69931 if (jfs_inode_cachep == NULL)
69932 return -ENOMEM;
69933diff --git a/fs/libfs.c b/fs/libfs.c
69934index ba36e93..3153fce 100644
69935--- a/fs/libfs.c
69936+++ b/fs/libfs.c
69937@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
69938
69939 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
69940 struct dentry *next;
69941+ char d_name[sizeof(next->d_iname)];
69942+ const unsigned char *name;
69943+
69944 next = list_entry(p, struct dentry, d_u.d_child);
69945 if (d_unhashed(next) || !next->d_inode)
69946 continue;
69947
69948 spin_unlock(&dcache_lock);
69949- if (filldir(dirent, next->d_name.name,
69950+ name = next->d_name.name;
69951+ if (name == next->d_iname) {
69952+ memcpy(d_name, name, next->d_name.len);
69953+ name = d_name;
69954+ }
69955+ if (filldir(dirent, name,
69956 next->d_name.len, filp->f_pos,
69957 next->d_inode->i_ino,
69958 dt_type(next->d_inode)) < 0)
69959diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
69960index c325a83..d15b07b 100644
69961--- a/fs/lockd/clntproc.c
69962+++ b/fs/lockd/clntproc.c
69963@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
69964 /*
69965 * Cookie counter for NLM requests
69966 */
69967-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
69968+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
69969
69970 void nlmclnt_next_cookie(struct nlm_cookie *c)
69971 {
69972- u32 cookie = atomic_inc_return(&nlm_cookie);
69973+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
69974
69975 memcpy(c->data, &cookie, 4);
69976 c->len=4;
69977@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
69978 struct nlm_rqst reqst, *req;
69979 int status;
69980
69981+ pax_track_stack();
69982+
69983 req = &reqst;
69984 memset(req, 0, sizeof(*req));
69985 locks_init_lock(&req->a_args.lock.fl);
69986diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
69987index 1a54ae1..6a16c27 100644
69988--- a/fs/lockd/svc.c
69989+++ b/fs/lockd/svc.c
69990@@ -43,7 +43,7 @@
69991
69992 static struct svc_program nlmsvc_program;
69993
69994-struct nlmsvc_binding * nlmsvc_ops;
69995+const struct nlmsvc_binding * nlmsvc_ops;
69996 EXPORT_SYMBOL_GPL(nlmsvc_ops);
69997
69998 static DEFINE_MUTEX(nlmsvc_mutex);
69999diff --git a/fs/locks.c b/fs/locks.c
70000index a8794f2..4041e55 100644
70001--- a/fs/locks.c
70002+++ b/fs/locks.c
70003@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
70004
70005 static struct kmem_cache *filelock_cache __read_mostly;
70006
70007+static void locks_init_lock_always(struct file_lock *fl)
70008+{
70009+ fl->fl_next = NULL;
70010+ fl->fl_fasync = NULL;
70011+ fl->fl_owner = NULL;
70012+ fl->fl_pid = 0;
70013+ fl->fl_nspid = NULL;
70014+ fl->fl_file = NULL;
70015+ fl->fl_flags = 0;
70016+ fl->fl_type = 0;
70017+ fl->fl_start = fl->fl_end = 0;
70018+}
70019+
70020 /* Allocate an empty lock structure. */
70021 static struct file_lock *locks_alloc_lock(void)
70022 {
70023- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70024+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
70025+
70026+ if (fl)
70027+ locks_init_lock_always(fl);
70028+
70029+ return fl;
70030 }
70031
70032 void locks_release_private(struct file_lock *fl)
70033@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
70034 INIT_LIST_HEAD(&fl->fl_link);
70035 INIT_LIST_HEAD(&fl->fl_block);
70036 init_waitqueue_head(&fl->fl_wait);
70037- fl->fl_next = NULL;
70038- fl->fl_fasync = NULL;
70039- fl->fl_owner = NULL;
70040- fl->fl_pid = 0;
70041- fl->fl_nspid = NULL;
70042- fl->fl_file = NULL;
70043- fl->fl_flags = 0;
70044- fl->fl_type = 0;
70045- fl->fl_start = fl->fl_end = 0;
70046 fl->fl_ops = NULL;
70047 fl->fl_lmops = NULL;
70048+ locks_init_lock_always(fl);
70049 }
70050
70051 EXPORT_SYMBOL(locks_init_lock);
70052@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
70053 return;
70054
70055 if (filp->f_op && filp->f_op->flock) {
70056- struct file_lock fl = {
70057+ struct file_lock flock = {
70058 .fl_pid = current->tgid,
70059 .fl_file = filp,
70060 .fl_flags = FL_FLOCK,
70061 .fl_type = F_UNLCK,
70062 .fl_end = OFFSET_MAX,
70063 };
70064- filp->f_op->flock(filp, F_SETLKW, &fl);
70065- if (fl.fl_ops && fl.fl_ops->fl_release_private)
70066- fl.fl_ops->fl_release_private(&fl);
70067+ filp->f_op->flock(filp, F_SETLKW, &flock);
70068+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
70069+ flock.fl_ops->fl_release_private(&flock);
70070 }
70071
70072 lock_kernel();
70073diff --git a/fs/mbcache.c b/fs/mbcache.c
70074index ec88ff3..b843a82 100644
70075--- a/fs/mbcache.c
70076+++ b/fs/mbcache.c
70077@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
70078 if (!cache)
70079 goto fail;
70080 cache->c_name = name;
70081- cache->c_op.free = NULL;
70082+ *(void **)&cache->c_op.free = NULL;
70083 if (cache_op)
70084- cache->c_op.free = cache_op->free;
70085+ *(void **)&cache->c_op.free = cache_op->free;
70086 atomic_set(&cache->c_entry_count, 0);
70087 cache->c_bucket_bits = bucket_bits;
70088 #ifdef MB_CACHE_INDEXES_COUNT
70089diff --git a/fs/namei.c b/fs/namei.c
70090index b0afbd4..8d065a1 100644
70091--- a/fs/namei.c
70092+++ b/fs/namei.c
70093@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
70094 return ret;
70095
70096 /*
70097+ * Searching includes executable on directories, else just read.
70098+ */
70099+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70100+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70101+ if (capable(CAP_DAC_READ_SEARCH))
70102+ return 0;
70103+
70104+ /*
70105 * Read/write DACs are always overridable.
70106 * Executable DACs are overridable if at least one exec bit is set.
70107 */
70108@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
70109 if (capable(CAP_DAC_OVERRIDE))
70110 return 0;
70111
70112- /*
70113- * Searching includes executable on directories, else just read.
70114- */
70115- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
70116- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
70117- if (capable(CAP_DAC_READ_SEARCH))
70118- return 0;
70119-
70120 return -EACCES;
70121 }
70122
70123@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
70124 if (!ret)
70125 goto ok;
70126
70127- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
70128+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
70129+ capable(CAP_DAC_OVERRIDE))
70130 goto ok;
70131
70132 return ret;
70133@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
70134 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
70135 error = PTR_ERR(cookie);
70136 if (!IS_ERR(cookie)) {
70137- char *s = nd_get_link(nd);
70138+ const char *s = nd_get_link(nd);
70139 error = 0;
70140 if (s)
70141 error = __vfs_follow_link(nd, s);
70142@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
70143 err = security_inode_follow_link(path->dentry, nd);
70144 if (err)
70145 goto loop;
70146+
70147+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
70148+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
70149+ err = -EACCES;
70150+ goto loop;
70151+ }
70152+
70153 current->link_count++;
70154 current->total_link_count++;
70155 nd->depth++;
70156@@ -1016,11 +1024,19 @@ return_reval:
70157 break;
70158 }
70159 return_base:
70160+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
70161+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
70162+ path_put(&nd->path);
70163+ return -ENOENT;
70164+ }
70165 return 0;
70166 out_dput:
70167 path_put_conditional(&next, nd);
70168 break;
70169 }
70170+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
70171+ err = -ENOENT;
70172+
70173 path_put(&nd->path);
70174 return_err:
70175 return err;
70176@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
70177 int retval = path_init(dfd, name, flags, nd);
70178 if (!retval)
70179 retval = path_walk(name, nd);
70180- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
70181- nd->path.dentry->d_inode))
70182- audit_inode(name, nd->path.dentry);
70183+
70184+ if (likely(!retval)) {
70185+ if (nd->path.dentry && nd->path.dentry->d_inode) {
70186+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
70187+ retval = -ENOENT;
70188+ if (!audit_dummy_context())
70189+ audit_inode(name, nd->path.dentry);
70190+ }
70191+ }
70192 if (nd->root.mnt) {
70193 path_put(&nd->root);
70194 nd->root.mnt = NULL;
70195 }
70196+
70197 return retval;
70198 }
70199
70200@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
70201 if (error)
70202 goto err_out;
70203
70204+
70205+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
70206+ error = -EPERM;
70207+ goto err_out;
70208+ }
70209+ if (gr_handle_rawio(inode)) {
70210+ error = -EPERM;
70211+ goto err_out;
70212+ }
70213+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
70214+ error = -EACCES;
70215+ goto err_out;
70216+ }
70217+
70218 if (flag & O_TRUNC) {
70219 error = get_write_access(inode);
70220 if (error)
70221@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70222 {
70223 int error;
70224 struct dentry *dir = nd->path.dentry;
70225+ int acc_mode = ACC_MODE(flag);
70226+
70227+ if (flag & O_TRUNC)
70228+ acc_mode |= MAY_WRITE;
70229+ if (flag & O_APPEND)
70230+ acc_mode |= MAY_APPEND;
70231+
70232+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
70233+ error = -EACCES;
70234+ goto out_unlock;
70235+ }
70236
70237 if (!IS_POSIXACL(dir->d_inode))
70238 mode &= ~current_umask();
70239@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
70240 if (error)
70241 goto out_unlock;
70242 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
70243+ if (!error)
70244+ gr_handle_create(path->dentry, nd->path.mnt);
70245 out_unlock:
70246 mutex_unlock(&dir->d_inode->i_mutex);
70247 dput(nd->path.dentry);
70248@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
70249 &nd, flag);
70250 if (error)
70251 return ERR_PTR(error);
70252+
70253+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
70254+ error = -EPERM;
70255+ goto exit;
70256+ }
70257+
70258+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
70259+ error = -EPERM;
70260+ goto exit;
70261+ }
70262+
70263+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
70264+ error = -EACCES;
70265+ goto exit;
70266+ }
70267+
70268 goto ok;
70269 }
70270
70271@@ -1795,6 +1861,19 @@ do_last:
70272 /*
70273 * It already exists.
70274 */
70275+
70276+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
70277+ error = -ENOENT;
70278+ goto exit_mutex_unlock;
70279+ }
70280+
70281+ /* only check if O_CREAT is specified, all other checks need
70282+ to go into may_open */
70283+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
70284+ error = -EACCES;
70285+ goto exit_mutex_unlock;
70286+ }
70287+
70288 mutex_unlock(&dir->d_inode->i_mutex);
70289 audit_inode(pathname, path.dentry);
70290
70291@@ -1887,6 +1966,13 @@ do_link:
70292 error = security_inode_follow_link(path.dentry, &nd);
70293 if (error)
70294 goto exit_dput;
70295+
70296+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
70297+ path.dentry, nd.path.mnt)) {
70298+ error = -EACCES;
70299+ goto exit_dput;
70300+ }
70301+
70302 error = __do_follow_link(&path, &nd);
70303 if (error) {
70304 /* Does someone understand code flow here? Or it is only
70305@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
70306 }
70307 return dentry;
70308 eexist:
70309+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
70310+ dput(dentry);
70311+ return ERR_PTR(-ENOENT);
70312+ }
70313 dput(dentry);
70314 dentry = ERR_PTR(-EEXIST);
70315 fail:
70316@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70317 error = may_mknod(mode);
70318 if (error)
70319 goto out_dput;
70320+
70321+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
70322+ error = -EPERM;
70323+ goto out_dput;
70324+ }
70325+
70326+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70327+ error = -EACCES;
70328+ goto out_dput;
70329+ }
70330+
70331 error = mnt_want_write(nd.path.mnt);
70332 if (error)
70333 goto out_dput;
70334@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
70335 }
70336 out_drop_write:
70337 mnt_drop_write(nd.path.mnt);
70338+
70339+ if (!error)
70340+ gr_handle_create(dentry, nd.path.mnt);
70341 out_dput:
70342 dput(dentry);
70343 out_unlock:
70344@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70345 if (IS_ERR(dentry))
70346 goto out_unlock;
70347
70348+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
70349+ error = -EACCES;
70350+ goto out_dput;
70351+ }
70352+
70353 if (!IS_POSIXACL(nd.path.dentry->d_inode))
70354 mode &= ~current_umask();
70355 error = mnt_want_write(nd.path.mnt);
70356@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
70357 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
70358 out_drop_write:
70359 mnt_drop_write(nd.path.mnt);
70360+
70361+ if (!error)
70362+ gr_handle_create(dentry, nd.path.mnt);
70363+
70364 out_dput:
70365 dput(dentry);
70366 out_unlock:
70367@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70368 char * name;
70369 struct dentry *dentry;
70370 struct nameidata nd;
70371+ ino_t saved_ino = 0;
70372+ dev_t saved_dev = 0;
70373
70374 error = user_path_parent(dfd, pathname, &nd, &name);
70375 if (error)
70376@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
70377 error = PTR_ERR(dentry);
70378 if (IS_ERR(dentry))
70379 goto exit2;
70380+
70381+ if (dentry->d_inode != NULL) {
70382+ saved_ino = dentry->d_inode->i_ino;
70383+ saved_dev = gr_get_dev_from_dentry(dentry);
70384+
70385+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
70386+ error = -EACCES;
70387+ goto exit3;
70388+ }
70389+ }
70390+
70391 error = mnt_want_write(nd.path.mnt);
70392 if (error)
70393 goto exit3;
70394@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
70395 if (error)
70396 goto exit4;
70397 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
70398+ if (!error && (saved_dev || saved_ino))
70399+ gr_handle_delete(saved_ino, saved_dev);
70400 exit4:
70401 mnt_drop_write(nd.path.mnt);
70402 exit3:
70403@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70404 struct dentry *dentry;
70405 struct nameidata nd;
70406 struct inode *inode = NULL;
70407+ ino_t saved_ino = 0;
70408+ dev_t saved_dev = 0;
70409
70410 error = user_path_parent(dfd, pathname, &nd, &name);
70411 if (error)
70412@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70413 if (nd.last.name[nd.last.len])
70414 goto slashes;
70415 inode = dentry->d_inode;
70416- if (inode)
70417+ if (inode) {
70418+ if (inode->i_nlink <= 1) {
70419+ saved_ino = inode->i_ino;
70420+ saved_dev = gr_get_dev_from_dentry(dentry);
70421+ }
70422+
70423 atomic_inc(&inode->i_count);
70424+
70425+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
70426+ error = -EACCES;
70427+ goto exit2;
70428+ }
70429+ }
70430 error = mnt_want_write(nd.path.mnt);
70431 if (error)
70432 goto exit2;
70433@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
70434 if (error)
70435 goto exit3;
70436 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
70437+ if (!error && (saved_ino || saved_dev))
70438+ gr_handle_delete(saved_ino, saved_dev);
70439 exit3:
70440 mnt_drop_write(nd.path.mnt);
70441 exit2:
70442@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70443 if (IS_ERR(dentry))
70444 goto out_unlock;
70445
70446+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
70447+ error = -EACCES;
70448+ goto out_dput;
70449+ }
70450+
70451 error = mnt_want_write(nd.path.mnt);
70452 if (error)
70453 goto out_dput;
70454@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70455 if (error)
70456 goto out_drop_write;
70457 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
70458+ if (!error)
70459+ gr_handle_create(dentry, nd.path.mnt);
70460 out_drop_write:
70461 mnt_drop_write(nd.path.mnt);
70462 out_dput:
70463@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70464 error = PTR_ERR(new_dentry);
70465 if (IS_ERR(new_dentry))
70466 goto out_unlock;
70467+
70468+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
70469+ old_path.dentry->d_inode,
70470+ old_path.dentry->d_inode->i_mode, to)) {
70471+ error = -EACCES;
70472+ goto out_dput;
70473+ }
70474+
70475+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
70476+ old_path.dentry, old_path.mnt, to)) {
70477+ error = -EACCES;
70478+ goto out_dput;
70479+ }
70480+
70481 error = mnt_want_write(nd.path.mnt);
70482 if (error)
70483 goto out_dput;
70484@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70485 if (error)
70486 goto out_drop_write;
70487 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
70488+ if (!error)
70489+ gr_handle_create(new_dentry, nd.path.mnt);
70490 out_drop_write:
70491 mnt_drop_write(nd.path.mnt);
70492 out_dput:
70493@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70494 char *to;
70495 int error;
70496
70497+ pax_track_stack();
70498+
70499 error = user_path_parent(olddfd, oldname, &oldnd, &from);
70500 if (error)
70501 goto exit;
70502@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70503 if (new_dentry == trap)
70504 goto exit5;
70505
70506+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
70507+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
70508+ to);
70509+ if (error)
70510+ goto exit5;
70511+
70512 error = mnt_want_write(oldnd.path.mnt);
70513 if (error)
70514 goto exit5;
70515@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70516 goto exit6;
70517 error = vfs_rename(old_dir->d_inode, old_dentry,
70518 new_dir->d_inode, new_dentry);
70519+ if (!error)
70520+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
70521+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
70522 exit6:
70523 mnt_drop_write(oldnd.path.mnt);
70524 exit5:
70525@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
70526
70527 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
70528 {
70529+ char tmpbuf[64];
70530+ const char *newlink;
70531 int len;
70532
70533 len = PTR_ERR(link);
70534@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
70535 len = strlen(link);
70536 if (len > (unsigned) buflen)
70537 len = buflen;
70538- if (copy_to_user(buffer, link, len))
70539+
70540+ if (len < sizeof(tmpbuf)) {
70541+ memcpy(tmpbuf, link, len);
70542+ newlink = tmpbuf;
70543+ } else
70544+ newlink = link;
70545+
70546+ if (copy_to_user(buffer, newlink, len))
70547 len = -EFAULT;
70548 out:
70549 return len;
70550diff --git a/fs/namespace.c b/fs/namespace.c
70551index 2beb0fb..11a95a5 100644
70552--- a/fs/namespace.c
70553+++ b/fs/namespace.c
70554@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70555 if (!(sb->s_flags & MS_RDONLY))
70556 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
70557 up_write(&sb->s_umount);
70558+
70559+ gr_log_remount(mnt->mnt_devname, retval);
70560+
70561 return retval;
70562 }
70563
70564@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70565 security_sb_umount_busy(mnt);
70566 up_write(&namespace_sem);
70567 release_mounts(&umount_list);
70568+
70569+ gr_log_unmount(mnt->mnt_devname, retval);
70570+
70571 return retval;
70572 }
70573
70574@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70575 if (retval)
70576 goto dput_out;
70577
70578+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
70579+ retval = -EPERM;
70580+ goto dput_out;
70581+ }
70582+
70583+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
70584+ retval = -EPERM;
70585+ goto dput_out;
70586+ }
70587+
70588 if (flags & MS_REMOUNT)
70589 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
70590 data_page);
70591@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70592 dev_name, data_page);
70593 dput_out:
70594 path_put(&path);
70595+
70596+ gr_log_mount(dev_name, dir_name, retval);
70597+
70598 return retval;
70599 }
70600
70601@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
70602 goto out1;
70603 }
70604
70605+ if (gr_handle_chroot_pivot()) {
70606+ error = -EPERM;
70607+ path_put(&old);
70608+ goto out1;
70609+ }
70610+
70611 read_lock(&current->fs->lock);
70612 root = current->fs->root;
70613 path_get(&current->fs->root);
70614diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
70615index b8b5b30..2bd9ccb 100644
70616--- a/fs/ncpfs/dir.c
70617+++ b/fs/ncpfs/dir.c
70618@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
70619 int res, val = 0, len;
70620 __u8 __name[NCP_MAXPATHLEN + 1];
70621
70622+ pax_track_stack();
70623+
70624 parent = dget_parent(dentry);
70625 dir = parent->d_inode;
70626
70627@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
70628 int error, res, len;
70629 __u8 __name[NCP_MAXPATHLEN + 1];
70630
70631+ pax_track_stack();
70632+
70633 lock_kernel();
70634 error = -EIO;
70635 if (!ncp_conn_valid(server))
70636@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
70637 int error, result, len;
70638 int opmode;
70639 __u8 __name[NCP_MAXPATHLEN + 1];
70640-
70641+
70642 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
70643 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
70644
70645+ pax_track_stack();
70646+
70647 error = -EIO;
70648 lock_kernel();
70649 if (!ncp_conn_valid(server))
70650@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70651 int error, len;
70652 __u8 __name[NCP_MAXPATHLEN + 1];
70653
70654+ pax_track_stack();
70655+
70656 DPRINTK("ncp_mkdir: making %s/%s\n",
70657 dentry->d_parent->d_name.name, dentry->d_name.name);
70658
70659@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70660 if (!ncp_conn_valid(server))
70661 goto out;
70662
70663+ pax_track_stack();
70664+
70665 ncp_age_dentry(server, dentry);
70666 len = sizeof(__name);
70667 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
70668@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
70669 int old_len, new_len;
70670 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
70671
70672+ pax_track_stack();
70673+
70674 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
70675 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
70676 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
70677diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
70678index cf98da1..da890a9 100644
70679--- a/fs/ncpfs/inode.c
70680+++ b/fs/ncpfs/inode.c
70681@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
70682 #endif
70683 struct ncp_entry_info finfo;
70684
70685+ pax_track_stack();
70686+
70687 data.wdog_pid = NULL;
70688 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
70689 if (!server)
70690diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
70691index 2441d1a..96882c1 100644
70692--- a/fs/ncpfs/ncplib_kernel.h
70693+++ b/fs/ncpfs/ncplib_kernel.h
70694@@ -131,7 +131,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
70695 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
70696 const unsigned char *, unsigned int, int);
70697 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70698- const unsigned char *, unsigned int, int);
70699+ const unsigned char *, unsigned int, int) __size_overflow(5);
70700
70701 #define NCP_ESC ':'
70702 #define NCP_IO_TABLE(dentry) (NCP_SERVER((dentry)->d_inode)->nls_io)
70703@@ -147,7 +147,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
70704 int ncp__io2vol(unsigned char *, unsigned int *,
70705 const unsigned char *, unsigned int, int);
70706 int ncp__vol2io(unsigned char *, unsigned int *,
70707- const unsigned char *, unsigned int, int);
70708+ const unsigned char *, unsigned int, int) __size_overflow(5);
70709
70710 #define NCP_IO_TABLE(dentry) NULL
70711 #define ncp_tolower(t, c) tolower(c)
70712diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
70713index bfaef7b..e9d03ca 100644
70714--- a/fs/nfs/inode.c
70715+++ b/fs/nfs/inode.c
70716@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
70717 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
70718 nfsi->attrtimeo_timestamp = jiffies;
70719
70720- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
70721+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
70722 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
70723 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
70724 else
70725@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
70726 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
70727 }
70728
70729-static atomic_long_t nfs_attr_generation_counter;
70730+static atomic_long_unchecked_t nfs_attr_generation_counter;
70731
70732 static unsigned long nfs_read_attr_generation_counter(void)
70733 {
70734- return atomic_long_read(&nfs_attr_generation_counter);
70735+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
70736 }
70737
70738 unsigned long nfs_inc_attr_generation_counter(void)
70739 {
70740- return atomic_long_inc_return(&nfs_attr_generation_counter);
70741+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
70742 }
70743
70744 void nfs_fattr_init(struct nfs_fattr *fattr)
70745diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
70746index cc2f505..f6a236f 100644
70747--- a/fs/nfsd/lockd.c
70748+++ b/fs/nfsd/lockd.c
70749@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
70750 fput(filp);
70751 }
70752
70753-static struct nlmsvc_binding nfsd_nlm_ops = {
70754+static const struct nlmsvc_binding nfsd_nlm_ops = {
70755 .fopen = nlm_fopen, /* open file for locking */
70756 .fclose = nlm_fclose, /* close file */
70757 };
70758diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
70759index cfc3391..dcc083a 100644
70760--- a/fs/nfsd/nfs4state.c
70761+++ b/fs/nfsd/nfs4state.c
70762@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
70763 unsigned int cmd;
70764 int err;
70765
70766+ pax_track_stack();
70767+
70768 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
70769 (long long) lock->lk_offset,
70770 (long long) lock->lk_length);
70771diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
70772index 4a82a96..0d5fb49 100644
70773--- a/fs/nfsd/nfs4xdr.c
70774+++ b/fs/nfsd/nfs4xdr.c
70775@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
70776 struct nfsd4_compoundres *resp = rqstp->rq_resp;
70777 u32 minorversion = resp->cstate.minorversion;
70778
70779+ pax_track_stack();
70780+
70781 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
70782 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
70783 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
70784diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
70785index 2e09588..596421d 100644
70786--- a/fs/nfsd/vfs.c
70787+++ b/fs/nfsd/vfs.c
70788@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70789 } else {
70790 oldfs = get_fs();
70791 set_fs(KERNEL_DS);
70792- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
70793+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
70794 set_fs(oldfs);
70795 }
70796
70797@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70798
70799 /* Write the data. */
70800 oldfs = get_fs(); set_fs(KERNEL_DS);
70801- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
70802+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
70803 set_fs(oldfs);
70804 if (host_err < 0)
70805 goto out_nfserr;
70806@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
70807 */
70808
70809 oldfs = get_fs(); set_fs(KERNEL_DS);
70810- host_err = inode->i_op->readlink(dentry, buf, *lenp);
70811+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
70812 set_fs(oldfs);
70813
70814 if (host_err < 0)
70815diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
70816index f6af760..d0adf34 100644
70817--- a/fs/nilfs2/ioctl.c
70818+++ b/fs/nilfs2/ioctl.c
70819@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70820 unsigned int cmd, void __user *argp)
70821 {
70822 struct nilfs_argv argv[5];
70823- const static size_t argsz[5] = {
70824+ static const size_t argsz[5] = {
70825 sizeof(struct nilfs_vdesc),
70826 sizeof(struct nilfs_period),
70827 sizeof(__u64),
70828@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70829 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
70830 goto out_free;
70831
70832+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
70833+ goto out_free;
70834+
70835 len = argv[n].v_size * argv[n].v_nmembs;
70836 base = (void __user *)(unsigned long)argv[n].v_base;
70837 if (len == 0) {
70838diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
70839index ad391a8..149a8a1 100644
70840--- a/fs/nilfs2/the_nilfs.c
70841+++ b/fs/nilfs2/the_nilfs.c
70842@@ -478,6 +478,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
70843 brelse(sbh[1]);
70844 sbh[1] = NULL;
70845 sbp[1] = NULL;
70846+ valid[1] = 0;
70847 swp = 0;
70848 }
70849 if (!valid[swp]) {
70850diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
70851index 7e54e52..9337248 100644
70852--- a/fs/notify/dnotify/dnotify.c
70853+++ b/fs/notify/dnotify/dnotify.c
70854@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
70855 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
70856 }
70857
70858-static struct fsnotify_ops dnotify_fsnotify_ops = {
70859+static const struct fsnotify_ops dnotify_fsnotify_ops = {
70860 .handle_event = dnotify_handle_event,
70861 .should_send_event = dnotify_should_send_event,
70862 .free_group_priv = NULL,
70863diff --git a/fs/notify/notification.c b/fs/notify/notification.c
70864index b8bf53b..c518688 100644
70865--- a/fs/notify/notification.c
70866+++ b/fs/notify/notification.c
70867@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
70868 * get set to 0 so it will never get 'freed'
70869 */
70870 static struct fsnotify_event q_overflow_event;
70871-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70872+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70873
70874 /**
70875 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
70876@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70877 */
70878 u32 fsnotify_get_cookie(void)
70879 {
70880- return atomic_inc_return(&fsnotify_sync_cookie);
70881+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
70882 }
70883 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
70884
70885diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
70886index 5a9e344..0f8cd28 100644
70887--- a/fs/ntfs/dir.c
70888+++ b/fs/ntfs/dir.c
70889@@ -1328,7 +1328,7 @@ find_next_index_buffer:
70890 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
70891 ~(s64)(ndir->itype.index.block_size - 1)));
70892 /* Bounds checks. */
70893- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70894+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70895 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
70896 "inode 0x%lx or driver bug.", vdir->i_ino);
70897 goto err_out;
70898diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
70899index 663c0e3..b6868e9 100644
70900--- a/fs/ntfs/file.c
70901+++ b/fs/ntfs/file.c
70902@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
70903 #endif /* NTFS_RW */
70904 };
70905
70906-const struct file_operations ntfs_empty_file_ops = {};
70907+const struct file_operations ntfs_empty_file_ops __read_only;
70908
70909-const struct inode_operations ntfs_empty_inode_ops = {};
70910+const struct inode_operations ntfs_empty_inode_ops __read_only;
70911diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
70912index 1cd2934..880b5d2 100644
70913--- a/fs/ocfs2/cluster/masklog.c
70914+++ b/fs/ocfs2/cluster/masklog.c
70915@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
70916 return mlog_mask_store(mlog_attr->mask, buf, count);
70917 }
70918
70919-static struct sysfs_ops mlog_attr_ops = {
70920+static const struct sysfs_ops mlog_attr_ops = {
70921 .show = mlog_show,
70922 .store = mlog_store,
70923 };
70924diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
70925index ac10f83..2cd2607 100644
70926--- a/fs/ocfs2/localalloc.c
70927+++ b/fs/ocfs2/localalloc.c
70928@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
70929 goto bail;
70930 }
70931
70932- atomic_inc(&osb->alloc_stats.moves);
70933+ atomic_inc_unchecked(&osb->alloc_stats.moves);
70934
70935 status = 0;
70936 bail:
70937diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
70938index f010b22..9f9ed34 100644
70939--- a/fs/ocfs2/namei.c
70940+++ b/fs/ocfs2/namei.c
70941@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
70942 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
70943 struct ocfs2_dir_lookup_result target_insert = { NULL, };
70944
70945+ pax_track_stack();
70946+
70947 /* At some point it might be nice to break this function up a
70948 * bit. */
70949
70950diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
70951index d963d86..914cfbd 100644
70952--- a/fs/ocfs2/ocfs2.h
70953+++ b/fs/ocfs2/ocfs2.h
70954@@ -217,11 +217,11 @@ enum ocfs2_vol_state
70955
70956 struct ocfs2_alloc_stats
70957 {
70958- atomic_t moves;
70959- atomic_t local_data;
70960- atomic_t bitmap_data;
70961- atomic_t bg_allocs;
70962- atomic_t bg_extends;
70963+ atomic_unchecked_t moves;
70964+ atomic_unchecked_t local_data;
70965+ atomic_unchecked_t bitmap_data;
70966+ atomic_unchecked_t bg_allocs;
70967+ atomic_unchecked_t bg_extends;
70968 };
70969
70970 enum ocfs2_local_alloc_state
70971diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
70972index 79b5dac..d322952 100644
70973--- a/fs/ocfs2/suballoc.c
70974+++ b/fs/ocfs2/suballoc.c
70975@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
70976 mlog_errno(status);
70977 goto bail;
70978 }
70979- atomic_inc(&osb->alloc_stats.bg_extends);
70980+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
70981
70982 /* You should never ask for this much metadata */
70983 BUG_ON(bits_wanted >
70984@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
70985 mlog_errno(status);
70986 goto bail;
70987 }
70988- atomic_inc(&osb->alloc_stats.bg_allocs);
70989+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
70990
70991 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
70992 ac->ac_bits_given += (*num_bits);
70993@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
70994 mlog_errno(status);
70995 goto bail;
70996 }
70997- atomic_inc(&osb->alloc_stats.bg_allocs);
70998+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
70999
71000 BUG_ON(num_bits != 1);
71001
71002@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71003 cluster_start,
71004 num_clusters);
71005 if (!status)
71006- atomic_inc(&osb->alloc_stats.local_data);
71007+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
71008 } else {
71009 if (min_clusters > (osb->bitmap_cpg - 1)) {
71010 /* The only paths asking for contiguousness
71011@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
71012 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
71013 bg_blkno,
71014 bg_bit_off);
71015- atomic_inc(&osb->alloc_stats.bitmap_data);
71016+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
71017 }
71018 }
71019 if (status < 0) {
71020diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
71021index 9f55be4..a3f8048 100644
71022--- a/fs/ocfs2/super.c
71023+++ b/fs/ocfs2/super.c
71024@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
71025 "%10s => GlobalAllocs: %d LocalAllocs: %d "
71026 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
71027 "Stats",
71028- atomic_read(&osb->alloc_stats.bitmap_data),
71029- atomic_read(&osb->alloc_stats.local_data),
71030- atomic_read(&osb->alloc_stats.bg_allocs),
71031- atomic_read(&osb->alloc_stats.moves),
71032- atomic_read(&osb->alloc_stats.bg_extends));
71033+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
71034+ atomic_read_unchecked(&osb->alloc_stats.local_data),
71035+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
71036+ atomic_read_unchecked(&osb->alloc_stats.moves),
71037+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
71038
71039 out += snprintf(buf + out, len - out,
71040 "%10s => State: %u Descriptor: %llu Size: %u bits "
71041@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
71042 spin_lock_init(&osb->osb_xattr_lock);
71043 ocfs2_init_inode_steal_slot(osb);
71044
71045- atomic_set(&osb->alloc_stats.moves, 0);
71046- atomic_set(&osb->alloc_stats.local_data, 0);
71047- atomic_set(&osb->alloc_stats.bitmap_data, 0);
71048- atomic_set(&osb->alloc_stats.bg_allocs, 0);
71049- atomic_set(&osb->alloc_stats.bg_extends, 0);
71050+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
71051+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
71052+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
71053+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
71054+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
71055
71056 /* Copy the blockcheck stats from the superblock probe */
71057 osb->osb_ecc_stats = *stats;
71058diff --git a/fs/open.c b/fs/open.c
71059index 4f01e06..2a8057a 100644
71060--- a/fs/open.c
71061+++ b/fs/open.c
71062@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
71063 error = locks_verify_truncate(inode, NULL, length);
71064 if (!error)
71065 error = security_path_truncate(&path, length, 0);
71066+
71067+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
71068+ error = -EACCES;
71069+
71070 if (!error) {
71071 vfs_dq_init(inode);
71072 error = do_truncate(path.dentry, length, 0, NULL);
71073@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
71074 if (__mnt_is_readonly(path.mnt))
71075 res = -EROFS;
71076
71077+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
71078+ res = -EACCES;
71079+
71080 out_path_release:
71081 path_put(&path);
71082 out:
71083@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
71084 if (error)
71085 goto dput_and_out;
71086
71087+ gr_log_chdir(path.dentry, path.mnt);
71088+
71089 set_fs_pwd(current->fs, &path);
71090
71091 dput_and_out:
71092@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
71093 goto out_putf;
71094
71095 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
71096+
71097+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
71098+ error = -EPERM;
71099+
71100+ if (!error)
71101+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
71102+
71103 if (!error)
71104 set_fs_pwd(current->fs, &file->f_path);
71105 out_putf:
71106@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
71107 if (!capable(CAP_SYS_CHROOT))
71108 goto dput_and_out;
71109
71110+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
71111+ goto dput_and_out;
71112+
71113 set_fs_root(current->fs, &path);
71114+
71115+ gr_handle_chroot_chdir(&path);
71116+
71117 error = 0;
71118 dput_and_out:
71119 path_put(&path);
71120@@ -596,66 +618,57 @@ out:
71121 return error;
71122 }
71123
71124+static int chmod_common(struct path *path, umode_t mode)
71125+{
71126+ struct inode *inode = path->dentry->d_inode;
71127+ struct iattr newattrs;
71128+ int error;
71129+
71130+ error = mnt_want_write(path->mnt);
71131+ if (error)
71132+ return error;
71133+ mutex_lock(&inode->i_mutex);
71134+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
71135+ error = -EACCES;
71136+ goto out_unlock;
71137+ }
71138+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
71139+ error = -EPERM;
71140+ goto out_unlock;
71141+ }
71142+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71143+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71144+ error = notify_change(path->dentry, &newattrs);
71145+out_unlock:
71146+ mutex_unlock(&inode->i_mutex);
71147+ mnt_drop_write(path->mnt);
71148+ return error;
71149+}
71150+
71151 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
71152 {
71153- struct inode * inode;
71154- struct dentry * dentry;
71155 struct file * file;
71156 int err = -EBADF;
71157- struct iattr newattrs;
71158
71159 file = fget(fd);
71160- if (!file)
71161- goto out;
71162-
71163- dentry = file->f_path.dentry;
71164- inode = dentry->d_inode;
71165-
71166- audit_inode(NULL, dentry);
71167-
71168- err = mnt_want_write_file(file);
71169- if (err)
71170- goto out_putf;
71171- mutex_lock(&inode->i_mutex);
71172- if (mode == (mode_t) -1)
71173- mode = inode->i_mode;
71174- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71175- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71176- err = notify_change(dentry, &newattrs);
71177- mutex_unlock(&inode->i_mutex);
71178- mnt_drop_write(file->f_path.mnt);
71179-out_putf:
71180- fput(file);
71181-out:
71182+ if (file) {
71183+ audit_inode(NULL, file->f_path.dentry);
71184+ err = chmod_common(&file->f_path, mode);
71185+ fput(file);
71186+ }
71187 return err;
71188 }
71189
71190 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
71191 {
71192 struct path path;
71193- struct inode *inode;
71194 int error;
71195- struct iattr newattrs;
71196
71197 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
71198- if (error)
71199- goto out;
71200- inode = path.dentry->d_inode;
71201-
71202- error = mnt_want_write(path.mnt);
71203- if (error)
71204- goto dput_and_out;
71205- mutex_lock(&inode->i_mutex);
71206- if (mode == (mode_t) -1)
71207- mode = inode->i_mode;
71208- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
71209- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
71210- error = notify_change(path.dentry, &newattrs);
71211- mutex_unlock(&inode->i_mutex);
71212- mnt_drop_write(path.mnt);
71213-dput_and_out:
71214- path_put(&path);
71215-out:
71216+ if (!error) {
71217+ error = chmod_common(&path, mode);
71218+ path_put(&path);
71219+ }
71220 return error;
71221 }
71222
71223@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
71224 return sys_fchmodat(AT_FDCWD, filename, mode);
71225 }
71226
71227-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
71228+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
71229 {
71230 struct inode *inode = dentry->d_inode;
71231 int error;
71232 struct iattr newattrs;
71233
71234+ if (!gr_acl_handle_chown(dentry, mnt))
71235+ return -EACCES;
71236+
71237 newattrs.ia_valid = ATTR_CTIME;
71238 if (user != (uid_t) -1) {
71239 newattrs.ia_valid |= ATTR_UID;
71240@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
71241 error = mnt_want_write(path.mnt);
71242 if (error)
71243 goto out_release;
71244- error = chown_common(path.dentry, user, group);
71245+ error = chown_common(path.dentry, user, group, path.mnt);
71246 mnt_drop_write(path.mnt);
71247 out_release:
71248 path_put(&path);
71249@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
71250 error = mnt_want_write(path.mnt);
71251 if (error)
71252 goto out_release;
71253- error = chown_common(path.dentry, user, group);
71254+ error = chown_common(path.dentry, user, group, path.mnt);
71255 mnt_drop_write(path.mnt);
71256 out_release:
71257 path_put(&path);
71258@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
71259 error = mnt_want_write(path.mnt);
71260 if (error)
71261 goto out_release;
71262- error = chown_common(path.dentry, user, group);
71263+ error = chown_common(path.dentry, user, group, path.mnt);
71264 mnt_drop_write(path.mnt);
71265 out_release:
71266 path_put(&path);
71267@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
71268 goto out_fput;
71269 dentry = file->f_path.dentry;
71270 audit_inode(NULL, dentry);
71271- error = chown_common(dentry, user, group);
71272+ error = chown_common(dentry, user, group, file->f_path.mnt);
71273 mnt_drop_write(file->f_path.mnt);
71274 out_fput:
71275 fput(file);
71276@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
71277 if (!IS_ERR(tmp)) {
71278 fd = get_unused_fd_flags(flags);
71279 if (fd >= 0) {
71280- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71281+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
71282 if (IS_ERR(f)) {
71283 put_unused_fd(fd);
71284 fd = PTR_ERR(f);
71285diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
71286index 6ab70f4..f4103d1 100644
71287--- a/fs/partitions/efi.c
71288+++ b/fs/partitions/efi.c
71289@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
71290 if (!bdev || !gpt)
71291 return NULL;
71292
71293+ if (!le32_to_cpu(gpt->num_partition_entries))
71294+ return NULL;
71295+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
71296+ if (!pte)
71297+ return NULL;
71298+
71299 count = le32_to_cpu(gpt->num_partition_entries) *
71300 le32_to_cpu(gpt->sizeof_partition_entry);
71301- if (!count)
71302- return NULL;
71303- pte = kzalloc(count, GFP_KERNEL);
71304- if (!pte)
71305- return NULL;
71306-
71307 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
71308 (u8 *) pte,
71309 count) < count) {
71310diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
71311index dd6efdb..3babc6c 100644
71312--- a/fs/partitions/ldm.c
71313+++ b/fs/partitions/ldm.c
71314@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71315 ldm_error ("A VBLK claims to have %d parts.", num);
71316 return false;
71317 }
71318+
71319 if (rec >= num) {
71320 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
71321 return false;
71322@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
71323 goto found;
71324 }
71325
71326- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
71327+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
71328 if (!f) {
71329 ldm_crit ("Out of memory.");
71330 return false;
71331diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
71332index 5765198..7f8e9e0 100644
71333--- a/fs/partitions/mac.c
71334+++ b/fs/partitions/mac.c
71335@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
71336 return 0; /* not a MacOS disk */
71337 }
71338 blocks_in_map = be32_to_cpu(part->map_count);
71339- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71340- put_dev_sector(sect);
71341- return 0;
71342- }
71343 printk(" [mac]");
71344+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
71345+ put_dev_sector(sect);
71346+ return 0;
71347+ }
71348 for (slot = 1; slot <= blocks_in_map; ++slot) {
71349 int pos = slot * secsize;
71350 put_dev_sector(sect);
71351diff --git a/fs/pipe.c b/fs/pipe.c
71352index d0cc080..8a6f211 100644
71353--- a/fs/pipe.c
71354+++ b/fs/pipe.c
71355@@ -401,9 +401,9 @@ redo:
71356 }
71357 if (bufs) /* More to do? */
71358 continue;
71359- if (!pipe->writers)
71360+ if (!atomic_read(&pipe->writers))
71361 break;
71362- if (!pipe->waiting_writers) {
71363+ if (!atomic_read(&pipe->waiting_writers)) {
71364 /* syscall merging: Usually we must not sleep
71365 * if O_NONBLOCK is set, or if we got some data.
71366 * But if a writer sleeps in kernel space, then
71367@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
71368 mutex_lock(&inode->i_mutex);
71369 pipe = inode->i_pipe;
71370
71371- if (!pipe->readers) {
71372+ if (!atomic_read(&pipe->readers)) {
71373 send_sig(SIGPIPE, current, 0);
71374 ret = -EPIPE;
71375 goto out;
71376@@ -511,7 +511,7 @@ redo1:
71377 for (;;) {
71378 int bufs;
71379
71380- if (!pipe->readers) {
71381+ if (!atomic_read(&pipe->readers)) {
71382 send_sig(SIGPIPE, current, 0);
71383 if (!ret)
71384 ret = -EPIPE;
71385@@ -597,9 +597,9 @@ redo2:
71386 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
71387 do_wakeup = 0;
71388 }
71389- pipe->waiting_writers++;
71390+ atomic_inc(&pipe->waiting_writers);
71391 pipe_wait(pipe);
71392- pipe->waiting_writers--;
71393+ atomic_dec(&pipe->waiting_writers);
71394 }
71395 out:
71396 mutex_unlock(&inode->i_mutex);
71397@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71398 mask = 0;
71399 if (filp->f_mode & FMODE_READ) {
71400 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
71401- if (!pipe->writers && filp->f_version != pipe->w_counter)
71402+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
71403 mask |= POLLHUP;
71404 }
71405
71406@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
71407 * Most Unices do not set POLLERR for FIFOs but on Linux they
71408 * behave exactly like pipes for poll().
71409 */
71410- if (!pipe->readers)
71411+ if (!atomic_read(&pipe->readers))
71412 mask |= POLLERR;
71413 }
71414
71415@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
71416
71417 mutex_lock(&inode->i_mutex);
71418 pipe = inode->i_pipe;
71419- pipe->readers -= decr;
71420- pipe->writers -= decw;
71421+ atomic_sub(decr, &pipe->readers);
71422+ atomic_sub(decw, &pipe->writers);
71423
71424- if (!pipe->readers && !pipe->writers) {
71425+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
71426 free_pipe_info(inode);
71427 } else {
71428 wake_up_interruptible_sync(&pipe->wait);
71429@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
71430
71431 if (inode->i_pipe) {
71432 ret = 0;
71433- inode->i_pipe->readers++;
71434+ atomic_inc(&inode->i_pipe->readers);
71435 }
71436
71437 mutex_unlock(&inode->i_mutex);
71438@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
71439
71440 if (inode->i_pipe) {
71441 ret = 0;
71442- inode->i_pipe->writers++;
71443+ atomic_inc(&inode->i_pipe->writers);
71444 }
71445
71446 mutex_unlock(&inode->i_mutex);
71447@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
71448 if (inode->i_pipe) {
71449 ret = 0;
71450 if (filp->f_mode & FMODE_READ)
71451- inode->i_pipe->readers++;
71452+ atomic_inc(&inode->i_pipe->readers);
71453 if (filp->f_mode & FMODE_WRITE)
71454- inode->i_pipe->writers++;
71455+ atomic_inc(&inode->i_pipe->writers);
71456 }
71457
71458 mutex_unlock(&inode->i_mutex);
71459@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
71460 inode->i_pipe = NULL;
71461 }
71462
71463-static struct vfsmount *pipe_mnt __read_mostly;
71464+struct vfsmount *pipe_mnt __read_mostly;
71465 static int pipefs_delete_dentry(struct dentry *dentry)
71466 {
71467 /*
71468@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
71469 goto fail_iput;
71470 inode->i_pipe = pipe;
71471
71472- pipe->readers = pipe->writers = 1;
71473+ atomic_set(&pipe->readers, 1);
71474+ atomic_set(&pipe->writers, 1);
71475 inode->i_fop = &rdwr_pipefifo_fops;
71476
71477 /*
71478diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
71479index 50f8f06..c5755df 100644
71480--- a/fs/proc/Kconfig
71481+++ b/fs/proc/Kconfig
71482@@ -30,12 +30,12 @@ config PROC_FS
71483
71484 config PROC_KCORE
71485 bool "/proc/kcore support" if !ARM
71486- depends on PROC_FS && MMU
71487+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
71488
71489 config PROC_VMCORE
71490 bool "/proc/vmcore support (EXPERIMENTAL)"
71491- depends on PROC_FS && CRASH_DUMP
71492- default y
71493+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
71494+ default n
71495 help
71496 Exports the dump image of crashed kernel in ELF format.
71497
71498@@ -59,8 +59,8 @@ config PROC_SYSCTL
71499 limited in memory.
71500
71501 config PROC_PAGE_MONITOR
71502- default y
71503- depends on PROC_FS && MMU
71504+ default n
71505+ depends on PROC_FS && MMU && !GRKERNSEC
71506 bool "Enable /proc page monitoring" if EMBEDDED
71507 help
71508 Various /proc files exist to monitor process memory utilization:
71509diff --git a/fs/proc/array.c b/fs/proc/array.c
71510index c5ef152..28c94f7 100644
71511--- a/fs/proc/array.c
71512+++ b/fs/proc/array.c
71513@@ -60,6 +60,7 @@
71514 #include <linux/tty.h>
71515 #include <linux/string.h>
71516 #include <linux/mman.h>
71517+#include <linux/grsecurity.h>
71518 #include <linux/proc_fs.h>
71519 #include <linux/ioport.h>
71520 #include <linux/uaccess.h>
71521@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
71522 p->nivcsw);
71523 }
71524
71525+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71526+static inline void task_pax(struct seq_file *m, struct task_struct *p)
71527+{
71528+ if (p->mm)
71529+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
71530+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
71531+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
71532+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
71533+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
71534+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
71535+ else
71536+ seq_printf(m, "PaX:\t-----\n");
71537+}
71538+#endif
71539+
71540 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71541 struct pid *pid, struct task_struct *task)
71542 {
71543@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71544 task_cap(m, task);
71545 cpuset_task_status_allowed(m, task);
71546 task_context_switch_counts(m, task);
71547+
71548+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71549+ task_pax(m, task);
71550+#endif
71551+
71552+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
71553+ task_grsec_rbac(m, task);
71554+#endif
71555+
71556 return 0;
71557 }
71558
71559+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71560+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71561+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71562+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71563+#endif
71564+
71565 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71566 struct pid *pid, struct task_struct *task, int whole)
71567 {
71568@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71569 cputime_t cutime, cstime, utime, stime;
71570 cputime_t cgtime, gtime;
71571 unsigned long rsslim = 0;
71572- char tcomm[sizeof(task->comm)];
71573+ char tcomm[sizeof(task->comm)] = { 0 };
71574 unsigned long flags;
71575
71576+ pax_track_stack();
71577+
71578+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71579+ if (current->exec_id != m->exec_id) {
71580+ gr_log_badprocpid("stat");
71581+ return 0;
71582+ }
71583+#endif
71584+
71585 state = *get_task_state(task);
71586 vsize = eip = esp = 0;
71587 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
71588@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71589 gtime = task_gtime(task);
71590 }
71591
71592+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71593+ if (PAX_RAND_FLAGS(mm)) {
71594+ eip = 0;
71595+ esp = 0;
71596+ wchan = 0;
71597+ }
71598+#endif
71599+#ifdef CONFIG_GRKERNSEC_HIDESYM
71600+ wchan = 0;
71601+ eip =0;
71602+ esp =0;
71603+#endif
71604+
71605 /* scale priority and nice values from timeslices to -20..20 */
71606 /* to make it look like a "normal" Unix priority/nice value */
71607 priority = task_prio(task);
71608@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71609 vsize,
71610 mm ? get_mm_rss(mm) : 0,
71611 rsslim,
71612+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71613+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
71614+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
71615+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
71616+#else
71617 mm ? (permitted ? mm->start_code : 1) : 0,
71618 mm ? (permitted ? mm->end_code : 1) : 0,
71619 (permitted && mm) ? mm->start_stack : 0,
71620+#endif
71621 esp,
71622 eip,
71623 /* The signal information here is obsolete.
71624@@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71625 struct pid *pid, struct task_struct *task)
71626 {
71627 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
71628- struct mm_struct *mm = get_task_mm(task);
71629+ struct mm_struct *mm;
71630
71631+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71632+ if (current->exec_id != m->exec_id) {
71633+ gr_log_badprocpid("statm");
71634+ return 0;
71635+ }
71636+#endif
71637+
71638+ mm = get_task_mm(task);
71639 if (mm) {
71640 size = task_statm(mm, &shared, &text, &data, &resident);
71641 mmput(mm);
71642@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71643
71644 return 0;
71645 }
71646+
71647+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71648+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
71649+{
71650+ u32 curr_ip = 0;
71651+ unsigned long flags;
71652+
71653+ if (lock_task_sighand(task, &flags)) {
71654+ curr_ip = task->signal->curr_ip;
71655+ unlock_task_sighand(task, &flags);
71656+ }
71657+
71658+ return sprintf(buffer, "%pI4\n", &curr_ip);
71659+}
71660+#endif
71661diff --git a/fs/proc/base.c b/fs/proc/base.c
71662index 67f7dc0..a86ad9a 100644
71663--- a/fs/proc/base.c
71664+++ b/fs/proc/base.c
71665@@ -102,6 +102,22 @@ struct pid_entry {
71666 union proc_op op;
71667 };
71668
71669+struct getdents_callback {
71670+ struct linux_dirent __user * current_dir;
71671+ struct linux_dirent __user * previous;
71672+ struct file * file;
71673+ int count;
71674+ int error;
71675+};
71676+
71677+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
71678+ loff_t offset, u64 ino, unsigned int d_type)
71679+{
71680+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
71681+ buf->error = -EINVAL;
71682+ return 0;
71683+}
71684+
71685 #define NOD(NAME, MODE, IOP, FOP, OP) { \
71686 .name = (NAME), \
71687 .len = sizeof(NAME) - 1, \
71688@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
71689 if (task == current)
71690 return 0;
71691
71692+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
71693+ return -EPERM;
71694+
71695 /*
71696 * If current is actively ptrace'ing, and would also be
71697 * permitted to freshly attach with ptrace now, permit it.
71698@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
71699 if (!mm->arg_end)
71700 goto out_mm; /* Shh! No looking before we're done */
71701
71702+ if (gr_acl_handle_procpidmem(task))
71703+ goto out_mm;
71704+
71705 len = mm->arg_end - mm->arg_start;
71706
71707 if (len > PAGE_SIZE)
71708@@ -287,12 +309,28 @@ out:
71709 return res;
71710 }
71711
71712+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71713+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71714+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71715+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71716+#endif
71717+
71718 static int proc_pid_auxv(struct task_struct *task, char *buffer)
71719 {
71720 int res = 0;
71721 struct mm_struct *mm = get_task_mm(task);
71722 if (mm) {
71723 unsigned int nwords = 0;
71724+
71725+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71726+ /* allow if we're currently ptracing this task */
71727+ if (PAX_RAND_FLAGS(mm) &&
71728+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
71729+ mmput(mm);
71730+ return 0;
71731+ }
71732+#endif
71733+
71734 do {
71735 nwords += 2;
71736 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
71737@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
71738 }
71739
71740
71741-#ifdef CONFIG_KALLSYMS
71742+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71743 /*
71744 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
71745 * Returns the resolved symbol. If that fails, simply return the address.
71746@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
71747 mutex_unlock(&task->cred_guard_mutex);
71748 }
71749
71750-#ifdef CONFIG_STACKTRACE
71751+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71752
71753 #define MAX_STACK_TRACE_DEPTH 64
71754
71755@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
71756 return count;
71757 }
71758
71759-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71760+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71761 static int proc_pid_syscall(struct task_struct *task, char *buffer)
71762 {
71763 long nr;
71764@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
71765 /************************************************************************/
71766
71767 /* permission checks */
71768-static int proc_fd_access_allowed(struct inode *inode)
71769+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
71770 {
71771 struct task_struct *task;
71772 int allowed = 0;
71773@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
71774 */
71775 task = get_proc_task(inode);
71776 if (task) {
71777- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71778+ if (log)
71779+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
71780+ else
71781+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71782 put_task_struct(task);
71783 }
71784 return allowed;
71785@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
71786 static int mem_open(struct inode* inode, struct file* file)
71787 {
71788 file->private_data = (void*)((long)current->self_exec_id);
71789+
71790+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71791+ file->f_version = current->exec_id;
71792+#endif
71793+
71794 return 0;
71795 }
71796
71797+static int task_dumpable(struct task_struct *task);
71798+
71799 static ssize_t mem_read(struct file * file, char __user * buf,
71800 size_t count, loff_t *ppos)
71801 {
71802@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
71803 int ret = -ESRCH;
71804 struct mm_struct *mm;
71805
71806+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71807+ if (file->f_version != current->exec_id) {
71808+ gr_log_badprocpid("mem");
71809+ return 0;
71810+ }
71811+#endif
71812+
71813 if (!task)
71814 goto out_no_task;
71815
71816@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
71817 if (!task)
71818 goto out_no_task;
71819
71820+ if (gr_acl_handle_procpidmem(task))
71821+ goto out;
71822+
71823 if (!ptrace_may_access(task, PTRACE_MODE_READ))
71824 goto out;
71825
71826@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
71827 path_put(&nd->path);
71828
71829 /* Are we allowed to snoop on the tasks file descriptors? */
71830- if (!proc_fd_access_allowed(inode))
71831+ if (!proc_fd_access_allowed(inode,0))
71832 goto out;
71833
71834 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
71835@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
71836 struct path path;
71837
71838 /* Are we allowed to snoop on the tasks file descriptors? */
71839- if (!proc_fd_access_allowed(inode))
71840- goto out;
71841+ /* logging this is needed for learning on chromium to work properly,
71842+ but we don't want to flood the logs from 'ps' which does a readlink
71843+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
71844+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
71845+ */
71846+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
71847+ if (!proc_fd_access_allowed(inode,0))
71848+ goto out;
71849+ } else {
71850+ if (!proc_fd_access_allowed(inode,1))
71851+ goto out;
71852+ }
71853
71854 error = PROC_I(inode)->op.proc_get_link(inode, &path);
71855 if (error)
71856@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
71857 rcu_read_lock();
71858 cred = __task_cred(task);
71859 inode->i_uid = cred->euid;
71860+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71861+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71862+#else
71863 inode->i_gid = cred->egid;
71864+#endif
71865 rcu_read_unlock();
71866 }
71867 security_task_to_inode(task, inode);
71868@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71869 struct inode *inode = dentry->d_inode;
71870 struct task_struct *task;
71871 const struct cred *cred;
71872+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71873+ const struct cred *tmpcred = current_cred();
71874+#endif
71875
71876 generic_fillattr(inode, stat);
71877
71878@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71879 stat->uid = 0;
71880 stat->gid = 0;
71881 task = pid_task(proc_pid(inode), PIDTYPE_PID);
71882+
71883+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
71884+ rcu_read_unlock();
71885+ return -ENOENT;
71886+ }
71887+
71888 if (task) {
71889+ cred = __task_cred(task);
71890+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71891+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
71892+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71893+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
71894+#endif
71895+ ) {
71896+#endif
71897 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71898+#ifdef CONFIG_GRKERNSEC_PROC_USER
71899+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71900+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71901+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
71902+#endif
71903 task_dumpable(task)) {
71904- cred = __task_cred(task);
71905 stat->uid = cred->euid;
71906+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71907+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
71908+#else
71909 stat->gid = cred->egid;
71910+#endif
71911 }
71912+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71913+ } else {
71914+ rcu_read_unlock();
71915+ return -ENOENT;
71916+ }
71917+#endif
71918 }
71919 rcu_read_unlock();
71920 return 0;
71921@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
71922
71923 if (task) {
71924 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71925+#ifdef CONFIG_GRKERNSEC_PROC_USER
71926+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71927+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71928+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
71929+#endif
71930 task_dumpable(task)) {
71931 rcu_read_lock();
71932 cred = __task_cred(task);
71933 inode->i_uid = cred->euid;
71934+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71935+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71936+#else
71937 inode->i_gid = cred->egid;
71938+#endif
71939 rcu_read_unlock();
71940 } else {
71941 inode->i_uid = 0;
71942@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
71943 int fd = proc_fd(inode);
71944
71945 if (task) {
71946- files = get_files_struct(task);
71947+ if (!gr_acl_handle_procpidmem(task))
71948+ files = get_files_struct(task);
71949 put_task_struct(task);
71950 }
71951 if (files) {
71952@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
71953 static int proc_fd_permission(struct inode *inode, int mask)
71954 {
71955 int rv;
71956+ struct task_struct *task;
71957
71958 rv = generic_permission(inode, mask, NULL);
71959- if (rv == 0)
71960- return 0;
71961+
71962 if (task_pid(current) == proc_pid(inode))
71963 rv = 0;
71964+
71965+ task = get_proc_task(inode);
71966+ if (task == NULL)
71967+ return rv;
71968+
71969+ if (gr_acl_handle_procpidmem(task))
71970+ rv = -EACCES;
71971+
71972+ put_task_struct(task);
71973+
71974 return rv;
71975 }
71976
71977@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
71978 if (!task)
71979 goto out_no_task;
71980
71981+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
71982+ goto out;
71983+
71984 /*
71985 * Yes, it does not scale. And it should not. Don't add
71986 * new entries into /proc/<tgid>/ without very good reasons.
71987@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
71988 if (!task)
71989 goto out_no_task;
71990
71991+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
71992+ goto out;
71993+
71994 ret = 0;
71995 i = filp->f_pos;
71996 switch (i) {
71997@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
71998 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
71999 void *cookie)
72000 {
72001- char *s = nd_get_link(nd);
72002+ const char *s = nd_get_link(nd);
72003 if (!IS_ERR(s))
72004 __putname(s);
72005 }
72006@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
72007 #ifdef CONFIG_SCHED_DEBUG
72008 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72009 #endif
72010-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72011+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72012 INF("syscall", S_IRUGO, proc_pid_syscall),
72013 #endif
72014 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72015@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
72016 #ifdef CONFIG_SECURITY
72017 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72018 #endif
72019-#ifdef CONFIG_KALLSYMS
72020+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72021 INF("wchan", S_IRUGO, proc_pid_wchan),
72022 #endif
72023-#ifdef CONFIG_STACKTRACE
72024+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72025 ONE("stack", S_IRUGO, proc_pid_stack),
72026 #endif
72027 #ifdef CONFIG_SCHEDSTATS
72028@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
72029 #ifdef CONFIG_TASK_IO_ACCOUNTING
72030 INF("io", S_IRUSR, proc_tgid_io_accounting),
72031 #endif
72032+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72033+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
72034+#endif
72035 };
72036
72037 static int proc_tgid_base_readdir(struct file * filp,
72038@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
72039 if (!inode)
72040 goto out;
72041
72042+#ifdef CONFIG_GRKERNSEC_PROC_USER
72043+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
72044+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72045+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72046+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
72047+#else
72048 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
72049+#endif
72050 inode->i_op = &proc_tgid_base_inode_operations;
72051 inode->i_fop = &proc_tgid_base_operations;
72052 inode->i_flags|=S_IMMUTABLE;
72053@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
72054 if (!task)
72055 goto out;
72056
72057+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
72058+ goto out_put_task;
72059+
72060 result = proc_pid_instantiate(dir, dentry, task, NULL);
72061+out_put_task:
72062 put_task_struct(task);
72063 out:
72064 return result;
72065@@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72066 {
72067 unsigned int nr;
72068 struct task_struct *reaper;
72069+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72070+ const struct cred *tmpcred = current_cred();
72071+ const struct cred *itercred;
72072+#endif
72073+ filldir_t __filldir = filldir;
72074 struct tgid_iter iter;
72075 struct pid_namespace *ns;
72076
72077@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
72078 for (iter = next_tgid(ns, iter);
72079 iter.task;
72080 iter.tgid += 1, iter = next_tgid(ns, iter)) {
72081+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72082+ rcu_read_lock();
72083+ itercred = __task_cred(iter.task);
72084+#endif
72085+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
72086+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72087+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
72088+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72089+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
72090+#endif
72091+ )
72092+#endif
72093+ )
72094+ __filldir = &gr_fake_filldir;
72095+ else
72096+ __filldir = filldir;
72097+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72098+ rcu_read_unlock();
72099+#endif
72100 filp->f_pos = iter.tgid + TGID_OFFSET;
72101- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
72102+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
72103 put_task_struct(iter.task);
72104 goto out;
72105 }
72106@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
72107 #ifdef CONFIG_SCHED_DEBUG
72108 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
72109 #endif
72110-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
72111+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
72112 INF("syscall", S_IRUGO, proc_pid_syscall),
72113 #endif
72114 INF("cmdline", S_IRUGO, proc_pid_cmdline),
72115@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
72116 #ifdef CONFIG_SECURITY
72117 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
72118 #endif
72119-#ifdef CONFIG_KALLSYMS
72120+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72121 INF("wchan", S_IRUGO, proc_pid_wchan),
72122 #endif
72123-#ifdef CONFIG_STACKTRACE
72124+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72125 ONE("stack", S_IRUGO, proc_pid_stack),
72126 #endif
72127 #ifdef CONFIG_SCHEDSTATS
72128diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
72129index 82676e3..5f8518a 100644
72130--- a/fs/proc/cmdline.c
72131+++ b/fs/proc/cmdline.c
72132@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
72133
72134 static int __init proc_cmdline_init(void)
72135 {
72136+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72137+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
72138+#else
72139 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
72140+#endif
72141 return 0;
72142 }
72143 module_init(proc_cmdline_init);
72144diff --git a/fs/proc/devices.c b/fs/proc/devices.c
72145index 59ee7da..469b4b6 100644
72146--- a/fs/proc/devices.c
72147+++ b/fs/proc/devices.c
72148@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
72149
72150 static int __init proc_devices_init(void)
72151 {
72152+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72153+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
72154+#else
72155 proc_create("devices", 0, NULL, &proc_devinfo_operations);
72156+#endif
72157 return 0;
72158 }
72159 module_init(proc_devices_init);
72160diff --git a/fs/proc/inode.c b/fs/proc/inode.c
72161index d78ade3..81767f9 100644
72162--- a/fs/proc/inode.c
72163+++ b/fs/proc/inode.c
72164@@ -18,12 +18,19 @@
72165 #include <linux/module.h>
72166 #include <linux/smp_lock.h>
72167 #include <linux/sysctl.h>
72168+#include <linux/grsecurity.h>
72169
72170 #include <asm/system.h>
72171 #include <asm/uaccess.h>
72172
72173 #include "internal.h"
72174
72175+#ifdef CONFIG_PROC_SYSCTL
72176+extern const struct inode_operations proc_sys_inode_operations;
72177+extern const struct inode_operations proc_sys_dir_operations;
72178+#endif
72179+
72180+
72181 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
72182 {
72183 atomic_inc(&de->count);
72184@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
72185 de_put(de);
72186 if (PROC_I(inode)->sysctl)
72187 sysctl_head_put(PROC_I(inode)->sysctl);
72188+
72189+#ifdef CONFIG_PROC_SYSCTL
72190+ if (inode->i_op == &proc_sys_inode_operations ||
72191+ inode->i_op == &proc_sys_dir_operations)
72192+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
72193+#endif
72194+
72195 clear_inode(inode);
72196 }
72197
72198@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
72199 if (de->mode) {
72200 inode->i_mode = de->mode;
72201 inode->i_uid = de->uid;
72202+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72203+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
72204+#else
72205 inode->i_gid = de->gid;
72206+#endif
72207 }
72208 if (de->size)
72209 inode->i_size = de->size;
72210diff --git a/fs/proc/internal.h b/fs/proc/internal.h
72211index 753ca37..26bcf3b 100644
72212--- a/fs/proc/internal.h
72213+++ b/fs/proc/internal.h
72214@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
72215 struct pid *pid, struct task_struct *task);
72216 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
72217 struct pid *pid, struct task_struct *task);
72218+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
72219+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
72220+#endif
72221 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
72222
72223 extern const struct file_operations proc_maps_operations;
72224diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
72225index b442dac..aab29cb 100644
72226--- a/fs/proc/kcore.c
72227+++ b/fs/proc/kcore.c
72228@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
72229 off_t offset = 0;
72230 struct kcore_list *m;
72231
72232+ pax_track_stack();
72233+
72234 /* setup ELF header */
72235 elf = (struct elfhdr *) bufp;
72236 bufp += sizeof(struct elfhdr);
72237@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72238 * the addresses in the elf_phdr on our list.
72239 */
72240 start = kc_offset_to_vaddr(*fpos - elf_buflen);
72241- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
72242+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
72243+ if (tsz > buflen)
72244 tsz = buflen;
72245-
72246+
72247 while (buflen) {
72248 struct kcore_list *m;
72249
72250@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72251 kfree(elf_buf);
72252 } else {
72253 if (kern_addr_valid(start)) {
72254- unsigned long n;
72255+ char *elf_buf;
72256+ mm_segment_t oldfs;
72257
72258- n = copy_to_user(buffer, (char *)start, tsz);
72259- /*
72260- * We cannot distingush between fault on source
72261- * and fault on destination. When this happens
72262- * we clear too and hope it will trigger the
72263- * EFAULT again.
72264- */
72265- if (n) {
72266- if (clear_user(buffer + tsz - n,
72267- n))
72268+ elf_buf = kmalloc(tsz, GFP_KERNEL);
72269+ if (!elf_buf)
72270+ return -ENOMEM;
72271+ oldfs = get_fs();
72272+ set_fs(KERNEL_DS);
72273+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
72274+ set_fs(oldfs);
72275+ if (copy_to_user(buffer, elf_buf, tsz)) {
72276+ kfree(elf_buf);
72277 return -EFAULT;
72278+ }
72279 }
72280+ set_fs(oldfs);
72281+ kfree(elf_buf);
72282 } else {
72283 if (clear_user(buffer, tsz))
72284 return -EFAULT;
72285@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
72286
72287 static int open_kcore(struct inode *inode, struct file *filp)
72288 {
72289+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72290+ return -EPERM;
72291+#endif
72292 if (!capable(CAP_SYS_RAWIO))
72293 return -EPERM;
72294 if (kcore_need_update)
72295diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
72296index 7ca7834..cfe90a4 100644
72297--- a/fs/proc/kmsg.c
72298+++ b/fs/proc/kmsg.c
72299@@ -12,37 +12,37 @@
72300 #include <linux/poll.h>
72301 #include <linux/proc_fs.h>
72302 #include <linux/fs.h>
72303+#include <linux/syslog.h>
72304
72305 #include <asm/uaccess.h>
72306 #include <asm/io.h>
72307
72308 extern wait_queue_head_t log_wait;
72309
72310-extern int do_syslog(int type, char __user *bug, int count);
72311-
72312 static int kmsg_open(struct inode * inode, struct file * file)
72313 {
72314- return do_syslog(1,NULL,0);
72315+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
72316 }
72317
72318 static int kmsg_release(struct inode * inode, struct file * file)
72319 {
72320- (void) do_syslog(0,NULL,0);
72321+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
72322 return 0;
72323 }
72324
72325 static ssize_t kmsg_read(struct file *file, char __user *buf,
72326 size_t count, loff_t *ppos)
72327 {
72328- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
72329+ if ((file->f_flags & O_NONBLOCK) &&
72330+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72331 return -EAGAIN;
72332- return do_syslog(2, buf, count);
72333+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
72334 }
72335
72336 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
72337 {
72338 poll_wait(file, &log_wait, wait);
72339- if (do_syslog(9, NULL, 0))
72340+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
72341 return POLLIN | POLLRDNORM;
72342 return 0;
72343 }
72344diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
72345index a65239c..ad1182a 100644
72346--- a/fs/proc/meminfo.c
72347+++ b/fs/proc/meminfo.c
72348@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72349 unsigned long pages[NR_LRU_LISTS];
72350 int lru;
72351
72352+ pax_track_stack();
72353+
72354 /*
72355 * display in kilobytes.
72356 */
72357@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
72358 vmi.used >> 10,
72359 vmi.largest_chunk >> 10
72360 #ifdef CONFIG_MEMORY_FAILURE
72361- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
72362+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
72363 #endif
72364 );
72365
72366diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
72367index 9fe7d7e..cdb62c9 100644
72368--- a/fs/proc/nommu.c
72369+++ b/fs/proc/nommu.c
72370@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
72371 if (len < 1)
72372 len = 1;
72373 seq_printf(m, "%*c", len, ' ');
72374- seq_path(m, &file->f_path, "");
72375+ seq_path(m, &file->f_path, "\n\\");
72376 }
72377
72378 seq_putc(m, '\n');
72379diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
72380index 04d1270..25e1173 100644
72381--- a/fs/proc/proc_net.c
72382+++ b/fs/proc/proc_net.c
72383@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
72384 struct task_struct *task;
72385 struct nsproxy *ns;
72386 struct net *net = NULL;
72387+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72388+ const struct cred *cred = current_cred();
72389+#endif
72390+
72391+#ifdef CONFIG_GRKERNSEC_PROC_USER
72392+ if (cred->fsuid)
72393+ return net;
72394+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72395+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
72396+ return net;
72397+#endif
72398
72399 rcu_read_lock();
72400 task = pid_task(proc_pid(dir), PIDTYPE_PID);
72401diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
72402index f667e8a..55f4d96 100644
72403--- a/fs/proc/proc_sysctl.c
72404+++ b/fs/proc/proc_sysctl.c
72405@@ -7,11 +7,13 @@
72406 #include <linux/security.h>
72407 #include "internal.h"
72408
72409+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
72410+
72411 static const struct dentry_operations proc_sys_dentry_operations;
72412 static const struct file_operations proc_sys_file_operations;
72413-static const struct inode_operations proc_sys_inode_operations;
72414+const struct inode_operations proc_sys_inode_operations;
72415 static const struct file_operations proc_sys_dir_file_operations;
72416-static const struct inode_operations proc_sys_dir_operations;
72417+const struct inode_operations proc_sys_dir_operations;
72418
72419 static struct inode *proc_sys_make_inode(struct super_block *sb,
72420 struct ctl_table_header *head, struct ctl_table *table)
72421@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72422 if (!p)
72423 goto out;
72424
72425+ if (gr_handle_sysctl(p, MAY_EXEC))
72426+ goto out;
72427+
72428 err = ERR_PTR(-ENOMEM);
72429 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
72430 if (h)
72431@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
72432
72433 err = NULL;
72434 dentry->d_op = &proc_sys_dentry_operations;
72435+
72436+ gr_handle_proc_create(dentry, inode);
72437+
72438 d_add(dentry, inode);
72439
72440 out:
72441@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
72442 return -ENOMEM;
72443 } else {
72444 child->d_op = &proc_sys_dentry_operations;
72445+
72446+ gr_handle_proc_create(child, inode);
72447+
72448 d_add(child, inode);
72449 }
72450 } else {
72451@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
72452 if (*pos < file->f_pos)
72453 continue;
72454
72455+ if (gr_handle_sysctl(table, 0))
72456+ continue;
72457+
72458 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
72459 if (res)
72460 return res;
72461@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
72462 if (IS_ERR(head))
72463 return PTR_ERR(head);
72464
72465+ if (table && gr_handle_sysctl(table, MAY_EXEC))
72466+ return -ENOENT;
72467+
72468 generic_fillattr(inode, stat);
72469 if (table)
72470 stat->mode = (stat->mode & S_IFMT) | table->mode;
72471@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
72472 };
72473
72474 static const struct file_operations proc_sys_dir_file_operations = {
72475+ .read = generic_read_dir,
72476 .readdir = proc_sys_readdir,
72477 .llseek = generic_file_llseek,
72478 };
72479
72480-static const struct inode_operations proc_sys_inode_operations = {
72481+const struct inode_operations proc_sys_inode_operations = {
72482 .permission = proc_sys_permission,
72483 .setattr = proc_sys_setattr,
72484 .getattr = proc_sys_getattr,
72485 };
72486
72487-static const struct inode_operations proc_sys_dir_operations = {
72488+const struct inode_operations proc_sys_dir_operations = {
72489 .lookup = proc_sys_lookup,
72490 .permission = proc_sys_permission,
72491 .setattr = proc_sys_setattr,
72492diff --git a/fs/proc/root.c b/fs/proc/root.c
72493index b080b79..d957e63 100644
72494--- a/fs/proc/root.c
72495+++ b/fs/proc/root.c
72496@@ -134,7 +134,15 @@ void __init proc_root_init(void)
72497 #ifdef CONFIG_PROC_DEVICETREE
72498 proc_device_tree_init();
72499 #endif
72500+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72501+#ifdef CONFIG_GRKERNSEC_PROC_USER
72502+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
72503+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72504+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
72505+#endif
72506+#else
72507 proc_mkdir("bus", NULL);
72508+#endif
72509 proc_sys_init();
72510 }
72511
72512diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
72513index 3b7b82a..4b420b0 100644
72514--- a/fs/proc/task_mmu.c
72515+++ b/fs/proc/task_mmu.c
72516@@ -8,6 +8,7 @@
72517 #include <linux/mempolicy.h>
72518 #include <linux/swap.h>
72519 #include <linux/swapops.h>
72520+#include <linux/grsecurity.h>
72521
72522 #include <asm/elf.h>
72523 #include <asm/uaccess.h>
72524@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72525 "VmStk:\t%8lu kB\n"
72526 "VmExe:\t%8lu kB\n"
72527 "VmLib:\t%8lu kB\n"
72528- "VmPTE:\t%8lu kB\n",
72529- hiwater_vm << (PAGE_SHIFT-10),
72530+ "VmPTE:\t%8lu kB\n"
72531+
72532+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72533+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
72534+#endif
72535+
72536+ ,hiwater_vm << (PAGE_SHIFT-10),
72537 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
72538 mm->locked_vm << (PAGE_SHIFT-10),
72539 hiwater_rss << (PAGE_SHIFT-10),
72540 total_rss << (PAGE_SHIFT-10),
72541 data << (PAGE_SHIFT-10),
72542 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
72543- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
72544+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
72545+
72546+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72547+ , mm->context.user_cs_base, mm->context.user_cs_limit
72548+#endif
72549+
72550+ );
72551 }
72552
72553 unsigned long task_vsize(struct mm_struct *mm)
72554@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
72555 struct proc_maps_private *priv = m->private;
72556 struct vm_area_struct *vma = v;
72557
72558- vma_stop(priv, vma);
72559+ if (!IS_ERR(vma))
72560+ vma_stop(priv, vma);
72561 if (priv->task)
72562 put_task_struct(priv->task);
72563 }
72564@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
72565 return ret;
72566 }
72567
72568+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72569+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
72570+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
72571+ _mm->pax_flags & MF_PAX_SEGMEXEC))
72572+#endif
72573+
72574 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72575 {
72576 struct mm_struct *mm = vma->vm_mm;
72577@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72578 int flags = vma->vm_flags;
72579 unsigned long ino = 0;
72580 unsigned long long pgoff = 0;
72581- unsigned long start;
72582 dev_t dev = 0;
72583 int len;
72584
72585@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72586 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
72587 }
72588
72589- /* We don't show the stack guard page in /proc/maps */
72590- start = vma->vm_start;
72591- if (vma->vm_flags & VM_GROWSDOWN)
72592- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
72593- start += PAGE_SIZE;
72594-
72595 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
72596- start,
72597+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72598+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
72599+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
72600+#else
72601+ vma->vm_start,
72602 vma->vm_end,
72603+#endif
72604 flags & VM_READ ? 'r' : '-',
72605 flags & VM_WRITE ? 'w' : '-',
72606 flags & VM_EXEC ? 'x' : '-',
72607 flags & VM_MAYSHARE ? 's' : 'p',
72608+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72609+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
72610+#else
72611 pgoff,
72612+#endif
72613 MAJOR(dev), MINOR(dev), ino, &len);
72614
72615 /*
72616@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72617 */
72618 if (file) {
72619 pad_len_spaces(m, len);
72620- seq_path(m, &file->f_path, "\n");
72621+ seq_path(m, &file->f_path, "\n\\");
72622 } else {
72623 const char *name = arch_vma_name(vma);
72624 if (!name) {
72625@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72626 if (vma->vm_start <= mm->brk &&
72627 vma->vm_end >= mm->start_brk) {
72628 name = "[heap]";
72629- } else if (vma->vm_start <= mm->start_stack &&
72630- vma->vm_end >= mm->start_stack) {
72631+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
72632+ (vma->vm_start <= mm->start_stack &&
72633+ vma->vm_end >= mm->start_stack)) {
72634 name = "[stack]";
72635 }
72636 } else {
72637@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
72638 struct proc_maps_private *priv = m->private;
72639 struct task_struct *task = priv->task;
72640
72641+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72642+ if (current->exec_id != m->exec_id) {
72643+ gr_log_badprocpid("maps");
72644+ return 0;
72645+ }
72646+#endif
72647+
72648 show_map_vma(m, vma);
72649
72650 if (m->count < m->size) /* vma is copied successfully */
72651@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
72652 .private = &mss,
72653 };
72654
72655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72656+ if (current->exec_id != m->exec_id) {
72657+ gr_log_badprocpid("smaps");
72658+ return 0;
72659+ }
72660+#endif
72661 memset(&mss, 0, sizeof mss);
72662- mss.vma = vma;
72663- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72664- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72665+
72666+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72667+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
72668+#endif
72669+ mss.vma = vma;
72670+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72671+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72672+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72673+ }
72674+#endif
72675
72676 show_map_vma(m, vma);
72677
72678@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
72679 "Swap: %8lu kB\n"
72680 "KernelPageSize: %8lu kB\n"
72681 "MMUPageSize: %8lu kB\n",
72682+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72683+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
72684+#else
72685 (vma->vm_end - vma->vm_start) >> 10,
72686+#endif
72687 mss.resident >> 10,
72688 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
72689 mss.shared_clean >> 10,
72690diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
72691index 8f5c05d..c99c76d 100644
72692--- a/fs/proc/task_nommu.c
72693+++ b/fs/proc/task_nommu.c
72694@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72695 else
72696 bytes += kobjsize(mm);
72697
72698- if (current->fs && current->fs->users > 1)
72699+ if (current->fs && atomic_read(&current->fs->users) > 1)
72700 sbytes += kobjsize(current->fs);
72701 else
72702 bytes += kobjsize(current->fs);
72703@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
72704 if (len < 1)
72705 len = 1;
72706 seq_printf(m, "%*c", len, ' ');
72707- seq_path(m, &file->f_path, "");
72708+ seq_path(m, &file->f_path, "\n\\");
72709 }
72710
72711 seq_putc(m, '\n');
72712diff --git a/fs/readdir.c b/fs/readdir.c
72713index 7723401..30059a6 100644
72714--- a/fs/readdir.c
72715+++ b/fs/readdir.c
72716@@ -16,6 +16,7 @@
72717 #include <linux/security.h>
72718 #include <linux/syscalls.h>
72719 #include <linux/unistd.h>
72720+#include <linux/namei.h>
72721
72722 #include <asm/uaccess.h>
72723
72724@@ -67,6 +68,7 @@ struct old_linux_dirent {
72725
72726 struct readdir_callback {
72727 struct old_linux_dirent __user * dirent;
72728+ struct file * file;
72729 int result;
72730 };
72731
72732@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
72733 buf->result = -EOVERFLOW;
72734 return -EOVERFLOW;
72735 }
72736+
72737+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72738+ return 0;
72739+
72740 buf->result++;
72741 dirent = buf->dirent;
72742 if (!access_ok(VERIFY_WRITE, dirent,
72743@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
72744
72745 buf.result = 0;
72746 buf.dirent = dirent;
72747+ buf.file = file;
72748
72749 error = vfs_readdir(file, fillonedir, &buf);
72750 if (buf.result)
72751@@ -142,6 +149,7 @@ struct linux_dirent {
72752 struct getdents_callback {
72753 struct linux_dirent __user * current_dir;
72754 struct linux_dirent __user * previous;
72755+ struct file * file;
72756 int count;
72757 int error;
72758 };
72759@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
72760 buf->error = -EOVERFLOW;
72761 return -EOVERFLOW;
72762 }
72763+
72764+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72765+ return 0;
72766+
72767 dirent = buf->previous;
72768 if (dirent) {
72769 if (__put_user(offset, &dirent->d_off))
72770@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
72771 buf.previous = NULL;
72772 buf.count = count;
72773 buf.error = 0;
72774+ buf.file = file;
72775
72776 error = vfs_readdir(file, filldir, &buf);
72777 if (error >= 0)
72778@@ -228,6 +241,7 @@ out:
72779 struct getdents_callback64 {
72780 struct linux_dirent64 __user * current_dir;
72781 struct linux_dirent64 __user * previous;
72782+ struct file *file;
72783 int count;
72784 int error;
72785 };
72786@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
72787 buf->error = -EINVAL; /* only used if we fail.. */
72788 if (reclen > buf->count)
72789 return -EINVAL;
72790+
72791+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72792+ return 0;
72793+
72794 dirent = buf->previous;
72795 if (dirent) {
72796 if (__put_user(offset, &dirent->d_off))
72797@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72798
72799 buf.current_dir = dirent;
72800 buf.previous = NULL;
72801+ buf.file = file;
72802 buf.count = count;
72803 buf.error = 0;
72804
72805@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72806 error = buf.error;
72807 lastdirent = buf.previous;
72808 if (lastdirent) {
72809- typeof(lastdirent->d_off) d_off = file->f_pos;
72810+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
72811 if (__put_user(d_off, &lastdirent->d_off))
72812 error = -EFAULT;
72813 else
72814diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
72815index d42c30c..4fd8718 100644
72816--- a/fs/reiserfs/dir.c
72817+++ b/fs/reiserfs/dir.c
72818@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
72819 struct reiserfs_dir_entry de;
72820 int ret = 0;
72821
72822+ pax_track_stack();
72823+
72824 reiserfs_write_lock(inode->i_sb);
72825
72826 reiserfs_check_lock_depth(inode->i_sb, "readdir");
72827diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
72828index 128d3f7..8840d44 100644
72829--- a/fs/reiserfs/do_balan.c
72830+++ b/fs/reiserfs/do_balan.c
72831@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
72832 return;
72833 }
72834
72835- atomic_inc(&(fs_generation(tb->tb_sb)));
72836+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
72837 do_balance_starts(tb);
72838
72839 /* balance leaf returns 0 except if combining L R and S into
72840diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
72841index 72cb1cc..d0e3181 100644
72842--- a/fs/reiserfs/item_ops.c
72843+++ b/fs/reiserfs/item_ops.c
72844@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
72845 vi->vi_index, vi->vi_type, vi->vi_ih);
72846 }
72847
72848-static struct item_operations stat_data_ops = {
72849+static const struct item_operations stat_data_ops = {
72850 .bytes_number = sd_bytes_number,
72851 .decrement_key = sd_decrement_key,
72852 .is_left_mergeable = sd_is_left_mergeable,
72853@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
72854 vi->vi_index, vi->vi_type, vi->vi_ih);
72855 }
72856
72857-static struct item_operations direct_ops = {
72858+static const struct item_operations direct_ops = {
72859 .bytes_number = direct_bytes_number,
72860 .decrement_key = direct_decrement_key,
72861 .is_left_mergeable = direct_is_left_mergeable,
72862@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
72863 vi->vi_index, vi->vi_type, vi->vi_ih);
72864 }
72865
72866-static struct item_operations indirect_ops = {
72867+static const struct item_operations indirect_ops = {
72868 .bytes_number = indirect_bytes_number,
72869 .decrement_key = indirect_decrement_key,
72870 .is_left_mergeable = indirect_is_left_mergeable,
72871@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
72872 printk("\n");
72873 }
72874
72875-static struct item_operations direntry_ops = {
72876+static const struct item_operations direntry_ops = {
72877 .bytes_number = direntry_bytes_number,
72878 .decrement_key = direntry_decrement_key,
72879 .is_left_mergeable = direntry_is_left_mergeable,
72880@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
72881 "Invalid item type observed, run fsck ASAP");
72882 }
72883
72884-static struct item_operations errcatch_ops = {
72885+static const struct item_operations errcatch_ops = {
72886 errcatch_bytes_number,
72887 errcatch_decrement_key,
72888 errcatch_is_left_mergeable,
72889@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
72890 #error Item types must use disk-format assigned values.
72891 #endif
72892
72893-struct item_operations *item_ops[TYPE_ANY + 1] = {
72894+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
72895 &stat_data_ops,
72896 &indirect_ops,
72897 &direct_ops,
72898diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
72899index b5fe0aa..e0e25c4 100644
72900--- a/fs/reiserfs/journal.c
72901+++ b/fs/reiserfs/journal.c
72902@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
72903 struct buffer_head *bh;
72904 int i, j;
72905
72906+ pax_track_stack();
72907+
72908 bh = __getblk(dev, block, bufsize);
72909 if (buffer_uptodate(bh))
72910 return (bh);
72911diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
72912index 2715791..b8996db 100644
72913--- a/fs/reiserfs/namei.c
72914+++ b/fs/reiserfs/namei.c
72915@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
72916 unsigned long savelink = 1;
72917 struct timespec ctime;
72918
72919+ pax_track_stack();
72920+
72921 /* three balancings: (1) old name removal, (2) new name insertion
72922 and (3) maybe "save" link insertion
72923 stat data updates: (1) old directory,
72924diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
72925index 9229e55..3d2e3b7 100644
72926--- a/fs/reiserfs/procfs.c
72927+++ b/fs/reiserfs/procfs.c
72928@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
72929 "SMALL_TAILS " : "NO_TAILS ",
72930 replay_only(sb) ? "REPLAY_ONLY " : "",
72931 convert_reiserfs(sb) ? "CONV " : "",
72932- atomic_read(&r->s_generation_counter),
72933+ atomic_read_unchecked(&r->s_generation_counter),
72934 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
72935 SF(s_do_balance), SF(s_unneeded_left_neighbor),
72936 SF(s_good_search_by_key_reada), SF(s_bmaps),
72937@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
72938 struct journal_params *jp = &rs->s_v1.s_journal;
72939 char b[BDEVNAME_SIZE];
72940
72941+ pax_track_stack();
72942+
72943 seq_printf(m, /* on-disk fields */
72944 "jp_journal_1st_block: \t%i\n"
72945 "jp_journal_dev: \t%s[%x]\n"
72946diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
72947index d036ee5..4c7dca1 100644
72948--- a/fs/reiserfs/stree.c
72949+++ b/fs/reiserfs/stree.c
72950@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
72951 int iter = 0;
72952 #endif
72953
72954+ pax_track_stack();
72955+
72956 BUG_ON(!th->t_trans_id);
72957
72958 init_tb_struct(th, &s_del_balance, sb, path,
72959@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
72960 int retval;
72961 int quota_cut_bytes = 0;
72962
72963+ pax_track_stack();
72964+
72965 BUG_ON(!th->t_trans_id);
72966
72967 le_key2cpu_key(&cpu_key, key);
72968@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
72969 int quota_cut_bytes;
72970 loff_t tail_pos = 0;
72971
72972+ pax_track_stack();
72973+
72974 BUG_ON(!th->t_trans_id);
72975
72976 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
72977@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
72978 int retval;
72979 int fs_gen;
72980
72981+ pax_track_stack();
72982+
72983 BUG_ON(!th->t_trans_id);
72984
72985 fs_gen = get_generation(inode->i_sb);
72986@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
72987 int fs_gen = 0;
72988 int quota_bytes = 0;
72989
72990+ pax_track_stack();
72991+
72992 BUG_ON(!th->t_trans_id);
72993
72994 if (inode) { /* Do we count quotas for item? */
72995diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
72996index 7cb1285..c726cd0 100644
72997--- a/fs/reiserfs/super.c
72998+++ b/fs/reiserfs/super.c
72999@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
73000 {.option_name = NULL}
73001 };
73002
73003+ pax_track_stack();
73004+
73005 *blocks = 0;
73006 if (!options || !*options)
73007 /* use default configuration: create tails, journaling on, no
73008diff --git a/fs/select.c b/fs/select.c
73009index fd38ce2..f5381b8 100644
73010--- a/fs/select.c
73011+++ b/fs/select.c
73012@@ -20,6 +20,7 @@
73013 #include <linux/module.h>
73014 #include <linux/slab.h>
73015 #include <linux/poll.h>
73016+#include <linux/security.h>
73017 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
73018 #include <linux/file.h>
73019 #include <linux/fdtable.h>
73020@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
73021 int retval, i, timed_out = 0;
73022 unsigned long slack = 0;
73023
73024+ pax_track_stack();
73025+
73026 rcu_read_lock();
73027 retval = max_select_fd(n, fds);
73028 rcu_read_unlock();
73029@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
73030 /* Allocate small arguments on the stack to save memory and be faster */
73031 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
73032
73033+ pax_track_stack();
73034+
73035 ret = -EINVAL;
73036 if (n < 0)
73037 goto out_nofds;
73038@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
73039 struct poll_list *walk = head;
73040 unsigned long todo = nfds;
73041
73042+ pax_track_stack();
73043+
73044+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
73045 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
73046 return -EINVAL;
73047
73048diff --git a/fs/seq_file.c b/fs/seq_file.c
73049index eae7d9d..b7613c6 100644
73050--- a/fs/seq_file.c
73051+++ b/fs/seq_file.c
73052@@ -9,6 +9,7 @@
73053 #include <linux/module.h>
73054 #include <linux/seq_file.h>
73055 #include <linux/slab.h>
73056+#include <linux/sched.h>
73057
73058 #include <asm/uaccess.h>
73059 #include <asm/page.h>
73060@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
73061 memset(p, 0, sizeof(*p));
73062 mutex_init(&p->lock);
73063 p->op = op;
73064+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73065+ p->exec_id = current->exec_id;
73066+#endif
73067
73068 /*
73069 * Wrappers around seq_open(e.g. swaps_open) need to be
73070@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
73071 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
73072 void *data)
73073 {
73074- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
73075+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
73076 int res = -ENOMEM;
73077
73078 if (op) {
73079diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
73080index 71c29b6..54694dd 100644
73081--- a/fs/smbfs/proc.c
73082+++ b/fs/smbfs/proc.c
73083@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
73084
73085 out:
73086 if (server->local_nls != NULL && server->remote_nls != NULL)
73087- server->ops->convert = convert_cp;
73088+ *(void **)&server->ops->convert = convert_cp;
73089 else
73090- server->ops->convert = convert_memcpy;
73091+ *(void **)&server->ops->convert = convert_memcpy;
73092
73093 smb_unlock_server(server);
73094 return n;
73095@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
73096
73097 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
73098 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
73099- server->ops->getattr = smb_proc_getattr_core;
73100+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
73101 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
73102- server->ops->getattr = smb_proc_getattr_ff;
73103+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
73104 }
73105
73106 /* Decode server capabilities */
73107@@ -3439,7 +3439,7 @@ out:
73108 static void
73109 install_ops(struct smb_ops *dst, struct smb_ops *src)
73110 {
73111- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73112+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
73113 }
73114
73115 /* < LANMAN2 */
73116diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
73117index 00b2909..2ace383 100644
73118--- a/fs/smbfs/symlink.c
73119+++ b/fs/smbfs/symlink.c
73120@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
73121
73122 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
73123 {
73124- char *s = nd_get_link(nd);
73125+ const char *s = nd_get_link(nd);
73126 if (!IS_ERR(s))
73127 __putname(s);
73128 }
73129diff --git a/fs/splice.c b/fs/splice.c
73130index bb92b7c5..5aa72b0 100644
73131--- a/fs/splice.c
73132+++ b/fs/splice.c
73133@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73134 pipe_lock(pipe);
73135
73136 for (;;) {
73137- if (!pipe->readers) {
73138+ if (!atomic_read(&pipe->readers)) {
73139 send_sig(SIGPIPE, current, 0);
73140 if (!ret)
73141 ret = -EPIPE;
73142@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
73143 do_wakeup = 0;
73144 }
73145
73146- pipe->waiting_writers++;
73147+ atomic_inc(&pipe->waiting_writers);
73148 pipe_wait(pipe);
73149- pipe->waiting_writers--;
73150+ atomic_dec(&pipe->waiting_writers);
73151 }
73152
73153 pipe_unlock(pipe);
73154@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
73155 .spd_release = spd_release_page,
73156 };
73157
73158+ pax_track_stack();
73159+
73160 index = *ppos >> PAGE_CACHE_SHIFT;
73161 loff = *ppos & ~PAGE_CACHE_MASK;
73162 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73163@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
73164 old_fs = get_fs();
73165 set_fs(get_ds());
73166 /* The cast to a user pointer is valid due to the set_fs() */
73167- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
73168+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
73169 set_fs(old_fs);
73170
73171 return res;
73172@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
73173 old_fs = get_fs();
73174 set_fs(get_ds());
73175 /* The cast to a user pointer is valid due to the set_fs() */
73176- res = vfs_write(file, (const char __user *)buf, count, &pos);
73177+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
73178 set_fs(old_fs);
73179
73180 return res;
73181@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73182 .spd_release = spd_release_page,
73183 };
73184
73185+ pax_track_stack();
73186+
73187 index = *ppos >> PAGE_CACHE_SHIFT;
73188 offset = *ppos & ~PAGE_CACHE_MASK;
73189 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
73190@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
73191 goto err;
73192
73193 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
73194- vec[i].iov_base = (void __user *) page_address(page);
73195+ vec[i].iov_base = (__force void __user *) page_address(page);
73196 vec[i].iov_len = this_len;
73197 pages[i] = page;
73198 spd.nr_pages++;
73199@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
73200 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
73201 {
73202 while (!pipe->nrbufs) {
73203- if (!pipe->writers)
73204+ if (!atomic_read(&pipe->writers))
73205 return 0;
73206
73207- if (!pipe->waiting_writers && sd->num_spliced)
73208+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
73209 return 0;
73210
73211 if (sd->flags & SPLICE_F_NONBLOCK)
73212@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
73213 * out of the pipe right after the splice_to_pipe(). So set
73214 * PIPE_READERS appropriately.
73215 */
73216- pipe->readers = 1;
73217+ atomic_set(&pipe->readers, 1);
73218
73219 current->splice_pipe = pipe;
73220 }
73221@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
73222 .spd_release = spd_release_page,
73223 };
73224
73225+ pax_track_stack();
73226+
73227 pipe = pipe_info(file->f_path.dentry->d_inode);
73228 if (!pipe)
73229 return -EBADF;
73230@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73231 ret = -ERESTARTSYS;
73232 break;
73233 }
73234- if (!pipe->writers)
73235+ if (!atomic_read(&pipe->writers))
73236 break;
73237- if (!pipe->waiting_writers) {
73238+ if (!atomic_read(&pipe->waiting_writers)) {
73239 if (flags & SPLICE_F_NONBLOCK) {
73240 ret = -EAGAIN;
73241 break;
73242@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73243 pipe_lock(pipe);
73244
73245 while (pipe->nrbufs >= PIPE_BUFFERS) {
73246- if (!pipe->readers) {
73247+ if (!atomic_read(&pipe->readers)) {
73248 send_sig(SIGPIPE, current, 0);
73249 ret = -EPIPE;
73250 break;
73251@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
73252 ret = -ERESTARTSYS;
73253 break;
73254 }
73255- pipe->waiting_writers++;
73256+ atomic_inc(&pipe->waiting_writers);
73257 pipe_wait(pipe);
73258- pipe->waiting_writers--;
73259+ atomic_dec(&pipe->waiting_writers);
73260 }
73261
73262 pipe_unlock(pipe);
73263@@ -1786,14 +1792,14 @@ retry:
73264 pipe_double_lock(ipipe, opipe);
73265
73266 do {
73267- if (!opipe->readers) {
73268+ if (!atomic_read(&opipe->readers)) {
73269 send_sig(SIGPIPE, current, 0);
73270 if (!ret)
73271 ret = -EPIPE;
73272 break;
73273 }
73274
73275- if (!ipipe->nrbufs && !ipipe->writers)
73276+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
73277 break;
73278
73279 /*
73280@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73281 pipe_double_lock(ipipe, opipe);
73282
73283 do {
73284- if (!opipe->readers) {
73285+ if (!atomic_read(&opipe->readers)) {
73286 send_sig(SIGPIPE, current, 0);
73287 if (!ret)
73288 ret = -EPIPE;
73289@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
73290 * return EAGAIN if we have the potential of some data in the
73291 * future, otherwise just return 0
73292 */
73293- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
73294+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
73295 ret = -EAGAIN;
73296
73297 pipe_unlock(ipipe);
73298diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
73299index 60c702b..dddc2b5 100644
73300--- a/fs/sysfs/bin.c
73301+++ b/fs/sysfs/bin.c
73302@@ -67,6 +67,8 @@ fill_read(struct dentry *dentry, char *buffer, loff_t off, size_t count)
73303 }
73304
73305 static ssize_t
73306+read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
73307+static ssize_t
73308 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
73309 {
73310 struct bin_buffer *bb = file->private_data;
73311diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
73312index e020183..18d64b4 100644
73313--- a/fs/sysfs/dir.c
73314+++ b/fs/sysfs/dir.c
73315@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
73316 struct sysfs_dirent *sd;
73317 int rc;
73318
73319+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
73320+ const char *parent_name = parent_sd->s_name;
73321+
73322+ mode = S_IFDIR | S_IRWXU;
73323+
73324+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
73325+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
73326+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
73327+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
73328+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
73329+#endif
73330+
73331 /* allocate */
73332 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
73333 if (!sd)
73334diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
73335index 7118a38..70af853 100644
73336--- a/fs/sysfs/file.c
73337+++ b/fs/sysfs/file.c
73338@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
73339
73340 struct sysfs_open_dirent {
73341 atomic_t refcnt;
73342- atomic_t event;
73343+ atomic_unchecked_t event;
73344 wait_queue_head_t poll;
73345 struct list_head buffers; /* goes through sysfs_buffer.list */
73346 };
73347@@ -53,7 +53,7 @@ struct sysfs_buffer {
73348 size_t count;
73349 loff_t pos;
73350 char * page;
73351- struct sysfs_ops * ops;
73352+ const struct sysfs_ops * ops;
73353 struct mutex mutex;
73354 int needs_read_fill;
73355 int event;
73356@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73357 {
73358 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73359 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73360- struct sysfs_ops * ops = buffer->ops;
73361+ const struct sysfs_ops * ops = buffer->ops;
73362 int ret = 0;
73363 ssize_t count;
73364
73365@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
73366 if (!sysfs_get_active_two(attr_sd))
73367 return -ENODEV;
73368
73369- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
73370+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
73371 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
73372
73373 sysfs_put_active_two(attr_sd);
73374@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
73375 {
73376 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
73377 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73378- struct sysfs_ops * ops = buffer->ops;
73379+ const struct sysfs_ops * ops = buffer->ops;
73380 int rc;
73381
73382 /* need attr_sd for attr and ops, its parent for kobj */
73383@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
73384 return -ENOMEM;
73385
73386 atomic_set(&new_od->refcnt, 0);
73387- atomic_set(&new_od->event, 1);
73388+ atomic_set_unchecked(&new_od->event, 1);
73389 init_waitqueue_head(&new_od->poll);
73390 INIT_LIST_HEAD(&new_od->buffers);
73391 goto retry;
73392@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
73393 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
73394 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
73395 struct sysfs_buffer *buffer;
73396- struct sysfs_ops *ops;
73397+ const struct sysfs_ops *ops;
73398 int error = -EACCES;
73399 char *p;
73400
73401@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
73402
73403 sysfs_put_active_two(attr_sd);
73404
73405- if (buffer->event != atomic_read(&od->event))
73406+ if (buffer->event != atomic_read_unchecked(&od->event))
73407 goto trigger;
73408
73409 return DEFAULT_POLLMASK;
73410@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
73411
73412 od = sd->s_attr.open;
73413 if (od) {
73414- atomic_inc(&od->event);
73415+ atomic_inc_unchecked(&od->event);
73416 wake_up_interruptible(&od->poll);
73417 }
73418
73419diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
73420index c5081ad..342ea86 100644
73421--- a/fs/sysfs/symlink.c
73422+++ b/fs/sysfs/symlink.c
73423@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
73424
73425 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
73426 {
73427- char *page = nd_get_link(nd);
73428+ const char *page = nd_get_link(nd);
73429 if (!IS_ERR(page))
73430 free_page((unsigned long)page);
73431 }
73432diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
73433index 1e06853..b06d325 100644
73434--- a/fs/udf/balloc.c
73435+++ b/fs/udf/balloc.c
73436@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
73437
73438 mutex_lock(&sbi->s_alloc_mutex);
73439 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73440- if (bloc->logicalBlockNum < 0 ||
73441- (bloc->logicalBlockNum + count) >
73442- partmap->s_partition_len) {
73443+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73444 udf_debug("%d < %d || %d + %d > %d\n",
73445 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
73446 count, partmap->s_partition_len);
73447@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
73448
73449 mutex_lock(&sbi->s_alloc_mutex);
73450 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
73451- if (bloc->logicalBlockNum < 0 ||
73452- (bloc->logicalBlockNum + count) >
73453- partmap->s_partition_len) {
73454+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73455 udf_debug("%d < %d || %d + %d > %d\n",
73456 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
73457 partmap->s_partition_len);
73458diff --git a/fs/udf/inode.c b/fs/udf/inode.c
73459index 6d24c2c..fff470f 100644
73460--- a/fs/udf/inode.c
73461+++ b/fs/udf/inode.c
73462@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
73463 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
73464 int lastblock = 0;
73465
73466+ pax_track_stack();
73467+
73468 prev_epos.offset = udf_file_entry_alloc_offset(inode);
73469 prev_epos.block = iinfo->i_location;
73470 prev_epos.bh = NULL;
73471diff --git a/fs/udf/misc.c b/fs/udf/misc.c
73472index 9215700..bf1f68e 100644
73473--- a/fs/udf/misc.c
73474+++ b/fs/udf/misc.c
73475@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
73476
73477 u8 udf_tag_checksum(const struct tag *t)
73478 {
73479- u8 *data = (u8 *)t;
73480+ const u8 *data = (const u8 *)t;
73481 u8 checksum = 0;
73482 int i;
73483 for (i = 0; i < sizeof(struct tag); ++i)
73484diff --git a/fs/utimes.c b/fs/utimes.c
73485index e4c75db..b4df0e0 100644
73486--- a/fs/utimes.c
73487+++ b/fs/utimes.c
73488@@ -1,6 +1,7 @@
73489 #include <linux/compiler.h>
73490 #include <linux/file.h>
73491 #include <linux/fs.h>
73492+#include <linux/security.h>
73493 #include <linux/linkage.h>
73494 #include <linux/mount.h>
73495 #include <linux/namei.h>
73496@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
73497 goto mnt_drop_write_and_out;
73498 }
73499 }
73500+
73501+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
73502+ error = -EACCES;
73503+ goto mnt_drop_write_and_out;
73504+ }
73505+
73506 mutex_lock(&inode->i_mutex);
73507 error = notify_change(path->dentry, &newattrs);
73508 mutex_unlock(&inode->i_mutex);
73509diff --git a/fs/xattr.c b/fs/xattr.c
73510index 6d4f6d3..cda3958 100644
73511--- a/fs/xattr.c
73512+++ b/fs/xattr.c
73513@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
73514 * Extended attribute SET operations
73515 */
73516 static long
73517-setxattr(struct dentry *d, const char __user *name, const void __user *value,
73518+setxattr(struct path *path, const char __user *name, const void __user *value,
73519 size_t size, int flags)
73520 {
73521 int error;
73522@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
73523 return PTR_ERR(kvalue);
73524 }
73525
73526- error = vfs_setxattr(d, kname, kvalue, size, flags);
73527+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
73528+ error = -EACCES;
73529+ goto out;
73530+ }
73531+
73532+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
73533+out:
73534 kfree(kvalue);
73535 return error;
73536 }
73537@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
73538 return error;
73539 error = mnt_want_write(path.mnt);
73540 if (!error) {
73541- error = setxattr(path.dentry, name, value, size, flags);
73542+ error = setxattr(&path, name, value, size, flags);
73543 mnt_drop_write(path.mnt);
73544 }
73545 path_put(&path);
73546@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
73547 return error;
73548 error = mnt_want_write(path.mnt);
73549 if (!error) {
73550- error = setxattr(path.dentry, name, value, size, flags);
73551+ error = setxattr(&path, name, value, size, flags);
73552 mnt_drop_write(path.mnt);
73553 }
73554 path_put(&path);
73555@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
73556 const void __user *,value, size_t, size, int, flags)
73557 {
73558 struct file *f;
73559- struct dentry *dentry;
73560 int error = -EBADF;
73561
73562 f = fget(fd);
73563 if (!f)
73564 return error;
73565- dentry = f->f_path.dentry;
73566- audit_inode(NULL, dentry);
73567+ audit_inode(NULL, f->f_path.dentry);
73568 error = mnt_want_write_file(f);
73569 if (!error) {
73570- error = setxattr(dentry, name, value, size, flags);
73571+ error = setxattr(&f->f_path, name, value, size, flags);
73572 mnt_drop_write(f->f_path.mnt);
73573 }
73574 fput(f);
73575diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
73576index c6ad7c7..f2847a7 100644
73577--- a/fs/xattr_acl.c
73578+++ b/fs/xattr_acl.c
73579@@ -17,8 +17,8 @@
73580 struct posix_acl *
73581 posix_acl_from_xattr(const void *value, size_t size)
73582 {
73583- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73584- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73585+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73586+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73587 int count;
73588 struct posix_acl *acl;
73589 struct posix_acl_entry *acl_e;
73590diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
73591index 942362f..88f96f5 100644
73592--- a/fs/xfs/linux-2.6/xfs_ioctl.c
73593+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
73594@@ -134,7 +134,7 @@ xfs_find_handle(
73595 }
73596
73597 error = -EFAULT;
73598- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
73599+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
73600 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
73601 goto out_put;
73602
73603@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
73604 if (IS_ERR(dentry))
73605 return PTR_ERR(dentry);
73606
73607- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
73608+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
73609 if (!kbuf)
73610 goto out_dput;
73611
73612@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
73613 xfs_mount_t *mp,
73614 void __user *arg)
73615 {
73616- xfs_fsop_geom_t fsgeo;
73617+ xfs_fsop_geom_t fsgeo;
73618 int error;
73619
73620 error = xfs_fs_geometry(mp, &fsgeo, 3);
73621diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
73622index bad485a..479bd32 100644
73623--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
73624+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
73625@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
73626 xfs_fsop_geom_t fsgeo;
73627 int error;
73628
73629+ memset(&fsgeo, 0, sizeof(fsgeo));
73630 error = xfs_fs_geometry(mp, &fsgeo, 3);
73631 if (error)
73632 return -error;
73633diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
73634index 1f3b4b8..6102f6d 100644
73635--- a/fs/xfs/linux-2.6/xfs_iops.c
73636+++ b/fs/xfs/linux-2.6/xfs_iops.c
73637@@ -468,7 +468,7 @@ xfs_vn_put_link(
73638 struct nameidata *nd,
73639 void *p)
73640 {
73641- char *s = nd_get_link(nd);
73642+ const char *s = nd_get_link(nd);
73643
73644 if (!IS_ERR(s))
73645 kfree(s);
73646diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
73647index 8971fb0..5fc1eb2 100644
73648--- a/fs/xfs/xfs_bmap.c
73649+++ b/fs/xfs/xfs_bmap.c
73650@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
73651 int nmap,
73652 int ret_nmap);
73653 #else
73654-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
73655+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
73656 #endif /* DEBUG */
73657
73658 #if defined(XFS_RW_TRACE)
73659diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
73660index e89734e..5e84d8d 100644
73661--- a/fs/xfs/xfs_dir2_sf.c
73662+++ b/fs/xfs/xfs_dir2_sf.c
73663@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
73664 }
73665
73666 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
73667- if (filldir(dirent, sfep->name, sfep->namelen,
73668+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
73669+ char name[sfep->namelen];
73670+ memcpy(name, sfep->name, sfep->namelen);
73671+ if (filldir(dirent, name, sfep->namelen,
73672+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
73673+ *offset = off & 0x7fffffff;
73674+ return 0;
73675+ }
73676+ } else if (filldir(dirent, sfep->name, sfep->namelen,
73677 off & 0x7fffffff, ino, DT_UNKNOWN)) {
73678 *offset = off & 0x7fffffff;
73679 return 0;
73680diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
73681index 8f32f50..b6a41e8 100644
73682--- a/fs/xfs/xfs_vnodeops.c
73683+++ b/fs/xfs/xfs_vnodeops.c
73684@@ -564,13 +564,18 @@ xfs_readlink(
73685
73686 xfs_ilock(ip, XFS_ILOCK_SHARED);
73687
73688- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
73689- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
73690-
73691 pathlen = ip->i_d.di_size;
73692 if (!pathlen)
73693 goto out;
73694
73695+ if (pathlen > MAXPATHLEN) {
73696+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
73697+ __func__, (unsigned long long)ip->i_ino, pathlen);
73698+ ASSERT(0);
73699+ error = XFS_ERROR(EFSCORRUPTED);
73700+ goto out;
73701+ }
73702+
73703 if (ip->i_df.if_flags & XFS_IFINLINE) {
73704 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
73705 link[pathlen] = '\0';
73706diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
73707new file mode 100644
73708index 0000000..50819f8
73709--- /dev/null
73710+++ b/grsecurity/Kconfig
73711@@ -0,0 +1,1077 @@
73712+#
73713+# grsecurity configuration
73714+#
73715+
73716+menu "Grsecurity"
73717+
73718+config GRKERNSEC
73719+ bool "Grsecurity"
73720+ select CRYPTO
73721+ select CRYPTO_SHA256
73722+ help
73723+ If you say Y here, you will be able to configure many features
73724+ that will enhance the security of your system. It is highly
73725+ recommended that you say Y here and read through the help
73726+ for each option so that you fully understand the features and
73727+ can evaluate their usefulness for your machine.
73728+
73729+choice
73730+ prompt "Security Level"
73731+ depends on GRKERNSEC
73732+ default GRKERNSEC_CUSTOM
73733+
73734+config GRKERNSEC_LOW
73735+ bool "Low"
73736+ select GRKERNSEC_LINK
73737+ select GRKERNSEC_FIFO
73738+ select GRKERNSEC_RANDNET
73739+ select GRKERNSEC_DMESG
73740+ select GRKERNSEC_CHROOT
73741+ select GRKERNSEC_CHROOT_CHDIR
73742+
73743+ help
73744+ If you choose this option, several of the grsecurity options will
73745+ be enabled that will give you greater protection against a number
73746+ of attacks, while assuring that none of your software will have any
73747+ conflicts with the additional security measures. If you run a lot
73748+ of unusual software, or you are having problems with the higher
73749+ security levels, you should say Y here. With this option, the
73750+ following features are enabled:
73751+
73752+ - Linking restrictions
73753+ - FIFO restrictions
73754+ - Restricted dmesg
73755+ - Enforced chdir("/") on chroot
73756+ - Runtime module disabling
73757+
73758+config GRKERNSEC_MEDIUM
73759+ bool "Medium"
73760+ select PAX
73761+ select PAX_EI_PAX
73762+ select PAX_PT_PAX_FLAGS
73763+ select PAX_HAVE_ACL_FLAGS
73764+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73765+ select GRKERNSEC_CHROOT
73766+ select GRKERNSEC_CHROOT_SYSCTL
73767+ select GRKERNSEC_LINK
73768+ select GRKERNSEC_FIFO
73769+ select GRKERNSEC_DMESG
73770+ select GRKERNSEC_RANDNET
73771+ select GRKERNSEC_FORKFAIL
73772+ select GRKERNSEC_TIME
73773+ select GRKERNSEC_SIGNAL
73774+ select GRKERNSEC_CHROOT
73775+ select GRKERNSEC_CHROOT_UNIX
73776+ select GRKERNSEC_CHROOT_MOUNT
73777+ select GRKERNSEC_CHROOT_PIVOT
73778+ select GRKERNSEC_CHROOT_DOUBLE
73779+ select GRKERNSEC_CHROOT_CHDIR
73780+ select GRKERNSEC_CHROOT_MKNOD
73781+ select GRKERNSEC_PROC
73782+ select GRKERNSEC_PROC_USERGROUP
73783+ select PAX_RANDUSTACK
73784+ select PAX_ASLR
73785+ select PAX_RANDMMAP
73786+ select PAX_REFCOUNT if (X86 || SPARC64)
73787+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73788+
73789+ help
73790+ If you say Y here, several features in addition to those included
73791+ in the low additional security level will be enabled. These
73792+ features provide even more security to your system, though in rare
73793+ cases they may be incompatible with very old or poorly written
73794+ software. If you enable this option, make sure that your auth
73795+ service (identd) is running as gid 1001. With this option,
73796+ the following features (in addition to those provided in the
73797+ low additional security level) will be enabled:
73798+
73799+ - Failed fork logging
73800+ - Time change logging
73801+ - Signal logging
73802+ - Deny mounts in chroot
73803+ - Deny double chrooting
73804+ - Deny sysctl writes in chroot
73805+ - Deny mknod in chroot
73806+ - Deny access to abstract AF_UNIX sockets out of chroot
73807+ - Deny pivot_root in chroot
73808+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
73809+ - /proc restrictions with special GID set to 10 (usually wheel)
73810+ - Address Space Layout Randomization (ASLR)
73811+ - Prevent exploitation of most refcount overflows
73812+ - Bounds checking of copying between the kernel and userland
73813+
73814+config GRKERNSEC_HIGH
73815+ bool "High"
73816+ select GRKERNSEC_LINK
73817+ select GRKERNSEC_FIFO
73818+ select GRKERNSEC_DMESG
73819+ select GRKERNSEC_FORKFAIL
73820+ select GRKERNSEC_TIME
73821+ select GRKERNSEC_SIGNAL
73822+ select GRKERNSEC_CHROOT
73823+ select GRKERNSEC_CHROOT_SHMAT
73824+ select GRKERNSEC_CHROOT_UNIX
73825+ select GRKERNSEC_CHROOT_MOUNT
73826+ select GRKERNSEC_CHROOT_FCHDIR
73827+ select GRKERNSEC_CHROOT_PIVOT
73828+ select GRKERNSEC_CHROOT_DOUBLE
73829+ select GRKERNSEC_CHROOT_CHDIR
73830+ select GRKERNSEC_CHROOT_MKNOD
73831+ select GRKERNSEC_CHROOT_CAPS
73832+ select GRKERNSEC_CHROOT_SYSCTL
73833+ select GRKERNSEC_CHROOT_FINDTASK
73834+ select GRKERNSEC_SYSFS_RESTRICT
73835+ select GRKERNSEC_PROC
73836+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73837+ select GRKERNSEC_HIDESYM
73838+ select GRKERNSEC_BRUTE
73839+ select GRKERNSEC_PROC_USERGROUP
73840+ select GRKERNSEC_KMEM
73841+ select GRKERNSEC_RESLOG
73842+ select GRKERNSEC_RANDNET
73843+ select GRKERNSEC_PROC_ADD
73844+ select GRKERNSEC_CHROOT_CHMOD
73845+ select GRKERNSEC_CHROOT_NICE
73846+ select GRKERNSEC_SETXID
73847+ select GRKERNSEC_AUDIT_MOUNT
73848+ select GRKERNSEC_MODHARDEN if (MODULES)
73849+ select GRKERNSEC_HARDEN_PTRACE
73850+ select GRKERNSEC_PTRACE_READEXEC
73851+ select GRKERNSEC_VM86 if (X86_32)
73852+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
73853+ select PAX
73854+ select PAX_RANDUSTACK
73855+ select PAX_ASLR
73856+ select PAX_RANDMMAP
73857+ select PAX_NOEXEC
73858+ select PAX_MPROTECT
73859+ select PAX_EI_PAX
73860+ select PAX_PT_PAX_FLAGS
73861+ select PAX_HAVE_ACL_FLAGS
73862+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
73863+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
73864+ select PAX_RANDKSTACK if (X86_TSC && X86)
73865+ select PAX_SEGMEXEC if (X86_32)
73866+ select PAX_PAGEEXEC
73867+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
73868+ select PAX_EMUTRAMP if (PARISC)
73869+ select PAX_EMUSIGRT if (PARISC)
73870+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
73871+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
73872+ select PAX_REFCOUNT if (X86 || SPARC64)
73873+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73874+ help
73875+ If you say Y here, many of the features of grsecurity will be
73876+ enabled, which will protect you against many kinds of attacks
73877+ against your system. The heightened security comes at a cost
73878+ of an increased chance of incompatibilities with rare software
73879+ on your machine. Since this security level enables PaX, you should
73880+ view <http://pax.grsecurity.net> and read about the PaX
73881+ project. While you are there, download chpax and run it on
73882+ binaries that cause problems with PaX. Also remember that
73883+ since the /proc restrictions are enabled, you must run your
73884+ identd as gid 1001. This security level enables the following
73885+ features in addition to those listed in the low and medium
73886+ security levels:
73887+
73888+ - Additional /proc restrictions
73889+ - Chmod restrictions in chroot
73890+ - No signals, ptrace, or viewing of processes outside of chroot
73891+ - Capability restrictions in chroot
73892+ - Deny fchdir out of chroot
73893+ - Priority restrictions in chroot
73894+ - Segmentation-based implementation of PaX
73895+ - Mprotect restrictions
73896+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
73897+ - Kernel stack randomization
73898+ - Mount/unmount/remount logging
73899+ - Kernel symbol hiding
73900+ - Hardening of module auto-loading
73901+ - Ptrace restrictions
73902+ - Restricted vm86 mode
73903+ - Restricted sysfs/debugfs
73904+ - Active kernel exploit response
73905+
73906+config GRKERNSEC_CUSTOM
73907+ bool "Custom"
73908+ help
73909+ If you say Y here, you will be able to configure every grsecurity
73910+ option, which allows you to enable many more features that aren't
73911+ covered in the basic security levels. These additional features
73912+ include TPE, socket restrictions, and the sysctl system for
73913+ grsecurity. It is advised that you read through the help for
73914+ each option to determine its usefulness in your situation.
73915+
73916+endchoice
73917+
73918+menu "Memory Protections"
73919+depends on GRKERNSEC
73920+
73921+config GRKERNSEC_KMEM
73922+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
73923+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
73924+ help
73925+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
73926+ be written to or read from to modify or leak the contents of the running
73927+ kernel. /dev/port will also not be allowed to be opened. If you have module
73928+ support disabled, enabling this will close up four ways that are
73929+ currently used to insert malicious code into the running kernel.
73930+ Even with all these features enabled, we still highly recommend that
73931+ you use the RBAC system, as it is still possible for an attacker to
73932+ modify the running kernel through privileged I/O granted by ioperm/iopl.
73933+ If you are not using XFree86, you may be able to stop this additional
73934+ case by enabling the 'Disable privileged I/O' option. Though nothing
73935+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
73936+ but only to video memory, which is the only writing we allow in this
73937+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
73938+ not be allowed to mprotect it with PROT_WRITE later.
73939+ It is highly recommended that you say Y here if you meet all the
73940+ conditions above.
73941+
73942+config GRKERNSEC_VM86
73943+ bool "Restrict VM86 mode"
73944+ depends on X86_32
73945+
73946+ help
73947+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
73948+ make use of a special execution mode on 32bit x86 processors called
73949+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
73950+ video cards and will still work with this option enabled. The purpose
73951+ of the option is to prevent exploitation of emulation errors in
73952+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
73953+ Nearly all users should be able to enable this option.
73954+
73955+config GRKERNSEC_IO
73956+ bool "Disable privileged I/O"
73957+ depends on X86
73958+ select RTC_CLASS
73959+ select RTC_INTF_DEV
73960+ select RTC_DRV_CMOS
73961+
73962+ help
73963+ If you say Y here, all ioperm and iopl calls will return an error.
73964+ Ioperm and iopl can be used to modify the running kernel.
73965+ Unfortunately, some programs need this access to operate properly,
73966+ the most notable of which are XFree86 and hwclock. hwclock can be
73967+ remedied by having RTC support in the kernel, so real-time
73968+ clock support is enabled if this option is enabled, to ensure
73969+ that hwclock operates correctly. XFree86 still will not
73970+ operate correctly with this option enabled, so DO NOT CHOOSE Y
73971+ IF YOU USE XFree86. If you use XFree86 and you still want to
73972+ protect your kernel against modification, use the RBAC system.
73973+
73974+config GRKERNSEC_PROC_MEMMAP
73975+ bool "Harden ASLR against information leaks and entropy reduction"
73976+ default y if (PAX_NOEXEC || PAX_ASLR)
73977+ depends on PAX_NOEXEC || PAX_ASLR
73978+ help
73979+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
73980+ give no information about the addresses of its mappings if
73981+ PaX features that rely on random addresses are enabled on the task.
73982+ In addition to sanitizing this information and disabling other
73983+ dangerous sources of information, this option causes reads of sensitive
73984+ /proc/<pid> entries where the file descriptor was opened in a different
73985+ task than the one performing the read. Such attempts are logged.
73986+ This option also limits argv/env strings for suid/sgid binaries
73987+ to 512KB to prevent a complete exhaustion of the stack entropy provided
73988+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
73989+ binaries to prevent alternative mmap layouts from being abused.
73990+
73991+ If you use PaX it is essential that you say Y here as it closes up
73992+ several holes that make full ASLR useless locally.
73993+
73994+config GRKERNSEC_BRUTE
73995+ bool "Deter exploit bruteforcing"
73996+ help
73997+ If you say Y here, attempts to bruteforce exploits against forking
73998+ daemons such as apache or sshd, as well as against suid/sgid binaries
73999+ will be deterred. When a child of a forking daemon is killed by PaX
74000+ or crashes due to an illegal instruction or other suspicious signal,
74001+ the parent process will be delayed 30 seconds upon every subsequent
74002+ fork until the administrator is able to assess the situation and
74003+ restart the daemon.
74004+ In the suid/sgid case, the attempt is logged, the user has all their
74005+ processes terminated, and they are prevented from executing any further
74006+ processes for 15 minutes.
74007+ It is recommended that you also enable signal logging in the auditing
74008+ section so that logs are generated when a process triggers a suspicious
74009+ signal.
74010+ If the sysctl option is enabled, a sysctl option with name
74011+ "deter_bruteforce" is created.
74012+
74013+config GRKERNSEC_MODHARDEN
74014+ bool "Harden module auto-loading"
74015+ depends on MODULES
74016+ help
74017+ If you say Y here, module auto-loading in response to use of some
74018+ feature implemented by an unloaded module will be restricted to
74019+ root users. Enabling this option helps defend against attacks
74020+ by unprivileged users who abuse the auto-loading behavior to
74021+ cause a vulnerable module to load that is then exploited.
74022+
74023+ If this option prevents a legitimate use of auto-loading for a
74024+ non-root user, the administrator can execute modprobe manually
74025+ with the exact name of the module mentioned in the alert log.
74026+ Alternatively, the administrator can add the module to the list
74027+ of modules loaded at boot by modifying init scripts.
74028+
74029+ Modification of init scripts will most likely be needed on
74030+ Ubuntu servers with encrypted home directory support enabled,
74031+ as the first non-root user logging in will cause the ecb(aes),
74032+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
74033+
74034+config GRKERNSEC_HIDESYM
74035+ bool "Hide kernel symbols"
74036+ help
74037+ If you say Y here, getting information on loaded modules, and
74038+ displaying all kernel symbols through a syscall will be restricted
74039+ to users with CAP_SYS_MODULE. For software compatibility reasons,
74040+ /proc/kallsyms will be restricted to the root user. The RBAC
74041+ system can hide that entry even from root.
74042+
74043+ This option also prevents leaking of kernel addresses through
74044+ several /proc entries.
74045+
74046+ Note that this option is only effective provided the following
74047+ conditions are met:
74048+ 1) The kernel using grsecurity is not precompiled by some distribution
74049+ 2) You have also enabled GRKERNSEC_DMESG
74050+ 3) You are using the RBAC system and hiding other files such as your
74051+ kernel image and System.map. Alternatively, enabling this option
74052+ causes the permissions on /boot, /lib/modules, and the kernel
74053+ source directory to change at compile time to prevent
74054+ reading by non-root users.
74055+ If the above conditions are met, this option will aid in providing a
74056+ useful protection against local kernel exploitation of overflows
74057+ and arbitrary read/write vulnerabilities.
74058+
74059+config GRKERNSEC_KERN_LOCKOUT
74060+ bool "Active kernel exploit response"
74061+ depends on X86 || ARM || PPC || SPARC
74062+ help
74063+ If you say Y here, when a PaX alert is triggered due to suspicious
74064+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
74065+ or an OOPs occurs due to bad memory accesses, instead of just
74066+ terminating the offending process (and potentially allowing
74067+ a subsequent exploit from the same user), we will take one of two
74068+ actions:
74069+ If the user was root, we will panic the system
74070+ If the user was non-root, we will log the attempt, terminate
74071+ all processes owned by the user, then prevent them from creating
74072+ any new processes until the system is restarted
74073+ This deters repeated kernel exploitation/bruteforcing attempts
74074+ and is useful for later forensics.
74075+
74076+endmenu
74077+menu "Role Based Access Control Options"
74078+depends on GRKERNSEC
74079+
74080+config GRKERNSEC_RBAC_DEBUG
74081+ bool
74082+
74083+config GRKERNSEC_NO_RBAC
74084+ bool "Disable RBAC system"
74085+ help
74086+ If you say Y here, the /dev/grsec device will be removed from the kernel,
74087+ preventing the RBAC system from being enabled. You should only say Y
74088+ here if you have no intention of using the RBAC system, so as to prevent
74089+ an attacker with root access from misusing the RBAC system to hide files
74090+ and processes when loadable module support and /dev/[k]mem have been
74091+ locked down.
74092+
74093+config GRKERNSEC_ACL_HIDEKERN
74094+ bool "Hide kernel processes"
74095+ help
74096+ If you say Y here, all kernel threads will be hidden to all
74097+ processes but those whose subject has the "view hidden processes"
74098+ flag.
74099+
74100+config GRKERNSEC_ACL_MAXTRIES
74101+ int "Maximum tries before password lockout"
74102+ default 3
74103+ help
74104+ This option enforces the maximum number of times a user can attempt
74105+ to authorize themselves with the grsecurity RBAC system before being
74106+ denied the ability to attempt authorization again for a specified time.
74107+ The lower the number, the harder it will be to brute-force a password.
74108+
74109+config GRKERNSEC_ACL_TIMEOUT
74110+ int "Time to wait after max password tries, in seconds"
74111+ default 30
74112+ help
74113+ This option specifies the time the user must wait after attempting to
74114+ authorize to the RBAC system with the maximum number of invalid
74115+ passwords. The higher the number, the harder it will be to brute-force
74116+ a password.
74117+
74118+endmenu
74119+menu "Filesystem Protections"
74120+depends on GRKERNSEC
74121+
74122+config GRKERNSEC_PROC
74123+ bool "Proc restrictions"
74124+ help
74125+ If you say Y here, the permissions of the /proc filesystem
74126+ will be altered to enhance system security and privacy. You MUST
74127+ choose either a user only restriction or a user and group restriction.
74128+ Depending upon the option you choose, you can either restrict users to
74129+ see only the processes they themselves run, or choose a group that can
74130+ view all processes and files normally restricted to root if you choose
74131+ the "restrict to user only" option. NOTE: If you're running identd or
74132+ ntpd as a non-root user, you will have to run it as the group you
74133+ specify here.
74134+
74135+config GRKERNSEC_PROC_USER
74136+ bool "Restrict /proc to user only"
74137+ depends on GRKERNSEC_PROC
74138+ help
74139+ If you say Y here, non-root users will only be able to view their own
74140+ processes, and restricts them from viewing network-related information,
74141+ and viewing kernel symbol and module information.
74142+
74143+config GRKERNSEC_PROC_USERGROUP
74144+ bool "Allow special group"
74145+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
74146+ help
74147+ If you say Y here, you will be able to select a group that will be
74148+ able to view all processes and network-related information. If you've
74149+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
74150+ remain hidden. This option is useful if you want to run identd as
74151+ a non-root user.
74152+
74153+config GRKERNSEC_PROC_GID
74154+ int "GID for special group"
74155+ depends on GRKERNSEC_PROC_USERGROUP
74156+ default 1001
74157+
74158+config GRKERNSEC_PROC_ADD
74159+ bool "Additional restrictions"
74160+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
74161+ help
74162+ If you say Y here, additional restrictions will be placed on
74163+ /proc that keep normal users from viewing device information and
74164+ slabinfo information that could be useful for exploits.
74165+
74166+config GRKERNSEC_LINK
74167+ bool "Linking restrictions"
74168+ help
74169+ If you say Y here, /tmp race exploits will be prevented, since users
74170+ will no longer be able to follow symlinks owned by other users in
74171+ world-writable +t directories (e.g. /tmp), unless the owner of the
74172+ symlink is the owner of the directory. Users will also not be
74173+ able to hardlink to files they do not own. If the sysctl option is
74174+ enabled, a sysctl option with name "linking_restrictions" is created.
74175+
74176+config GRKERNSEC_FIFO
74177+ bool "FIFO restrictions"
74178+ help
74179+ If you say Y here, users will not be able to write to FIFOs they don't
74180+ own in world-writable +t directories (e.g. /tmp), unless the owner of
74181+ the FIFO is the same owner of the directory it's held in. If the sysctl
74182+ option is enabled, a sysctl option with name "fifo_restrictions" is
74183+ created.
74184+
74185+config GRKERNSEC_SYSFS_RESTRICT
74186+ bool "Sysfs/debugfs restriction"
74187+ depends on SYSFS
74188+ help
74189+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
74190+ any filesystem normally mounted under it (e.g. debugfs) will be
74191+ mostly accessible only by root. These filesystems generally provide access
74192+ to hardware and debug information that isn't appropriate for unprivileged
74193+ users of the system. Sysfs and debugfs have also become a large source
74194+ of new vulnerabilities, ranging from infoleaks to local compromise.
74195+ There has been very little oversight with an eye toward security involved
74196+ in adding new exporters of information to these filesystems, so their
74197+ use is discouraged.
74198+ For reasons of compatibility, a few directories have been whitelisted
74199+ for access by non-root users:
74200+ /sys/fs/selinux
74201+ /sys/fs/fuse
74202+ /sys/devices/system/cpu
74203+
74204+config GRKERNSEC_ROFS
74205+ bool "Runtime read-only mount protection"
74206+ help
74207+ If you say Y here, a sysctl option with name "romount_protect" will
74208+ be created. By setting this option to 1 at runtime, filesystems
74209+ will be protected in the following ways:
74210+ * No new writable mounts will be allowed
74211+ * Existing read-only mounts won't be able to be remounted read/write
74212+ * Write operations will be denied on all block devices
74213+ This option acts independently of grsec_lock: once it is set to 1,
74214+ it cannot be turned off. Therefore, please be mindful of the resulting
74215+ behavior if this option is enabled in an init script on a read-only
74216+ filesystem. This feature is mainly intended for secure embedded systems.
74217+
74218+config GRKERNSEC_CHROOT
74219+ bool "Chroot jail restrictions"
74220+ help
74221+ If you say Y here, you will be able to choose several options that will
74222+ make breaking out of a chrooted jail much more difficult. If you
74223+ encounter no software incompatibilities with the following options, it
74224+ is recommended that you enable each one.
74225+
74226+config GRKERNSEC_CHROOT_MOUNT
74227+ bool "Deny mounts"
74228+ depends on GRKERNSEC_CHROOT
74229+ help
74230+ If you say Y here, processes inside a chroot will not be able to
74231+ mount or remount filesystems. If the sysctl option is enabled, a
74232+ sysctl option with name "chroot_deny_mount" is created.
74233+
74234+config GRKERNSEC_CHROOT_DOUBLE
74235+ bool "Deny double-chroots"
74236+ depends on GRKERNSEC_CHROOT
74237+ help
74238+ If you say Y here, processes inside a chroot will not be able to chroot
74239+ again outside the chroot. This is a widely used method of breaking
74240+ out of a chroot jail and should not be allowed. If the sysctl
74241+ option is enabled, a sysctl option with name
74242+ "chroot_deny_chroot" is created.
74243+
74244+config GRKERNSEC_CHROOT_PIVOT
74245+ bool "Deny pivot_root in chroot"
74246+ depends on GRKERNSEC_CHROOT
74247+ help
74248+ If you say Y here, processes inside a chroot will not be able to use
74249+ a function called pivot_root() that was introduced in Linux 2.3.41. It
74250+ works similar to chroot in that it changes the root filesystem. This
74251+ function could be misused in a chrooted process to attempt to break out
74252+ of the chroot, and therefore should not be allowed. If the sysctl
74253+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
74254+ created.
74255+
74256+config GRKERNSEC_CHROOT_CHDIR
74257+ bool "Enforce chdir(\"/\") on all chroots"
74258+ depends on GRKERNSEC_CHROOT
74259+ help
74260+ If you say Y here, the current working directory of all newly-chrooted
74261+ applications will be set to the root directory of the chroot.
74262+ The man page on chroot(2) states:
74263+ Note that this call does not change the current working
74264+ directory, so that `.' can be outside the tree rooted at
74265+ `/'. In particular, the super-user can escape from a
74266+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
74267+
74268+ It is recommended that you say Y here, since it's not known to break
74269+ any software. If the sysctl option is enabled, a sysctl option with
74270+ name "chroot_enforce_chdir" is created.
74271+
74272+config GRKERNSEC_CHROOT_CHMOD
74273+ bool "Deny (f)chmod +s"
74274+ depends on GRKERNSEC_CHROOT
74275+ help
74276+ If you say Y here, processes inside a chroot will not be able to chmod
74277+ or fchmod files to make them have suid or sgid bits. This protects
74278+ against another published method of breaking a chroot. If the sysctl
74279+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
74280+ created.
74281+
74282+config GRKERNSEC_CHROOT_FCHDIR
74283+ bool "Deny fchdir out of chroot"
74284+ depends on GRKERNSEC_CHROOT
74285+ help
74286+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
74287+ to a file descriptor of the chrooting process that points to a directory
74288+ outside the filesystem will be stopped. If the sysctl option
74289+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
74290+
74291+config GRKERNSEC_CHROOT_MKNOD
74292+ bool "Deny mknod"
74293+ depends on GRKERNSEC_CHROOT
74294+ help
74295+ If you say Y here, processes inside a chroot will not be allowed to
74296+ mknod. The problem with using mknod inside a chroot is that it
74297+ would allow an attacker to create a device entry that is the same
74298+ as one on the physical root of your system, which could range from
74299+ anything from the console device to a device for your harddrive (which
74300+ they could then use to wipe the drive or steal data). It is recommended
74301+ that you say Y here, unless you run into software incompatibilities.
74302+ If the sysctl option is enabled, a sysctl option with name
74303+ "chroot_deny_mknod" is created.
74304+
74305+config GRKERNSEC_CHROOT_SHMAT
74306+ bool "Deny shmat() out of chroot"
74307+ depends on GRKERNSEC_CHROOT
74308+ help
74309+ If you say Y here, processes inside a chroot will not be able to attach
74310+ to shared memory segments that were created outside of the chroot jail.
74311+ It is recommended that you say Y here. If the sysctl option is enabled,
74312+ a sysctl option with name "chroot_deny_shmat" is created.
74313+
74314+config GRKERNSEC_CHROOT_UNIX
74315+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
74316+ depends on GRKERNSEC_CHROOT
74317+ help
74318+ If you say Y here, processes inside a chroot will not be able to
74319+ connect to abstract (meaning not belonging to a filesystem) Unix
74320+ domain sockets that were bound outside of a chroot. It is recommended
74321+ that you say Y here. If the sysctl option is enabled, a sysctl option
74322+ with name "chroot_deny_unix" is created.
74323+
74324+config GRKERNSEC_CHROOT_FINDTASK
74325+ bool "Protect outside processes"
74326+ depends on GRKERNSEC_CHROOT
74327+ help
74328+ If you say Y here, processes inside a chroot will not be able to
74329+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
74330+ getsid, or view any process outside of the chroot. If the sysctl
74331+ option is enabled, a sysctl option with name "chroot_findtask" is
74332+ created.
74333+
74334+config GRKERNSEC_CHROOT_NICE
74335+ bool "Restrict priority changes"
74336+ depends on GRKERNSEC_CHROOT
74337+ help
74338+ If you say Y here, processes inside a chroot will not be able to raise
74339+ the priority of processes in the chroot, or alter the priority of
74340+ processes outside the chroot. This provides more security than simply
74341+ removing CAP_SYS_NICE from the process' capability set. If the
74342+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
74343+ is created.
74344+
74345+config GRKERNSEC_CHROOT_SYSCTL
74346+ bool "Deny sysctl writes"
74347+ depends on GRKERNSEC_CHROOT
74348+ help
74349+ If you say Y here, an attacker in a chroot will not be able to
74350+ write to sysctl entries, either by sysctl(2) or through a /proc
74351+ interface. It is strongly recommended that you say Y here. If the
74352+ sysctl option is enabled, a sysctl option with name
74353+ "chroot_deny_sysctl" is created.
74354+
74355+config GRKERNSEC_CHROOT_CAPS
74356+ bool "Capability restrictions"
74357+ depends on GRKERNSEC_CHROOT
74358+ help
74359+ If you say Y here, the capabilities on all processes within a
74360+ chroot jail will be lowered to stop module insertion, raw i/o,
74361+ system and net admin tasks, rebooting the system, modifying immutable
74362+ files, modifying IPC owned by another, and changing the system time.
74363+ This is left an option because it can break some apps. Disable this
74364+ if your chrooted apps are having problems performing those kinds of
74365+ tasks. If the sysctl option is enabled, a sysctl option with
74366+ name "chroot_caps" is created.
74367+
74368+endmenu
74369+menu "Kernel Auditing"
74370+depends on GRKERNSEC
74371+
74372+config GRKERNSEC_AUDIT_GROUP
74373+ bool "Single group for auditing"
74374+ help
74375+ If you say Y here, the exec, chdir, and (un)mount logging features
74376+ will only operate on a group you specify. This option is recommended
74377+ if you only want to watch certain users instead of having a large
74378+ amount of logs from the entire system. If the sysctl option is enabled,
74379+ a sysctl option with name "audit_group" is created.
74380+
74381+config GRKERNSEC_AUDIT_GID
74382+ int "GID for auditing"
74383+ depends on GRKERNSEC_AUDIT_GROUP
74384+ default 1007
74385+
74386+config GRKERNSEC_EXECLOG
74387+ bool "Exec logging"
74388+ help
74389+ If you say Y here, all execve() calls will be logged (since the
74390+ other exec*() calls are frontends to execve(), all execution
74391+ will be logged). Useful for shell-servers that like to keep track
74392+ of their users. If the sysctl option is enabled, a sysctl option with
74393+ name "exec_logging" is created.
74394+ WARNING: This option when enabled will produce a LOT of logs, especially
74395+ on an active system.
74396+
74397+config GRKERNSEC_RESLOG
74398+ bool "Resource logging"
74399+ help
74400+ If you say Y here, all attempts to overstep resource limits will
74401+ be logged with the resource name, the requested size, and the current
74402+ limit. It is highly recommended that you say Y here. If the sysctl
74403+ option is enabled, a sysctl option with name "resource_logging" is
74404+ created. If the RBAC system is enabled, the sysctl value is ignored.
74405+
74406+config GRKERNSEC_CHROOT_EXECLOG
74407+ bool "Log execs within chroot"
74408+ help
74409+ If you say Y here, all executions inside a chroot jail will be logged
74410+ to syslog. This can cause a large amount of logs if certain
74411+ applications (eg. djb's daemontools) are installed on the system, and
74412+ is therefore left as an option. If the sysctl option is enabled, a
74413+ sysctl option with name "chroot_execlog" is created.
74414+
74415+config GRKERNSEC_AUDIT_PTRACE
74416+ bool "Ptrace logging"
74417+ help
74418+ If you say Y here, all attempts to attach to a process via ptrace
74419+ will be logged. If the sysctl option is enabled, a sysctl option
74420+ with name "audit_ptrace" is created.
74421+
74422+config GRKERNSEC_AUDIT_CHDIR
74423+ bool "Chdir logging"
74424+ help
74425+ If you say Y here, all chdir() calls will be logged. If the sysctl
74426+ option is enabled, a sysctl option with name "audit_chdir" is created.
74427+
74428+config GRKERNSEC_AUDIT_MOUNT
74429+ bool "(Un)Mount logging"
74430+ help
74431+ If you say Y here, all mounts and unmounts will be logged. If the
74432+ sysctl option is enabled, a sysctl option with name "audit_mount" is
74433+ created.
74434+
74435+config GRKERNSEC_SIGNAL
74436+ bool "Signal logging"
74437+ help
74438+ If you say Y here, certain important signals will be logged, such as
74439+ SIGSEGV, which will as a result inform you of when an error in a program
74440+ occurred, which in some cases could mean a possible exploit attempt.
74441+ If the sysctl option is enabled, a sysctl option with name
74442+ "signal_logging" is created.
74443+
74444+config GRKERNSEC_FORKFAIL
74445+ bool "Fork failure logging"
74446+ help
74447+ If you say Y here, all failed fork() attempts will be logged.
74448+ This could suggest a fork bomb, or someone attempting to overstep
74449+ their process limit. If the sysctl option is enabled, a sysctl option
74450+ with name "forkfail_logging" is created.
74451+
74452+config GRKERNSEC_TIME
74453+ bool "Time change logging"
74454+ help
74455+ If you say Y here, any changes of the system clock will be logged.
74456+ If the sysctl option is enabled, a sysctl option with name
74457+ "timechange_logging" is created.
74458+
74459+config GRKERNSEC_PROC_IPADDR
74460+ bool "/proc/<pid>/ipaddr support"
74461+ help
74462+ If you say Y here, a new entry will be added to each /proc/<pid>
74463+ directory that contains the IP address of the person using the task.
74464+ The IP is carried across local TCP and AF_UNIX stream sockets.
74465+ This information can be useful for IDS/IPSes to perform remote response
74466+ to a local attack. The entry is readable by only the owner of the
74467+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
74468+ the RBAC system), and thus does not create privacy concerns.
74469+
74470+config GRKERNSEC_RWXMAP_LOG
74471+ bool 'Denied RWX mmap/mprotect logging'
74472+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
74473+ help
74474+ If you say Y here, calls to mmap() and mprotect() with explicit
74475+ usage of PROT_WRITE and PROT_EXEC together will be logged when
74476+ denied by the PAX_MPROTECT feature. If the sysctl option is
74477+ enabled, a sysctl option with name "rwxmap_logging" is created.
74478+
74479+config GRKERNSEC_AUDIT_TEXTREL
74480+ bool 'ELF text relocations logging (READ HELP)'
74481+ depends on PAX_MPROTECT
74482+ help
74483+ If you say Y here, text relocations will be logged with the filename
74484+ of the offending library or binary. The purpose of the feature is
74485+ to help Linux distribution developers get rid of libraries and
74486+ binaries that need text relocations which hinder the future progress
74487+ of PaX. Only Linux distribution developers should say Y here, and
74488+ never on a production machine, as this option creates an information
74489+ leak that could aid an attacker in defeating the randomization of
74490+ a single memory region. If the sysctl option is enabled, a sysctl
74491+ option with name "audit_textrel" is created.
74492+
74493+endmenu
74494+
74495+menu "Executable Protections"
74496+depends on GRKERNSEC
74497+
74498+config GRKERNSEC_DMESG
74499+ bool "Dmesg(8) restriction"
74500+ help
74501+ If you say Y here, non-root users will not be able to use dmesg(8)
74502+ to view up to the last 4kb of messages in the kernel's log buffer.
74503+ The kernel's log buffer often contains kernel addresses and other
74504+ identifying information useful to an attacker in fingerprinting a
74505+ system for a targeted exploit.
74506+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
74507+ created.
74508+
74509+config GRKERNSEC_HARDEN_PTRACE
74510+ bool "Deter ptrace-based process snooping"
74511+ help
74512+ If you say Y here, TTY sniffers and other malicious monitoring
74513+ programs implemented through ptrace will be defeated. If you
74514+ have been using the RBAC system, this option has already been
74515+ enabled for several years for all users, with the ability to make
74516+ fine-grained exceptions.
74517+
74518+ This option only affects the ability of non-root users to ptrace
74519+ processes that are not a descendant of the ptracing process.
74520+ This means that strace ./binary and gdb ./binary will still work,
74521+ but attaching to arbitrary processes will not. If the sysctl
74522+ option is enabled, a sysctl option with name "harden_ptrace" is
74523+ created.
74524+
74525+config GRKERNSEC_PTRACE_READEXEC
74526+ bool "Require read access to ptrace sensitive binaries"
74527+ help
74528+ If you say Y here, unprivileged users will not be able to ptrace unreadable
74529+ binaries. This option is useful in environments that
74530+ remove the read bits (e.g. file mode 4711) from suid binaries to
74531+ prevent infoleaking of their contents. This option adds
74532+ consistency to the use of that file mode, as the binary could normally
74533+ be read out when run without privileges while ptracing.
74534+
74535+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
74536+ is created.
74537+
74538+config GRKERNSEC_SETXID
74539+ bool "Enforce consistent multithreaded privileges"
74540+ help
74541+ If you say Y here, a change from a root uid to a non-root uid
74542+ in a multithreaded application will cause the resulting uids,
74543+ gids, supplementary groups, and capabilities in that thread
74544+ to be propagated to the other threads of the process. In most
74545+ cases this is unnecessary, as glibc will emulate this behavior
74546+ on behalf of the application. Other libcs do not act in the
74547+ same way, allowing the other threads of the process to continue
74548+ running with root privileges. If the sysctl option is enabled,
74549+ a sysctl option with name "consistent_setxid" is created.
74550+
74551+config GRKERNSEC_TPE
74552+ bool "Trusted Path Execution (TPE)"
74553+ help
74554+ If you say Y here, you will be able to choose a gid to add to the
74555+ supplementary groups of users you want to mark as "untrusted."
74556+ These users will not be able to execute any files that are not in
74557+ root-owned directories writable only by root. If the sysctl option
74558+ is enabled, a sysctl option with name "tpe" is created.
74559+
74560+config GRKERNSEC_TPE_ALL
74561+ bool "Partially restrict all non-root users"
74562+ depends on GRKERNSEC_TPE
74563+ help
74564+ If you say Y here, all non-root users will be covered under
74565+ a weaker TPE restriction. This is separate from, and in addition to,
74566+ the main TPE options that you have selected elsewhere. Thus, if a
74567+ "trusted" GID is chosen, this restriction applies to even that GID.
74568+ Under this restriction, all non-root users will only be allowed to
74569+ execute files in directories they own that are not group or
74570+ world-writable, or in directories owned by root and writable only by
74571+ root. If the sysctl option is enabled, a sysctl option with name
74572+ "tpe_restrict_all" is created.
74573+
74574+config GRKERNSEC_TPE_INVERT
74575+ bool "Invert GID option"
74576+ depends on GRKERNSEC_TPE
74577+ help
74578+ If you say Y here, the group you specify in the TPE configuration will
74579+ decide what group TPE restrictions will be *disabled* for. This
74580+ option is useful if you want TPE restrictions to be applied to most
74581+ users on the system. If the sysctl option is enabled, a sysctl option
74582+ with name "tpe_invert" is created. Unlike other sysctl options, this
74583+ entry will default to on for backward-compatibility.
74584+
74585+config GRKERNSEC_TPE_GID
74586+ int "GID for untrusted users"
74587+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
74588+ default 1005
74589+ help
74590+ Setting this GID determines what group TPE restrictions will be
74591+ *enabled* for. If the sysctl option is enabled, a sysctl option
74592+ with name "tpe_gid" is created.
74593+
74594+config GRKERNSEC_TPE_GID
74595+ int "GID for trusted users"
74596+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
74597+ default 1005
74598+ help
74599+ Setting this GID determines what group TPE restrictions will be
74600+ *disabled* for. If the sysctl option is enabled, a sysctl option
74601+ with name "tpe_gid" is created.
74602+
74603+endmenu
74604+menu "Network Protections"
74605+depends on GRKERNSEC
74606+
74607+config GRKERNSEC_RANDNET
74608+ bool "Larger entropy pools"
74609+ help
74610+ If you say Y here, the entropy pools used for many features of Linux
74611+ and grsecurity will be doubled in size. Since several grsecurity
74612+ features use additional randomness, it is recommended that you say Y
74613+ here. Saying Y here has a similar effect as modifying
74614+ /proc/sys/kernel/random/poolsize.
74615+
74616+config GRKERNSEC_BLACKHOLE
74617+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
74618+ depends on NET
74619+ help
74620+ If you say Y here, neither TCP resets nor ICMP
74621+ destination-unreachable packets will be sent in response to packets
74622+ sent to ports for which no associated listening process exists.
74623+ This feature supports both IPV4 and IPV6 and exempts the
74624+ loopback interface from blackholing. Enabling this feature
74625+ makes a host more resilient to DoS attacks and reduces network
74626+ visibility against scanners.
74627+
74628+ The blackhole feature as-implemented is equivalent to the FreeBSD
74629+ blackhole feature, as it prevents RST responses to all packets, not
74630+ just SYNs. Under most application behavior this causes no
74631+ problems, but applications (like haproxy) may not close certain
74632+ connections in a way that cleanly terminates them on the remote
74633+ end, leaving the remote host in LAST_ACK state. Because of this
74634+ side-effect and to prevent intentional LAST_ACK DoSes, this
74635+ feature also adds automatic mitigation against such attacks.
74636+ The mitigation drastically reduces the amount of time a socket
74637+ can spend in LAST_ACK state. If you're using haproxy and not
74638+ all servers it connects to have this option enabled, consider
74639+ disabling this feature on the haproxy host.
74640+
74641+ If the sysctl option is enabled, two sysctl options with names
74642+ "ip_blackhole" and "lastack_retries" will be created.
74643+ While "ip_blackhole" takes the standard zero/non-zero on/off
74644+ toggle, "lastack_retries" uses the same kinds of values as
74645+ "tcp_retries1" and "tcp_retries2". The default value of 4
74646+ prevents a socket from lasting more than 45 seconds in LAST_ACK
74647+ state.
74648+
74649+config GRKERNSEC_SOCKET
74650+ bool "Socket restrictions"
74651+ depends on NET
74652+ help
74653+ If you say Y here, you will be able to choose from several options.
74654+ If you assign a GID on your system and add it to the supplementary
74655+ groups of users you want to restrict socket access to, this patch
74656+ will perform up to three things, based on the option(s) you choose.
74657+
74658+config GRKERNSEC_SOCKET_ALL
74659+ bool "Deny any sockets to group"
74660+ depends on GRKERNSEC_SOCKET
74661+ help
74662+ If you say Y here, you will be able to choose a GID whose users will
74663+ be unable to connect to other hosts from your machine or run server
74664+ applications from your machine. If the sysctl option is enabled, a
74665+ sysctl option with name "socket_all" is created.
74666+
74667+config GRKERNSEC_SOCKET_ALL_GID
74668+ int "GID to deny all sockets for"
74669+ depends on GRKERNSEC_SOCKET_ALL
74670+ default 1004
74671+ help
74672+ Here you can choose the GID to disable socket access for. Remember to
74673+ add the users you want socket access disabled for to the GID
74674+ specified here. If the sysctl option is enabled, a sysctl option
74675+ with name "socket_all_gid" is created.
74676+
74677+config GRKERNSEC_SOCKET_CLIENT
74678+ bool "Deny client sockets to group"
74679+ depends on GRKERNSEC_SOCKET
74680+ help
74681+ If you say Y here, you will be able to choose a GID whose users will
74682+ be unable to connect to other hosts from your machine, but will be
74683+ able to run servers. If this option is enabled, all users in the group
74684+ you specify will have to use passive mode when initiating ftp transfers
74685+ from the shell on your machine. If the sysctl option is enabled, a
74686+ sysctl option with name "socket_client" is created.
74687+
74688+config GRKERNSEC_SOCKET_CLIENT_GID
74689+ int "GID to deny client sockets for"
74690+ depends on GRKERNSEC_SOCKET_CLIENT
74691+ default 1003
74692+ help
74693+ Here you can choose the GID to disable client socket access for.
74694+ Remember to add the users you want client socket access disabled for to
74695+ the GID specified here. If the sysctl option is enabled, a sysctl
74696+ option with name "socket_client_gid" is created.
74697+
74698+config GRKERNSEC_SOCKET_SERVER
74699+ bool "Deny server sockets to group"
74700+ depends on GRKERNSEC_SOCKET
74701+ help
74702+ If you say Y here, you will be able to choose a GID whose users will
74703+ be unable to run server applications from your machine. If the sysctl
74704+ option is enabled, a sysctl option with name "socket_server" is created.
74705+
74706+config GRKERNSEC_SOCKET_SERVER_GID
74707+ int "GID to deny server sockets for"
74708+ depends on GRKERNSEC_SOCKET_SERVER
74709+ default 1002
74710+ help
74711+ Here you can choose the GID to disable server socket access for.
74712+ Remember to add the users you want server socket access disabled for to
74713+ the GID specified here. If the sysctl option is enabled, a sysctl
74714+ option with name "socket_server_gid" is created.
74715+
74716+endmenu
74717+menu "Sysctl support"
74718+depends on GRKERNSEC && SYSCTL
74719+
74720+config GRKERNSEC_SYSCTL
74721+ bool "Sysctl support"
74722+ help
74723+ If you say Y here, you will be able to change the options that
74724+ grsecurity runs with at bootup, without having to recompile your
74725+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
74726+ to enable (1) or disable (0) various features. All the sysctl entries
74727+ are mutable until the "grsec_lock" entry is set to a non-zero value.
74728+ All features enabled in the kernel configuration are disabled at boot
74729+ if you do not say Y to the "Turn on features by default" option.
74730+ All options should be set at startup, and the grsec_lock entry should
74731+ be set to a non-zero value after all the options are set.
74732+ *THIS IS EXTREMELY IMPORTANT*
74733+
74734+config GRKERNSEC_SYSCTL_DISTRO
74735+ bool "Extra sysctl support for distro makers (READ HELP)"
74736+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
74737+ help
74738+ If you say Y here, additional sysctl options will be created
74739+ for features that affect processes running as root. Therefore,
74740+ it is critical when using this option that the grsec_lock entry be
74741+ enabled after boot. Only distros with prebuilt kernel packages
74742+ with this option enabled that can ensure grsec_lock is enabled
74743+ after boot should use this option.
74744+ *Failure to set grsec_lock after boot makes all grsec features
74745+ this option covers useless*
74746+
74747+ Currently this option creates the following sysctl entries:
74748+ "Disable Privileged I/O": "disable_priv_io"
74749+
74750+config GRKERNSEC_SYSCTL_ON
74751+ bool "Turn on features by default"
74752+ depends on GRKERNSEC_SYSCTL
74753+ help
74754+ If you say Y here, instead of having all features enabled in the
74755+ kernel configuration disabled at boot time, the features will be
74756+ enabled at boot time. It is recommended you say Y here unless
74757+ there is some reason you would want all sysctl-tunable features to
74758+ be disabled by default. As mentioned elsewhere, it is important
74759+ to enable the grsec_lock entry once you have finished modifying
74760+ the sysctl entries.
74761+
74762+endmenu
74763+menu "Logging Options"
74764+depends on GRKERNSEC
74765+
74766+config GRKERNSEC_FLOODTIME
74767+ int "Seconds in between log messages (minimum)"
74768+ default 10
74769+ help
74770+ This option allows you to enforce the number of seconds between
74771+ grsecurity log messages. The default should be suitable for most
74772+ people, however, if you choose to change it, choose a value small enough
74773+ to allow informative logs to be produced, but large enough to
74774+ prevent flooding.
74775+
74776+config GRKERNSEC_FLOODBURST
74777+ int "Number of messages in a burst (maximum)"
74778+ default 6
74779+ help
74780+ This option allows you to choose the maximum number of messages allowed
74781+ within the flood time interval you chose in a separate option. The
74782+ default should be suitable for most people, however if you find that
74783+ many of your logs are being interpreted as flooding, you may want to
74784+ raise this value.
74785+
74786+endmenu
74787+
74788+endmenu
74789diff --git a/grsecurity/Makefile b/grsecurity/Makefile
74790new file mode 100644
74791index 0000000..1b9afa9
74792--- /dev/null
74793+++ b/grsecurity/Makefile
74794@@ -0,0 +1,38 @@
74795+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
74796+# during 2001-2009 it has been completely redesigned by Brad Spengler
74797+# into an RBAC system
74798+#
74799+# All code in this directory and various hooks inserted throughout the kernel
74800+# are copyright Brad Spengler - Open Source Security, Inc., and released
74801+# under the GPL v2 or higher
74802+
74803+KBUILD_CFLAGS += -Werror
74804+
74805+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
74806+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
74807+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
74808+
74809+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
74810+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
74811+ gracl_learn.o grsec_log.o
74812+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
74813+
74814+ifdef CONFIG_NET
74815+obj-y += grsec_sock.o
74816+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
74817+endif
74818+
74819+ifndef CONFIG_GRKERNSEC
74820+obj-y += grsec_disabled.o
74821+endif
74822+
74823+ifdef CONFIG_GRKERNSEC_HIDESYM
74824+extra-y := grsec_hidesym.o
74825+$(obj)/grsec_hidesym.o:
74826+ @-chmod -f 500 /boot
74827+ @-chmod -f 500 /lib/modules
74828+ @-chmod -f 500 /lib64/modules
74829+ @-chmod -f 500 /lib32/modules
74830+ @-chmod -f 700 .
74831+ @echo ' grsec: protected kernel image paths'
74832+endif
74833diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
74834new file mode 100644
74835index 0000000..d881a39
74836--- /dev/null
74837+++ b/grsecurity/gracl.c
74838@@ -0,0 +1,4156 @@
74839+#include <linux/kernel.h>
74840+#include <linux/module.h>
74841+#include <linux/sched.h>
74842+#include <linux/mm.h>
74843+#include <linux/file.h>
74844+#include <linux/fs.h>
74845+#include <linux/namei.h>
74846+#include <linux/mount.h>
74847+#include <linux/tty.h>
74848+#include <linux/proc_fs.h>
74849+#include <linux/smp_lock.h>
74850+#include <linux/slab.h>
74851+#include <linux/vmalloc.h>
74852+#include <linux/types.h>
74853+#include <linux/sysctl.h>
74854+#include <linux/netdevice.h>
74855+#include <linux/ptrace.h>
74856+#include <linux/gracl.h>
74857+#include <linux/gralloc.h>
74858+#include <linux/security.h>
74859+#include <linux/grinternal.h>
74860+#include <linux/pid_namespace.h>
74861+#include <linux/fdtable.h>
74862+#include <linux/percpu.h>
74863+
74864+#include <asm/uaccess.h>
74865+#include <asm/errno.h>
74866+#include <asm/mman.h>
74867+
74868+static struct acl_role_db acl_role_set;
74869+static struct name_db name_set;
74870+static struct inodev_db inodev_set;
74871+
74872+/* for keeping track of userspace pointers used for subjects, so we
74873+ can share references in the kernel as well
74874+*/
74875+
74876+static struct dentry *real_root;
74877+static struct vfsmount *real_root_mnt;
74878+
74879+static struct acl_subj_map_db subj_map_set;
74880+
74881+static struct acl_role_label *default_role;
74882+
74883+static struct acl_role_label *role_list;
74884+
74885+static u16 acl_sp_role_value;
74886+
74887+extern char *gr_shared_page[4];
74888+static DEFINE_MUTEX(gr_dev_mutex);
74889+DEFINE_RWLOCK(gr_inode_lock);
74890+
74891+struct gr_arg *gr_usermode;
74892+
74893+static unsigned int gr_status __read_only = GR_STATUS_INIT;
74894+
74895+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74896+extern void gr_clear_learn_entries(void);
74897+
74898+#ifdef CONFIG_GRKERNSEC_RESLOG
74899+extern void gr_log_resource(const struct task_struct *task,
74900+ const int res, const unsigned long wanted, const int gt);
74901+#endif
74902+
74903+unsigned char *gr_system_salt;
74904+unsigned char *gr_system_sum;
74905+
74906+static struct sprole_pw **acl_special_roles = NULL;
74907+static __u16 num_sprole_pws = 0;
74908+
74909+static struct acl_role_label *kernel_role = NULL;
74910+
74911+static unsigned int gr_auth_attempts = 0;
74912+static unsigned long gr_auth_expires = 0UL;
74913+
74914+#ifdef CONFIG_NET
74915+extern struct vfsmount *sock_mnt;
74916+#endif
74917+extern struct vfsmount *pipe_mnt;
74918+extern struct vfsmount *shm_mnt;
74919+#ifdef CONFIG_HUGETLBFS
74920+extern struct vfsmount *hugetlbfs_vfsmount;
74921+#endif
74922+
74923+static struct acl_object_label *fakefs_obj_rw;
74924+static struct acl_object_label *fakefs_obj_rwx;
74925+
74926+extern int gr_init_uidset(void);
74927+extern void gr_free_uidset(void);
74928+extern void gr_remove_uid(uid_t uid);
74929+extern int gr_find_uid(uid_t uid);
74930+
74931+__inline__ int
74932+gr_acl_is_enabled(void)
74933+{
74934+ return (gr_status & GR_READY);
74935+}
74936+
74937+#ifdef CONFIG_BTRFS_FS
74938+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
74939+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
74940+#endif
74941+
74942+static inline dev_t __get_dev(const struct dentry *dentry)
74943+{
74944+#ifdef CONFIG_BTRFS_FS
74945+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
74946+ return get_btrfs_dev_from_inode(dentry->d_inode);
74947+ else
74948+#endif
74949+ return dentry->d_inode->i_sb->s_dev;
74950+}
74951+
74952+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
74953+{
74954+ return __get_dev(dentry);
74955+}
74956+
74957+static char gr_task_roletype_to_char(struct task_struct *task)
74958+{
74959+ switch (task->role->roletype &
74960+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
74961+ GR_ROLE_SPECIAL)) {
74962+ case GR_ROLE_DEFAULT:
74963+ return 'D';
74964+ case GR_ROLE_USER:
74965+ return 'U';
74966+ case GR_ROLE_GROUP:
74967+ return 'G';
74968+ case GR_ROLE_SPECIAL:
74969+ return 'S';
74970+ }
74971+
74972+ return 'X';
74973+}
74974+
74975+char gr_roletype_to_char(void)
74976+{
74977+ return gr_task_roletype_to_char(current);
74978+}
74979+
74980+__inline__ int
74981+gr_acl_tpe_check(void)
74982+{
74983+ if (unlikely(!(gr_status & GR_READY)))
74984+ return 0;
74985+ if (current->role->roletype & GR_ROLE_TPE)
74986+ return 1;
74987+ else
74988+ return 0;
74989+}
74990+
74991+int
74992+gr_handle_rawio(const struct inode *inode)
74993+{
74994+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74995+ if (inode && S_ISBLK(inode->i_mode) &&
74996+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
74997+ !capable(CAP_SYS_RAWIO))
74998+ return 1;
74999+#endif
75000+ return 0;
75001+}
75002+
75003+static int
75004+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
75005+{
75006+ if (likely(lena != lenb))
75007+ return 0;
75008+
75009+ return !memcmp(a, b, lena);
75010+}
75011+
75012+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
75013+{
75014+ *buflen -= namelen;
75015+ if (*buflen < 0)
75016+ return -ENAMETOOLONG;
75017+ *buffer -= namelen;
75018+ memcpy(*buffer, str, namelen);
75019+ return 0;
75020+}
75021+
75022+/* this must be called with vfsmount_lock and dcache_lock held */
75023+
75024+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75025+ struct dentry *root, struct vfsmount *rootmnt,
75026+ char *buffer, int buflen)
75027+{
75028+ char * end = buffer+buflen;
75029+ char * retval;
75030+ int namelen;
75031+
75032+ *--end = '\0';
75033+ buflen--;
75034+
75035+ if (buflen < 1)
75036+ goto Elong;
75037+ /* Get '/' right */
75038+ retval = end-1;
75039+ *retval = '/';
75040+
75041+ for (;;) {
75042+ struct dentry * parent;
75043+
75044+ if (dentry == root && vfsmnt == rootmnt)
75045+ break;
75046+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
75047+ /* Global root? */
75048+ if (vfsmnt->mnt_parent == vfsmnt)
75049+ goto global_root;
75050+ dentry = vfsmnt->mnt_mountpoint;
75051+ vfsmnt = vfsmnt->mnt_parent;
75052+ continue;
75053+ }
75054+ parent = dentry->d_parent;
75055+ prefetch(parent);
75056+ namelen = dentry->d_name.len;
75057+ buflen -= namelen + 1;
75058+ if (buflen < 0)
75059+ goto Elong;
75060+ end -= namelen;
75061+ memcpy(end, dentry->d_name.name, namelen);
75062+ *--end = '/';
75063+ retval = end;
75064+ dentry = parent;
75065+ }
75066+
75067+out:
75068+ return retval;
75069+
75070+global_root:
75071+ namelen = dentry->d_name.len;
75072+ buflen -= namelen;
75073+ if (buflen < 0)
75074+ goto Elong;
75075+ retval -= namelen-1; /* hit the slash */
75076+ memcpy(retval, dentry->d_name.name, namelen);
75077+ goto out;
75078+Elong:
75079+ retval = ERR_PTR(-ENAMETOOLONG);
75080+ goto out;
75081+}
75082+
75083+static char *
75084+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
75085+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
75086+{
75087+ char *retval;
75088+
75089+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
75090+ if (unlikely(IS_ERR(retval)))
75091+ retval = strcpy(buf, "<path too long>");
75092+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
75093+ retval[1] = '\0';
75094+
75095+ return retval;
75096+}
75097+
75098+static char *
75099+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75100+ char *buf, int buflen)
75101+{
75102+ char *res;
75103+
75104+ /* we can use real_root, real_root_mnt, because this is only called
75105+ by the RBAC system */
75106+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
75107+
75108+ return res;
75109+}
75110+
75111+static char *
75112+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
75113+ char *buf, int buflen)
75114+{
75115+ char *res;
75116+ struct dentry *root;
75117+ struct vfsmount *rootmnt;
75118+ struct task_struct *reaper = &init_task;
75119+
75120+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
75121+ read_lock(&reaper->fs->lock);
75122+ root = dget(reaper->fs->root.dentry);
75123+ rootmnt = mntget(reaper->fs->root.mnt);
75124+ read_unlock(&reaper->fs->lock);
75125+
75126+ spin_lock(&dcache_lock);
75127+ spin_lock(&vfsmount_lock);
75128+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
75129+ spin_unlock(&vfsmount_lock);
75130+ spin_unlock(&dcache_lock);
75131+
75132+ dput(root);
75133+ mntput(rootmnt);
75134+ return res;
75135+}
75136+
75137+static char *
75138+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75139+{
75140+ char *ret;
75141+ spin_lock(&dcache_lock);
75142+ spin_lock(&vfsmount_lock);
75143+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75144+ PAGE_SIZE);
75145+ spin_unlock(&vfsmount_lock);
75146+ spin_unlock(&dcache_lock);
75147+ return ret;
75148+}
75149+
75150+static char *
75151+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
75152+{
75153+ char *ret;
75154+ char *buf;
75155+ int buflen;
75156+
75157+ spin_lock(&dcache_lock);
75158+ spin_lock(&vfsmount_lock);
75159+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
75160+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
75161+ buflen = (int)(ret - buf);
75162+ if (buflen >= 5)
75163+ prepend(&ret, &buflen, "/proc", 5);
75164+ else
75165+ ret = strcpy(buf, "<path too long>");
75166+ spin_unlock(&vfsmount_lock);
75167+ spin_unlock(&dcache_lock);
75168+ return ret;
75169+}
75170+
75171+char *
75172+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
75173+{
75174+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
75175+ PAGE_SIZE);
75176+}
75177+
75178+char *
75179+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
75180+{
75181+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
75182+ PAGE_SIZE);
75183+}
75184+
75185+char *
75186+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
75187+{
75188+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
75189+ PAGE_SIZE);
75190+}
75191+
75192+char *
75193+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
75194+{
75195+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
75196+ PAGE_SIZE);
75197+}
75198+
75199+char *
75200+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
75201+{
75202+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
75203+ PAGE_SIZE);
75204+}
75205+
75206+__inline__ __u32
75207+to_gr_audit(const __u32 reqmode)
75208+{
75209+ /* masks off auditable permission flags, then shifts them to create
75210+ auditing flags, and adds the special case of append auditing if
75211+ we're requesting write */
75212+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
75213+}
75214+
75215+struct acl_subject_label *
75216+lookup_subject_map(const struct acl_subject_label *userp)
75217+{
75218+ unsigned int index = shash(userp, subj_map_set.s_size);
75219+ struct subject_map *match;
75220+
75221+ match = subj_map_set.s_hash[index];
75222+
75223+ while (match && match->user != userp)
75224+ match = match->next;
75225+
75226+ if (match != NULL)
75227+ return match->kernel;
75228+ else
75229+ return NULL;
75230+}
75231+
75232+static void
75233+insert_subj_map_entry(struct subject_map *subjmap)
75234+{
75235+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
75236+ struct subject_map **curr;
75237+
75238+ subjmap->prev = NULL;
75239+
75240+ curr = &subj_map_set.s_hash[index];
75241+ if (*curr != NULL)
75242+ (*curr)->prev = subjmap;
75243+
75244+ subjmap->next = *curr;
75245+ *curr = subjmap;
75246+
75247+ return;
75248+}
75249+
75250+static struct acl_role_label *
75251+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
75252+ const gid_t gid)
75253+{
75254+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
75255+ struct acl_role_label *match;
75256+ struct role_allowed_ip *ipp;
75257+ unsigned int x;
75258+ u32 curr_ip = task->signal->curr_ip;
75259+
75260+ task->signal->saved_ip = curr_ip;
75261+
75262+ match = acl_role_set.r_hash[index];
75263+
75264+ while (match) {
75265+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
75266+ for (x = 0; x < match->domain_child_num; x++) {
75267+ if (match->domain_children[x] == uid)
75268+ goto found;
75269+ }
75270+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
75271+ break;
75272+ match = match->next;
75273+ }
75274+found:
75275+ if (match == NULL) {
75276+ try_group:
75277+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
75278+ match = acl_role_set.r_hash[index];
75279+
75280+ while (match) {
75281+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
75282+ for (x = 0; x < match->domain_child_num; x++) {
75283+ if (match->domain_children[x] == gid)
75284+ goto found2;
75285+ }
75286+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
75287+ break;
75288+ match = match->next;
75289+ }
75290+found2:
75291+ if (match == NULL)
75292+ match = default_role;
75293+ if (match->allowed_ips == NULL)
75294+ return match;
75295+ else {
75296+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75297+ if (likely
75298+ ((ntohl(curr_ip) & ipp->netmask) ==
75299+ (ntohl(ipp->addr) & ipp->netmask)))
75300+ return match;
75301+ }
75302+ match = default_role;
75303+ }
75304+ } else if (match->allowed_ips == NULL) {
75305+ return match;
75306+ } else {
75307+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
75308+ if (likely
75309+ ((ntohl(curr_ip) & ipp->netmask) ==
75310+ (ntohl(ipp->addr) & ipp->netmask)))
75311+ return match;
75312+ }
75313+ goto try_group;
75314+ }
75315+
75316+ return match;
75317+}
75318+
75319+struct acl_subject_label *
75320+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
75321+ const struct acl_role_label *role)
75322+{
75323+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
75324+ struct acl_subject_label *match;
75325+
75326+ match = role->subj_hash[index];
75327+
75328+ while (match && (match->inode != ino || match->device != dev ||
75329+ (match->mode & GR_DELETED))) {
75330+ match = match->next;
75331+ }
75332+
75333+ if (match && !(match->mode & GR_DELETED))
75334+ return match;
75335+ else
75336+ return NULL;
75337+}
75338+
75339+struct acl_subject_label *
75340+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
75341+ const struct acl_role_label *role)
75342+{
75343+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
75344+ struct acl_subject_label *match;
75345+
75346+ match = role->subj_hash[index];
75347+
75348+ while (match && (match->inode != ino || match->device != dev ||
75349+ !(match->mode & GR_DELETED))) {
75350+ match = match->next;
75351+ }
75352+
75353+ if (match && (match->mode & GR_DELETED))
75354+ return match;
75355+ else
75356+ return NULL;
75357+}
75358+
75359+static struct acl_object_label *
75360+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
75361+ const struct acl_subject_label *subj)
75362+{
75363+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75364+ struct acl_object_label *match;
75365+
75366+ match = subj->obj_hash[index];
75367+
75368+ while (match && (match->inode != ino || match->device != dev ||
75369+ (match->mode & GR_DELETED))) {
75370+ match = match->next;
75371+ }
75372+
75373+ if (match && !(match->mode & GR_DELETED))
75374+ return match;
75375+ else
75376+ return NULL;
75377+}
75378+
75379+static struct acl_object_label *
75380+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
75381+ const struct acl_subject_label *subj)
75382+{
75383+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
75384+ struct acl_object_label *match;
75385+
75386+ match = subj->obj_hash[index];
75387+
75388+ while (match && (match->inode != ino || match->device != dev ||
75389+ !(match->mode & GR_DELETED))) {
75390+ match = match->next;
75391+ }
75392+
75393+ if (match && (match->mode & GR_DELETED))
75394+ return match;
75395+
75396+ match = subj->obj_hash[index];
75397+
75398+ while (match && (match->inode != ino || match->device != dev ||
75399+ (match->mode & GR_DELETED))) {
75400+ match = match->next;
75401+ }
75402+
75403+ if (match && !(match->mode & GR_DELETED))
75404+ return match;
75405+ else
75406+ return NULL;
75407+}
75408+
75409+static struct name_entry *
75410+lookup_name_entry(const char *name)
75411+{
75412+ unsigned int len = strlen(name);
75413+ unsigned int key = full_name_hash(name, len);
75414+ unsigned int index = key % name_set.n_size;
75415+ struct name_entry *match;
75416+
75417+ match = name_set.n_hash[index];
75418+
75419+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
75420+ match = match->next;
75421+
75422+ return match;
75423+}
75424+
75425+static struct name_entry *
75426+lookup_name_entry_create(const char *name)
75427+{
75428+ unsigned int len = strlen(name);
75429+ unsigned int key = full_name_hash(name, len);
75430+ unsigned int index = key % name_set.n_size;
75431+ struct name_entry *match;
75432+
75433+ match = name_set.n_hash[index];
75434+
75435+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75436+ !match->deleted))
75437+ match = match->next;
75438+
75439+ if (match && match->deleted)
75440+ return match;
75441+
75442+ match = name_set.n_hash[index];
75443+
75444+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
75445+ match->deleted))
75446+ match = match->next;
75447+
75448+ if (match && !match->deleted)
75449+ return match;
75450+ else
75451+ return NULL;
75452+}
75453+
75454+static struct inodev_entry *
75455+lookup_inodev_entry(const ino_t ino, const dev_t dev)
75456+{
75457+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
75458+ struct inodev_entry *match;
75459+
75460+ match = inodev_set.i_hash[index];
75461+
75462+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
75463+ match = match->next;
75464+
75465+ return match;
75466+}
75467+
75468+static void
75469+insert_inodev_entry(struct inodev_entry *entry)
75470+{
75471+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
75472+ inodev_set.i_size);
75473+ struct inodev_entry **curr;
75474+
75475+ entry->prev = NULL;
75476+
75477+ curr = &inodev_set.i_hash[index];
75478+ if (*curr != NULL)
75479+ (*curr)->prev = entry;
75480+
75481+ entry->next = *curr;
75482+ *curr = entry;
75483+
75484+ return;
75485+}
75486+
75487+static void
75488+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75489+{
75490+ unsigned int index =
75491+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
75492+ struct acl_role_label **curr;
75493+ struct acl_role_label *tmp, *tmp2;
75494+
75495+ curr = &acl_role_set.r_hash[index];
75496+
75497+ /* simple case, slot is empty, just set it to our role */
75498+ if (*curr == NULL) {
75499+ *curr = role;
75500+ } else {
75501+ /* example:
75502+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
75503+ 2 -> 3
75504+ */
75505+ /* first check to see if we can already be reached via this slot */
75506+ tmp = *curr;
75507+ while (tmp && tmp != role)
75508+ tmp = tmp->next;
75509+ if (tmp == role) {
75510+ /* we don't need to add ourselves to this slot's chain */
75511+ return;
75512+ }
75513+ /* we need to add ourselves to this chain, two cases */
75514+ if (role->next == NULL) {
75515+ /* simple case, append the current chain to our role */
75516+ role->next = *curr;
75517+ *curr = role;
75518+ } else {
75519+ /* 1 -> 2 -> 3 -> 4
75520+ 2 -> 3 -> 4
75521+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
75522+ */
75523+ /* trickier case: walk our role's chain until we find
75524+ the role for the start of the current slot's chain */
75525+ tmp = role;
75526+ tmp2 = *curr;
75527+ while (tmp->next && tmp->next != tmp2)
75528+ tmp = tmp->next;
75529+ if (tmp->next == tmp2) {
75530+ /* from example above, we found 3, so just
75531+ replace this slot's chain with ours */
75532+ *curr = role;
75533+ } else {
75534+ /* we didn't find a subset of our role's chain
75535+ in the current slot's chain, so append their
75536+ chain to ours, and set us as the first role in
75537+ the slot's chain
75538+
75539+ we could fold this case with the case above,
75540+ but making it explicit for clarity
75541+ */
75542+ tmp->next = tmp2;
75543+ *curr = role;
75544+ }
75545+ }
75546+ }
75547+
75548+ return;
75549+}
75550+
75551+static void
75552+insert_acl_role_label(struct acl_role_label *role)
75553+{
75554+ int i;
75555+
75556+ if (role_list == NULL) {
75557+ role_list = role;
75558+ role->prev = NULL;
75559+ } else {
75560+ role->prev = role_list;
75561+ role_list = role;
75562+ }
75563+
75564+ /* used for hash chains */
75565+ role->next = NULL;
75566+
75567+ if (role->roletype & GR_ROLE_DOMAIN) {
75568+ for (i = 0; i < role->domain_child_num; i++)
75569+ __insert_acl_role_label(role, role->domain_children[i]);
75570+ } else
75571+ __insert_acl_role_label(role, role->uidgid);
75572+}
75573+
75574+static int
75575+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75576+{
75577+ struct name_entry **curr, *nentry;
75578+ struct inodev_entry *ientry;
75579+ unsigned int len = strlen(name);
75580+ unsigned int key = full_name_hash(name, len);
75581+ unsigned int index = key % name_set.n_size;
75582+
75583+ curr = &name_set.n_hash[index];
75584+
75585+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75586+ curr = &((*curr)->next);
75587+
75588+ if (*curr != NULL)
75589+ return 1;
75590+
75591+ nentry = acl_alloc(sizeof (struct name_entry));
75592+ if (nentry == NULL)
75593+ return 0;
75594+ ientry = acl_alloc(sizeof (struct inodev_entry));
75595+ if (ientry == NULL)
75596+ return 0;
75597+ ientry->nentry = nentry;
75598+
75599+ nentry->key = key;
75600+ nentry->name = name;
75601+ nentry->inode = inode;
75602+ nentry->device = device;
75603+ nentry->len = len;
75604+ nentry->deleted = deleted;
75605+
75606+ nentry->prev = NULL;
75607+ curr = &name_set.n_hash[index];
75608+ if (*curr != NULL)
75609+ (*curr)->prev = nentry;
75610+ nentry->next = *curr;
75611+ *curr = nentry;
75612+
75613+ /* insert us into the table searchable by inode/dev */
75614+ insert_inodev_entry(ientry);
75615+
75616+ return 1;
75617+}
75618+
75619+static void
75620+insert_acl_obj_label(struct acl_object_label *obj,
75621+ struct acl_subject_label *subj)
75622+{
75623+ unsigned int index =
75624+ fhash(obj->inode, obj->device, subj->obj_hash_size);
75625+ struct acl_object_label **curr;
75626+
75627+
75628+ obj->prev = NULL;
75629+
75630+ curr = &subj->obj_hash[index];
75631+ if (*curr != NULL)
75632+ (*curr)->prev = obj;
75633+
75634+ obj->next = *curr;
75635+ *curr = obj;
75636+
75637+ return;
75638+}
75639+
75640+static void
75641+insert_acl_subj_label(struct acl_subject_label *obj,
75642+ struct acl_role_label *role)
75643+{
75644+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
75645+ struct acl_subject_label **curr;
75646+
75647+ obj->prev = NULL;
75648+
75649+ curr = &role->subj_hash[index];
75650+ if (*curr != NULL)
75651+ (*curr)->prev = obj;
75652+
75653+ obj->next = *curr;
75654+ *curr = obj;
75655+
75656+ return;
75657+}
75658+
75659+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75660+
75661+static void *
75662+create_table(__u32 * len, int elementsize)
75663+{
75664+ unsigned int table_sizes[] = {
75665+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75666+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75667+ 4194301, 8388593, 16777213, 33554393, 67108859
75668+ };
75669+ void *newtable = NULL;
75670+ unsigned int pwr = 0;
75671+
75672+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75673+ table_sizes[pwr] <= *len)
75674+ pwr++;
75675+
75676+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75677+ return newtable;
75678+
75679+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75680+ newtable =
75681+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75682+ else
75683+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75684+
75685+ *len = table_sizes[pwr];
75686+
75687+ return newtable;
75688+}
75689+
75690+static int
75691+init_variables(const struct gr_arg *arg)
75692+{
75693+ struct task_struct *reaper = &init_task;
75694+ unsigned int stacksize;
75695+
75696+ subj_map_set.s_size = arg->role_db.num_subjects;
75697+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75698+ name_set.n_size = arg->role_db.num_objects;
75699+ inodev_set.i_size = arg->role_db.num_objects;
75700+
75701+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
75702+ !name_set.n_size || !inodev_set.i_size)
75703+ return 1;
75704+
75705+ if (!gr_init_uidset())
75706+ return 1;
75707+
75708+ /* set up the stack that holds allocation info */
75709+
75710+ stacksize = arg->role_db.num_pointers + 5;
75711+
75712+ if (!acl_alloc_stack_init(stacksize))
75713+ return 1;
75714+
75715+ /* grab reference for the real root dentry and vfsmount */
75716+ read_lock(&reaper->fs->lock);
75717+ real_root = dget(reaper->fs->root.dentry);
75718+ real_root_mnt = mntget(reaper->fs->root.mnt);
75719+ read_unlock(&reaper->fs->lock);
75720+
75721+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75722+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
75723+#endif
75724+
75725+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
75726+ if (fakefs_obj_rw == NULL)
75727+ return 1;
75728+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75729+
75730+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
75731+ if (fakefs_obj_rwx == NULL)
75732+ return 1;
75733+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75734+
75735+ subj_map_set.s_hash =
75736+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
75737+ acl_role_set.r_hash =
75738+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
75739+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
75740+ inodev_set.i_hash =
75741+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
75742+
75743+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
75744+ !name_set.n_hash || !inodev_set.i_hash)
75745+ return 1;
75746+
75747+ memset(subj_map_set.s_hash, 0,
75748+ sizeof(struct subject_map *) * subj_map_set.s_size);
75749+ memset(acl_role_set.r_hash, 0,
75750+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
75751+ memset(name_set.n_hash, 0,
75752+ sizeof (struct name_entry *) * name_set.n_size);
75753+ memset(inodev_set.i_hash, 0,
75754+ sizeof (struct inodev_entry *) * inodev_set.i_size);
75755+
75756+ return 0;
75757+}
75758+
75759+/* free information not needed after startup
75760+ currently contains user->kernel pointer mappings for subjects
75761+*/
75762+
75763+static void
75764+free_init_variables(void)
75765+{
75766+ __u32 i;
75767+
75768+ if (subj_map_set.s_hash) {
75769+ for (i = 0; i < subj_map_set.s_size; i++) {
75770+ if (subj_map_set.s_hash[i]) {
75771+ kfree(subj_map_set.s_hash[i]);
75772+ subj_map_set.s_hash[i] = NULL;
75773+ }
75774+ }
75775+
75776+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
75777+ PAGE_SIZE)
75778+ kfree(subj_map_set.s_hash);
75779+ else
75780+ vfree(subj_map_set.s_hash);
75781+ }
75782+
75783+ return;
75784+}
75785+
75786+static void
75787+free_variables(void)
75788+{
75789+ struct acl_subject_label *s;
75790+ struct acl_role_label *r;
75791+ struct task_struct *task, *task2;
75792+ unsigned int x;
75793+
75794+ gr_clear_learn_entries();
75795+
75796+ read_lock(&tasklist_lock);
75797+ do_each_thread(task2, task) {
75798+ task->acl_sp_role = 0;
75799+ task->acl_role_id = 0;
75800+ task->acl = NULL;
75801+ task->role = NULL;
75802+ } while_each_thread(task2, task);
75803+ read_unlock(&tasklist_lock);
75804+
75805+ /* release the reference to the real root dentry and vfsmount */
75806+ if (real_root)
75807+ dput(real_root);
75808+ real_root = NULL;
75809+ if (real_root_mnt)
75810+ mntput(real_root_mnt);
75811+ real_root_mnt = NULL;
75812+
75813+ /* free all object hash tables */
75814+
75815+ FOR_EACH_ROLE_START(r)
75816+ if (r->subj_hash == NULL)
75817+ goto next_role;
75818+ FOR_EACH_SUBJECT_START(r, s, x)
75819+ if (s->obj_hash == NULL)
75820+ break;
75821+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75822+ kfree(s->obj_hash);
75823+ else
75824+ vfree(s->obj_hash);
75825+ FOR_EACH_SUBJECT_END(s, x)
75826+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75827+ if (s->obj_hash == NULL)
75828+ break;
75829+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75830+ kfree(s->obj_hash);
75831+ else
75832+ vfree(s->obj_hash);
75833+ FOR_EACH_NESTED_SUBJECT_END(s)
75834+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75835+ kfree(r->subj_hash);
75836+ else
75837+ vfree(r->subj_hash);
75838+ r->subj_hash = NULL;
75839+next_role:
75840+ FOR_EACH_ROLE_END(r)
75841+
75842+ acl_free_all();
75843+
75844+ if (acl_role_set.r_hash) {
75845+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75846+ PAGE_SIZE)
75847+ kfree(acl_role_set.r_hash);
75848+ else
75849+ vfree(acl_role_set.r_hash);
75850+ }
75851+ if (name_set.n_hash) {
75852+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
75853+ PAGE_SIZE)
75854+ kfree(name_set.n_hash);
75855+ else
75856+ vfree(name_set.n_hash);
75857+ }
75858+
75859+ if (inodev_set.i_hash) {
75860+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75861+ PAGE_SIZE)
75862+ kfree(inodev_set.i_hash);
75863+ else
75864+ vfree(inodev_set.i_hash);
75865+ }
75866+
75867+ gr_free_uidset();
75868+
75869+ memset(&name_set, 0, sizeof (struct name_db));
75870+ memset(&inodev_set, 0, sizeof (struct inodev_db));
75871+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
75872+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
75873+
75874+ default_role = NULL;
75875+ kernel_role = NULL;
75876+ role_list = NULL;
75877+
75878+ return;
75879+}
75880+
75881+static __u32
75882+count_user_objs(struct acl_object_label *userp)
75883+{
75884+ struct acl_object_label o_tmp;
75885+ __u32 num = 0;
75886+
75887+ while (userp) {
75888+ if (copy_from_user(&o_tmp, userp,
75889+ sizeof (struct acl_object_label)))
75890+ break;
75891+
75892+ userp = o_tmp.prev;
75893+ num++;
75894+ }
75895+
75896+ return num;
75897+}
75898+
75899+static struct acl_subject_label *
75900+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
75901+
75902+static int
75903+copy_user_glob(struct acl_object_label *obj)
75904+{
75905+ struct acl_object_label *g_tmp, **guser;
75906+ unsigned int len;
75907+ char *tmp;
75908+
75909+ if (obj->globbed == NULL)
75910+ return 0;
75911+
75912+ guser = &obj->globbed;
75913+ while (*guser) {
75914+ g_tmp = (struct acl_object_label *)
75915+ acl_alloc(sizeof (struct acl_object_label));
75916+ if (g_tmp == NULL)
75917+ return -ENOMEM;
75918+
75919+ if (copy_from_user(g_tmp, *guser,
75920+ sizeof (struct acl_object_label)))
75921+ return -EFAULT;
75922+
75923+ len = strnlen_user(g_tmp->filename, PATH_MAX);
75924+
75925+ if (!len || len >= PATH_MAX)
75926+ return -EINVAL;
75927+
75928+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75929+ return -ENOMEM;
75930+
75931+ if (copy_from_user(tmp, g_tmp->filename, len))
75932+ return -EFAULT;
75933+ tmp[len-1] = '\0';
75934+ g_tmp->filename = tmp;
75935+
75936+ *guser = g_tmp;
75937+ guser = &(g_tmp->next);
75938+ }
75939+
75940+ return 0;
75941+}
75942+
75943+static int
75944+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75945+ struct acl_role_label *role)
75946+{
75947+ struct acl_object_label *o_tmp;
75948+ unsigned int len;
75949+ int ret;
75950+ char *tmp;
75951+
75952+ while (userp) {
75953+ if ((o_tmp = (struct acl_object_label *)
75954+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75955+ return -ENOMEM;
75956+
75957+ if (copy_from_user(o_tmp, userp,
75958+ sizeof (struct acl_object_label)))
75959+ return -EFAULT;
75960+
75961+ userp = o_tmp->prev;
75962+
75963+ len = strnlen_user(o_tmp->filename, PATH_MAX);
75964+
75965+ if (!len || len >= PATH_MAX)
75966+ return -EINVAL;
75967+
75968+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75969+ return -ENOMEM;
75970+
75971+ if (copy_from_user(tmp, o_tmp->filename, len))
75972+ return -EFAULT;
75973+ tmp[len-1] = '\0';
75974+ o_tmp->filename = tmp;
75975+
75976+ insert_acl_obj_label(o_tmp, subj);
75977+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75978+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75979+ return -ENOMEM;
75980+
75981+ ret = copy_user_glob(o_tmp);
75982+ if (ret)
75983+ return ret;
75984+
75985+ if (o_tmp->nested) {
75986+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
75987+ if (IS_ERR(o_tmp->nested))
75988+ return PTR_ERR(o_tmp->nested);
75989+
75990+ /* insert into nested subject list */
75991+ o_tmp->nested->next = role->hash->first;
75992+ role->hash->first = o_tmp->nested;
75993+ }
75994+ }
75995+
75996+ return 0;
75997+}
75998+
75999+static __u32
76000+count_user_subjs(struct acl_subject_label *userp)
76001+{
76002+ struct acl_subject_label s_tmp;
76003+ __u32 num = 0;
76004+
76005+ while (userp) {
76006+ if (copy_from_user(&s_tmp, userp,
76007+ sizeof (struct acl_subject_label)))
76008+ break;
76009+
76010+ userp = s_tmp.prev;
76011+ /* do not count nested subjects against this count, since
76012+ they are not included in the hash table, but are
76013+ attached to objects. We have already counted
76014+ the subjects in userspace for the allocation
76015+ stack
76016+ */
76017+ if (!(s_tmp.mode & GR_NESTED))
76018+ num++;
76019+ }
76020+
76021+ return num;
76022+}
76023+
76024+static int
76025+copy_user_allowedips(struct acl_role_label *rolep)
76026+{
76027+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
76028+
76029+ ruserip = rolep->allowed_ips;
76030+
76031+ while (ruserip) {
76032+ rlast = rtmp;
76033+
76034+ if ((rtmp = (struct role_allowed_ip *)
76035+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
76036+ return -ENOMEM;
76037+
76038+ if (copy_from_user(rtmp, ruserip,
76039+ sizeof (struct role_allowed_ip)))
76040+ return -EFAULT;
76041+
76042+ ruserip = rtmp->prev;
76043+
76044+ if (!rlast) {
76045+ rtmp->prev = NULL;
76046+ rolep->allowed_ips = rtmp;
76047+ } else {
76048+ rlast->next = rtmp;
76049+ rtmp->prev = rlast;
76050+ }
76051+
76052+ if (!ruserip)
76053+ rtmp->next = NULL;
76054+ }
76055+
76056+ return 0;
76057+}
76058+
76059+static int
76060+copy_user_transitions(struct acl_role_label *rolep)
76061+{
76062+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
76063+
76064+ unsigned int len;
76065+ char *tmp;
76066+
76067+ rusertp = rolep->transitions;
76068+
76069+ while (rusertp) {
76070+ rlast = rtmp;
76071+
76072+ if ((rtmp = (struct role_transition *)
76073+ acl_alloc(sizeof (struct role_transition))) == NULL)
76074+ return -ENOMEM;
76075+
76076+ if (copy_from_user(rtmp, rusertp,
76077+ sizeof (struct role_transition)))
76078+ return -EFAULT;
76079+
76080+ rusertp = rtmp->prev;
76081+
76082+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
76083+
76084+ if (!len || len >= GR_SPROLE_LEN)
76085+ return -EINVAL;
76086+
76087+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76088+ return -ENOMEM;
76089+
76090+ if (copy_from_user(tmp, rtmp->rolename, len))
76091+ return -EFAULT;
76092+ tmp[len-1] = '\0';
76093+ rtmp->rolename = tmp;
76094+
76095+ if (!rlast) {
76096+ rtmp->prev = NULL;
76097+ rolep->transitions = rtmp;
76098+ } else {
76099+ rlast->next = rtmp;
76100+ rtmp->prev = rlast;
76101+ }
76102+
76103+ if (!rusertp)
76104+ rtmp->next = NULL;
76105+ }
76106+
76107+ return 0;
76108+}
76109+
76110+static struct acl_subject_label *
76111+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
76112+{
76113+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
76114+ unsigned int len;
76115+ char *tmp;
76116+ __u32 num_objs;
76117+ struct acl_ip_label **i_tmp, *i_utmp2;
76118+ struct gr_hash_struct ghash;
76119+ struct subject_map *subjmap;
76120+ unsigned int i_num;
76121+ int err;
76122+
76123+ s_tmp = lookup_subject_map(userp);
76124+
76125+ /* we've already copied this subject into the kernel, just return
76126+ the reference to it, and don't copy it over again
76127+ */
76128+ if (s_tmp)
76129+ return(s_tmp);
76130+
76131+ if ((s_tmp = (struct acl_subject_label *)
76132+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
76133+ return ERR_PTR(-ENOMEM);
76134+
76135+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
76136+ if (subjmap == NULL)
76137+ return ERR_PTR(-ENOMEM);
76138+
76139+ subjmap->user = userp;
76140+ subjmap->kernel = s_tmp;
76141+ insert_subj_map_entry(subjmap);
76142+
76143+ if (copy_from_user(s_tmp, userp,
76144+ sizeof (struct acl_subject_label)))
76145+ return ERR_PTR(-EFAULT);
76146+
76147+ len = strnlen_user(s_tmp->filename, PATH_MAX);
76148+
76149+ if (!len || len >= PATH_MAX)
76150+ return ERR_PTR(-EINVAL);
76151+
76152+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76153+ return ERR_PTR(-ENOMEM);
76154+
76155+ if (copy_from_user(tmp, s_tmp->filename, len))
76156+ return ERR_PTR(-EFAULT);
76157+ tmp[len-1] = '\0';
76158+ s_tmp->filename = tmp;
76159+
76160+ if (!strcmp(s_tmp->filename, "/"))
76161+ role->root_label = s_tmp;
76162+
76163+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
76164+ return ERR_PTR(-EFAULT);
76165+
76166+ /* copy user and group transition tables */
76167+
76168+ if (s_tmp->user_trans_num) {
76169+ uid_t *uidlist;
76170+
76171+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
76172+ if (uidlist == NULL)
76173+ return ERR_PTR(-ENOMEM);
76174+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
76175+ return ERR_PTR(-EFAULT);
76176+
76177+ s_tmp->user_transitions = uidlist;
76178+ }
76179+
76180+ if (s_tmp->group_trans_num) {
76181+ gid_t *gidlist;
76182+
76183+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
76184+ if (gidlist == NULL)
76185+ return ERR_PTR(-ENOMEM);
76186+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
76187+ return ERR_PTR(-EFAULT);
76188+
76189+ s_tmp->group_transitions = gidlist;
76190+ }
76191+
76192+ /* set up object hash table */
76193+ num_objs = count_user_objs(ghash.first);
76194+
76195+ s_tmp->obj_hash_size = num_objs;
76196+ s_tmp->obj_hash =
76197+ (struct acl_object_label **)
76198+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
76199+
76200+ if (!s_tmp->obj_hash)
76201+ return ERR_PTR(-ENOMEM);
76202+
76203+ memset(s_tmp->obj_hash, 0,
76204+ s_tmp->obj_hash_size *
76205+ sizeof (struct acl_object_label *));
76206+
76207+ /* add in objects */
76208+ err = copy_user_objs(ghash.first, s_tmp, role);
76209+
76210+ if (err)
76211+ return ERR_PTR(err);
76212+
76213+ /* set pointer for parent subject */
76214+ if (s_tmp->parent_subject) {
76215+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
76216+
76217+ if (IS_ERR(s_tmp2))
76218+ return s_tmp2;
76219+
76220+ s_tmp->parent_subject = s_tmp2;
76221+ }
76222+
76223+ /* add in ip acls */
76224+
76225+ if (!s_tmp->ip_num) {
76226+ s_tmp->ips = NULL;
76227+ goto insert;
76228+ }
76229+
76230+ i_tmp =
76231+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
76232+ sizeof (struct acl_ip_label *));
76233+
76234+ if (!i_tmp)
76235+ return ERR_PTR(-ENOMEM);
76236+
76237+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
76238+ *(i_tmp + i_num) =
76239+ (struct acl_ip_label *)
76240+ acl_alloc(sizeof (struct acl_ip_label));
76241+ if (!*(i_tmp + i_num))
76242+ return ERR_PTR(-ENOMEM);
76243+
76244+ if (copy_from_user
76245+ (&i_utmp2, s_tmp->ips + i_num,
76246+ sizeof (struct acl_ip_label *)))
76247+ return ERR_PTR(-EFAULT);
76248+
76249+ if (copy_from_user
76250+ (*(i_tmp + i_num), i_utmp2,
76251+ sizeof (struct acl_ip_label)))
76252+ return ERR_PTR(-EFAULT);
76253+
76254+ if ((*(i_tmp + i_num))->iface == NULL)
76255+ continue;
76256+
76257+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
76258+ if (!len || len >= IFNAMSIZ)
76259+ return ERR_PTR(-EINVAL);
76260+ tmp = acl_alloc(len);
76261+ if (tmp == NULL)
76262+ return ERR_PTR(-ENOMEM);
76263+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
76264+ return ERR_PTR(-EFAULT);
76265+ (*(i_tmp + i_num))->iface = tmp;
76266+ }
76267+
76268+ s_tmp->ips = i_tmp;
76269+
76270+insert:
76271+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
76272+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
76273+ return ERR_PTR(-ENOMEM);
76274+
76275+ return s_tmp;
76276+}
76277+
76278+static int
76279+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
76280+{
76281+ struct acl_subject_label s_pre;
76282+ struct acl_subject_label * ret;
76283+ int err;
76284+
76285+ while (userp) {
76286+ if (copy_from_user(&s_pre, userp,
76287+ sizeof (struct acl_subject_label)))
76288+ return -EFAULT;
76289+
76290+ /* do not add nested subjects here, add
76291+ while parsing objects
76292+ */
76293+
76294+ if (s_pre.mode & GR_NESTED) {
76295+ userp = s_pre.prev;
76296+ continue;
76297+ }
76298+
76299+ ret = do_copy_user_subj(userp, role);
76300+
76301+ err = PTR_ERR(ret);
76302+ if (IS_ERR(ret))
76303+ return err;
76304+
76305+ insert_acl_subj_label(ret, role);
76306+
76307+ userp = s_pre.prev;
76308+ }
76309+
76310+ return 0;
76311+}
76312+
76313+static int
76314+copy_user_acl(struct gr_arg *arg)
76315+{
76316+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
76317+ struct sprole_pw *sptmp;
76318+ struct gr_hash_struct *ghash;
76319+ uid_t *domainlist;
76320+ unsigned int r_num;
76321+ unsigned int len;
76322+ char *tmp;
76323+ int err = 0;
76324+ __u16 i;
76325+ __u32 num_subjs;
76326+
76327+ /* we need a default and kernel role */
76328+ if (arg->role_db.num_roles < 2)
76329+ return -EINVAL;
76330+
76331+ /* copy special role authentication info from userspace */
76332+
76333+ num_sprole_pws = arg->num_sprole_pws;
76334+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
76335+
76336+ if (!acl_special_roles && num_sprole_pws)
76337+ return -ENOMEM;
76338+
76339+ for (i = 0; i < num_sprole_pws; i++) {
76340+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
76341+ if (!sptmp)
76342+ return -ENOMEM;
76343+ if (copy_from_user(sptmp, arg->sprole_pws + i,
76344+ sizeof (struct sprole_pw)))
76345+ return -EFAULT;
76346+
76347+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
76348+
76349+ if (!len || len >= GR_SPROLE_LEN)
76350+ return -EINVAL;
76351+
76352+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76353+ return -ENOMEM;
76354+
76355+ if (copy_from_user(tmp, sptmp->rolename, len))
76356+ return -EFAULT;
76357+
76358+ tmp[len-1] = '\0';
76359+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76360+ printk(KERN_ALERT "Copying special role %s\n", tmp);
76361+#endif
76362+ sptmp->rolename = tmp;
76363+ acl_special_roles[i] = sptmp;
76364+ }
76365+
76366+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
76367+
76368+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
76369+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
76370+
76371+ if (!r_tmp)
76372+ return -ENOMEM;
76373+
76374+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
76375+ sizeof (struct acl_role_label *)))
76376+ return -EFAULT;
76377+
76378+ if (copy_from_user(r_tmp, r_utmp2,
76379+ sizeof (struct acl_role_label)))
76380+ return -EFAULT;
76381+
76382+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
76383+
76384+ if (!len || len >= PATH_MAX)
76385+ return -EINVAL;
76386+
76387+ if ((tmp = (char *) acl_alloc(len)) == NULL)
76388+ return -ENOMEM;
76389+
76390+ if (copy_from_user(tmp, r_tmp->rolename, len))
76391+ return -EFAULT;
76392+
76393+ tmp[len-1] = '\0';
76394+ r_tmp->rolename = tmp;
76395+
76396+ if (!strcmp(r_tmp->rolename, "default")
76397+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
76398+ default_role = r_tmp;
76399+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
76400+ kernel_role = r_tmp;
76401+ }
76402+
76403+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
76404+ return -ENOMEM;
76405+
76406+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
76407+ return -EFAULT;
76408+
76409+ r_tmp->hash = ghash;
76410+
76411+ num_subjs = count_user_subjs(r_tmp->hash->first);
76412+
76413+ r_tmp->subj_hash_size = num_subjs;
76414+ r_tmp->subj_hash =
76415+ (struct acl_subject_label **)
76416+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
76417+
76418+ if (!r_tmp->subj_hash)
76419+ return -ENOMEM;
76420+
76421+ err = copy_user_allowedips(r_tmp);
76422+ if (err)
76423+ return err;
76424+
76425+ /* copy domain info */
76426+ if (r_tmp->domain_children != NULL) {
76427+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
76428+ if (domainlist == NULL)
76429+ return -ENOMEM;
76430+
76431+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
76432+ return -EFAULT;
76433+
76434+ r_tmp->domain_children = domainlist;
76435+ }
76436+
76437+ err = copy_user_transitions(r_tmp);
76438+ if (err)
76439+ return err;
76440+
76441+ memset(r_tmp->subj_hash, 0,
76442+ r_tmp->subj_hash_size *
76443+ sizeof (struct acl_subject_label *));
76444+
76445+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
76446+
76447+ if (err)
76448+ return err;
76449+
76450+ /* set nested subject list to null */
76451+ r_tmp->hash->first = NULL;
76452+
76453+ insert_acl_role_label(r_tmp);
76454+ }
76455+
76456+ if (default_role == NULL || kernel_role == NULL)
76457+ return -EINVAL;
76458+
76459+ return err;
76460+}
76461+
76462+static int
76463+gracl_init(struct gr_arg *args)
76464+{
76465+ int error = 0;
76466+
76467+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76468+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76469+
76470+ if (init_variables(args)) {
76471+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76472+ error = -ENOMEM;
76473+ free_variables();
76474+ goto out;
76475+ }
76476+
76477+ error = copy_user_acl(args);
76478+ free_init_variables();
76479+ if (error) {
76480+ free_variables();
76481+ goto out;
76482+ }
76483+
76484+ if ((error = gr_set_acls(0))) {
76485+ free_variables();
76486+ goto out;
76487+ }
76488+
76489+ pax_open_kernel();
76490+ gr_status |= GR_READY;
76491+ pax_close_kernel();
76492+
76493+ out:
76494+ return error;
76495+}
76496+
76497+/* derived from glibc fnmatch() 0: match, 1: no match*/
76498+
76499+static int
76500+glob_match(const char *p, const char *n)
76501+{
76502+ char c;
76503+
76504+ while ((c = *p++) != '\0') {
76505+ switch (c) {
76506+ case '?':
76507+ if (*n == '\0')
76508+ return 1;
76509+ else if (*n == '/')
76510+ return 1;
76511+ break;
76512+ case '\\':
76513+ if (*n != c)
76514+ return 1;
76515+ break;
76516+ case '*':
76517+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
76518+ if (*n == '/')
76519+ return 1;
76520+ else if (c == '?') {
76521+ if (*n == '\0')
76522+ return 1;
76523+ else
76524+ ++n;
76525+ }
76526+ }
76527+ if (c == '\0') {
76528+ return 0;
76529+ } else {
76530+ const char *endp;
76531+
76532+ if ((endp = strchr(n, '/')) == NULL)
76533+ endp = n + strlen(n);
76534+
76535+ if (c == '[') {
76536+ for (--p; n < endp; ++n)
76537+ if (!glob_match(p, n))
76538+ return 0;
76539+ } else if (c == '/') {
76540+ while (*n != '\0' && *n != '/')
76541+ ++n;
76542+ if (*n == '/' && !glob_match(p, n + 1))
76543+ return 0;
76544+ } else {
76545+ for (--p; n < endp; ++n)
76546+ if (*n == c && !glob_match(p, n))
76547+ return 0;
76548+ }
76549+
76550+ return 1;
76551+ }
76552+ case '[':
76553+ {
76554+ int not;
76555+ char cold;
76556+
76557+ if (*n == '\0' || *n == '/')
76558+ return 1;
76559+
76560+ not = (*p == '!' || *p == '^');
76561+ if (not)
76562+ ++p;
76563+
76564+ c = *p++;
76565+ for (;;) {
76566+ unsigned char fn = (unsigned char)*n;
76567+
76568+ if (c == '\0')
76569+ return 1;
76570+ else {
76571+ if (c == fn)
76572+ goto matched;
76573+ cold = c;
76574+ c = *p++;
76575+
76576+ if (c == '-' && *p != ']') {
76577+ unsigned char cend = *p++;
76578+
76579+ if (cend == '\0')
76580+ return 1;
76581+
76582+ if (cold <= fn && fn <= cend)
76583+ goto matched;
76584+
76585+ c = *p++;
76586+ }
76587+ }
76588+
76589+ if (c == ']')
76590+ break;
76591+ }
76592+ if (!not)
76593+ return 1;
76594+ break;
76595+ matched:
76596+ while (c != ']') {
76597+ if (c == '\0')
76598+ return 1;
76599+
76600+ c = *p++;
76601+ }
76602+ if (not)
76603+ return 1;
76604+ }
76605+ break;
76606+ default:
76607+ if (c != *n)
76608+ return 1;
76609+ }
76610+
76611+ ++n;
76612+ }
76613+
76614+ if (*n == '\0')
76615+ return 0;
76616+
76617+ if (*n == '/')
76618+ return 0;
76619+
76620+ return 1;
76621+}
76622+
76623+static struct acl_object_label *
76624+chk_glob_label(struct acl_object_label *globbed,
76625+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
76626+{
76627+ struct acl_object_label *tmp;
76628+
76629+ if (*path == NULL)
76630+ *path = gr_to_filename_nolock(dentry, mnt);
76631+
76632+ tmp = globbed;
76633+
76634+ while (tmp) {
76635+ if (!glob_match(tmp->filename, *path))
76636+ return tmp;
76637+ tmp = tmp->next;
76638+ }
76639+
76640+ return NULL;
76641+}
76642+
76643+static struct acl_object_label *
76644+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76645+ const ino_t curr_ino, const dev_t curr_dev,
76646+ const struct acl_subject_label *subj, char **path, const int checkglob)
76647+{
76648+ struct acl_subject_label *tmpsubj;
76649+ struct acl_object_label *retval;
76650+ struct acl_object_label *retval2;
76651+
76652+ tmpsubj = (struct acl_subject_label *) subj;
76653+ read_lock(&gr_inode_lock);
76654+ do {
76655+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
76656+ if (retval) {
76657+ if (checkglob && retval->globbed) {
76658+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
76659+ if (retval2)
76660+ retval = retval2;
76661+ }
76662+ break;
76663+ }
76664+ } while ((tmpsubj = tmpsubj->parent_subject));
76665+ read_unlock(&gr_inode_lock);
76666+
76667+ return retval;
76668+}
76669+
76670+static __inline__ struct acl_object_label *
76671+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76672+ const struct dentry *curr_dentry,
76673+ const struct acl_subject_label *subj, char **path, const int checkglob)
76674+{
76675+ int newglob = checkglob;
76676+
76677+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
76678+ as we don't want a / * rule to match instead of the / object
76679+ don't do this for create lookups that call this function though, since they're looking up
76680+ on the parent and thus need globbing checks on all paths
76681+ */
76682+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
76683+ newglob = GR_NO_GLOB;
76684+
76685+ return __full_lookup(orig_dentry, orig_mnt,
76686+ curr_dentry->d_inode->i_ino,
76687+ __get_dev(curr_dentry), subj, path, newglob);
76688+}
76689+
76690+static struct acl_object_label *
76691+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76692+ const struct acl_subject_label *subj, char *path, const int checkglob)
76693+{
76694+ struct dentry *dentry = (struct dentry *) l_dentry;
76695+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76696+ struct acl_object_label *retval;
76697+
76698+ spin_lock(&dcache_lock);
76699+ spin_lock(&vfsmount_lock);
76700+
76701+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
76702+#ifdef CONFIG_NET
76703+ mnt == sock_mnt ||
76704+#endif
76705+#ifdef CONFIG_HUGETLBFS
76706+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
76707+#endif
76708+ /* ignore Eric Biederman */
76709+ IS_PRIVATE(l_dentry->d_inode))) {
76710+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
76711+ goto out;
76712+ }
76713+
76714+ for (;;) {
76715+ if (dentry == real_root && mnt == real_root_mnt)
76716+ break;
76717+
76718+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76719+ if (mnt->mnt_parent == mnt)
76720+ break;
76721+
76722+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76723+ if (retval != NULL)
76724+ goto out;
76725+
76726+ dentry = mnt->mnt_mountpoint;
76727+ mnt = mnt->mnt_parent;
76728+ continue;
76729+ }
76730+
76731+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76732+ if (retval != NULL)
76733+ goto out;
76734+
76735+ dentry = dentry->d_parent;
76736+ }
76737+
76738+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76739+
76740+ if (retval == NULL)
76741+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
76742+out:
76743+ spin_unlock(&vfsmount_lock);
76744+ spin_unlock(&dcache_lock);
76745+
76746+ BUG_ON(retval == NULL);
76747+
76748+ return retval;
76749+}
76750+
76751+static __inline__ struct acl_object_label *
76752+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76753+ const struct acl_subject_label *subj)
76754+{
76755+ char *path = NULL;
76756+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
76757+}
76758+
76759+static __inline__ struct acl_object_label *
76760+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76761+ const struct acl_subject_label *subj)
76762+{
76763+ char *path = NULL;
76764+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
76765+}
76766+
76767+static __inline__ struct acl_object_label *
76768+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76769+ const struct acl_subject_label *subj, char *path)
76770+{
76771+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
76772+}
76773+
76774+static struct acl_subject_label *
76775+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76776+ const struct acl_role_label *role)
76777+{
76778+ struct dentry *dentry = (struct dentry *) l_dentry;
76779+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76780+ struct acl_subject_label *retval;
76781+
76782+ spin_lock(&dcache_lock);
76783+ spin_lock(&vfsmount_lock);
76784+
76785+ for (;;) {
76786+ if (dentry == real_root && mnt == real_root_mnt)
76787+ break;
76788+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76789+ if (mnt->mnt_parent == mnt)
76790+ break;
76791+
76792+ read_lock(&gr_inode_lock);
76793+ retval =
76794+ lookup_acl_subj_label(dentry->d_inode->i_ino,
76795+ __get_dev(dentry), role);
76796+ read_unlock(&gr_inode_lock);
76797+ if (retval != NULL)
76798+ goto out;
76799+
76800+ dentry = mnt->mnt_mountpoint;
76801+ mnt = mnt->mnt_parent;
76802+ continue;
76803+ }
76804+
76805+ read_lock(&gr_inode_lock);
76806+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76807+ __get_dev(dentry), role);
76808+ read_unlock(&gr_inode_lock);
76809+ if (retval != NULL)
76810+ goto out;
76811+
76812+ dentry = dentry->d_parent;
76813+ }
76814+
76815+ read_lock(&gr_inode_lock);
76816+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76817+ __get_dev(dentry), role);
76818+ read_unlock(&gr_inode_lock);
76819+
76820+ if (unlikely(retval == NULL)) {
76821+ read_lock(&gr_inode_lock);
76822+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
76823+ __get_dev(real_root), role);
76824+ read_unlock(&gr_inode_lock);
76825+ }
76826+out:
76827+ spin_unlock(&vfsmount_lock);
76828+ spin_unlock(&dcache_lock);
76829+
76830+ BUG_ON(retval == NULL);
76831+
76832+ return retval;
76833+}
76834+
76835+static void
76836+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
76837+{
76838+ struct task_struct *task = current;
76839+ const struct cred *cred = current_cred();
76840+
76841+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76842+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76843+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76844+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
76845+
76846+ return;
76847+}
76848+
76849+static void
76850+gr_log_learn_sysctl(const char *path, const __u32 mode)
76851+{
76852+ struct task_struct *task = current;
76853+ const struct cred *cred = current_cred();
76854+
76855+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76856+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76857+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76858+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
76859+
76860+ return;
76861+}
76862+
76863+static void
76864+gr_log_learn_id_change(const char type, const unsigned int real,
76865+ const unsigned int effective, const unsigned int fs)
76866+{
76867+ struct task_struct *task = current;
76868+ const struct cred *cred = current_cred();
76869+
76870+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
76871+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76872+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76873+ type, real, effective, fs, &task->signal->saved_ip);
76874+
76875+ return;
76876+}
76877+
76878+__u32
76879+gr_search_file(const struct dentry * dentry, const __u32 mode,
76880+ const struct vfsmount * mnt)
76881+{
76882+ __u32 retval = mode;
76883+ struct acl_subject_label *curracl;
76884+ struct acl_object_label *currobj;
76885+
76886+ if (unlikely(!(gr_status & GR_READY)))
76887+ return (mode & ~GR_AUDITS);
76888+
76889+ curracl = current->acl;
76890+
76891+ currobj = chk_obj_label(dentry, mnt, curracl);
76892+ retval = currobj->mode & mode;
76893+
76894+ /* if we're opening a specified transfer file for writing
76895+ (e.g. /dev/initctl), then transfer our role to init
76896+ */
76897+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
76898+ current->role->roletype & GR_ROLE_PERSIST)) {
76899+ struct task_struct *task = init_pid_ns.child_reaper;
76900+
76901+ if (task->role != current->role) {
76902+ task->acl_sp_role = 0;
76903+ task->acl_role_id = current->acl_role_id;
76904+ task->role = current->role;
76905+ rcu_read_lock();
76906+ read_lock(&grsec_exec_file_lock);
76907+ gr_apply_subject_to_task(task);
76908+ read_unlock(&grsec_exec_file_lock);
76909+ rcu_read_unlock();
76910+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
76911+ }
76912+ }
76913+
76914+ if (unlikely
76915+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
76916+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
76917+ __u32 new_mode = mode;
76918+
76919+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
76920+
76921+ retval = new_mode;
76922+
76923+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
76924+ new_mode |= GR_INHERIT;
76925+
76926+ if (!(mode & GR_NOLEARN))
76927+ gr_log_learn(dentry, mnt, new_mode);
76928+ }
76929+
76930+ return retval;
76931+}
76932+
76933+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
76934+ const struct dentry *parent,
76935+ const struct vfsmount *mnt)
76936+{
76937+ struct name_entry *match;
76938+ struct acl_object_label *matchpo;
76939+ struct acl_subject_label *curracl;
76940+ char *path;
76941+
76942+ if (unlikely(!(gr_status & GR_READY)))
76943+ return NULL;
76944+
76945+ preempt_disable();
76946+ path = gr_to_filename_rbac(new_dentry, mnt);
76947+ match = lookup_name_entry_create(path);
76948+
76949+ curracl = current->acl;
76950+
76951+ if (match) {
76952+ read_lock(&gr_inode_lock);
76953+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
76954+ read_unlock(&gr_inode_lock);
76955+
76956+ if (matchpo) {
76957+ preempt_enable();
76958+ return matchpo;
76959+ }
76960+ }
76961+
76962+ // lookup parent
76963+
76964+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
76965+
76966+ preempt_enable();
76967+ return matchpo;
76968+}
76969+
76970+__u32
76971+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
76972+ const struct vfsmount * mnt, const __u32 mode)
76973+{
76974+ struct acl_object_label *matchpo;
76975+ __u32 retval;
76976+
76977+ if (unlikely(!(gr_status & GR_READY)))
76978+ return (mode & ~GR_AUDITS);
76979+
76980+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
76981+
76982+ retval = matchpo->mode & mode;
76983+
76984+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
76985+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
76986+ __u32 new_mode = mode;
76987+
76988+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
76989+
76990+ gr_log_learn(new_dentry, mnt, new_mode);
76991+ return new_mode;
76992+ }
76993+
76994+ return retval;
76995+}
76996+
76997+__u32
76998+gr_check_link(const struct dentry * new_dentry,
76999+ const struct dentry * parent_dentry,
77000+ const struct vfsmount * parent_mnt,
77001+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
77002+{
77003+ struct acl_object_label *obj;
77004+ __u32 oldmode, newmode;
77005+ __u32 needmode;
77006+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
77007+ GR_DELETE | GR_INHERIT;
77008+
77009+ if (unlikely(!(gr_status & GR_READY)))
77010+ return (GR_CREATE | GR_LINK);
77011+
77012+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
77013+ oldmode = obj->mode;
77014+
77015+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
77016+ newmode = obj->mode;
77017+
77018+ needmode = newmode & checkmodes;
77019+
77020+ // old name for hardlink must have at least the permissions of the new name
77021+ if ((oldmode & needmode) != needmode)
77022+ goto bad;
77023+
77024+ // if old name had restrictions/auditing, make sure the new name does as well
77025+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
77026+
77027+ // don't allow hardlinking of suid/sgid files without permission
77028+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77029+ needmode |= GR_SETID;
77030+
77031+ if ((newmode & needmode) != needmode)
77032+ goto bad;
77033+
77034+ // enforce minimum permissions
77035+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
77036+ return newmode;
77037+bad:
77038+ needmode = oldmode;
77039+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
77040+ needmode |= GR_SETID;
77041+
77042+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
77043+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
77044+ return (GR_CREATE | GR_LINK);
77045+ } else if (newmode & GR_SUPPRESS)
77046+ return GR_SUPPRESS;
77047+ else
77048+ return 0;
77049+}
77050+
77051+int
77052+gr_check_hidden_task(const struct task_struct *task)
77053+{
77054+ if (unlikely(!(gr_status & GR_READY)))
77055+ return 0;
77056+
77057+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
77058+ return 1;
77059+
77060+ return 0;
77061+}
77062+
77063+int
77064+gr_check_protected_task(const struct task_struct *task)
77065+{
77066+ if (unlikely(!(gr_status & GR_READY) || !task))
77067+ return 0;
77068+
77069+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77070+ task->acl != current->acl)
77071+ return 1;
77072+
77073+ return 0;
77074+}
77075+
77076+int
77077+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77078+{
77079+ struct task_struct *p;
77080+ int ret = 0;
77081+
77082+ if (unlikely(!(gr_status & GR_READY) || !pid))
77083+ return ret;
77084+
77085+ read_lock(&tasklist_lock);
77086+ do_each_pid_task(pid, type, p) {
77087+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
77088+ p->acl != current->acl) {
77089+ ret = 1;
77090+ goto out;
77091+ }
77092+ } while_each_pid_task(pid, type, p);
77093+out:
77094+ read_unlock(&tasklist_lock);
77095+
77096+ return ret;
77097+}
77098+
77099+void
77100+gr_copy_label(struct task_struct *tsk)
77101+{
77102+ /* plain copying of fields is already done by dup_task_struct */
77103+ tsk->signal->used_accept = 0;
77104+ tsk->acl_sp_role = 0;
77105+ //tsk->acl_role_id = current->acl_role_id;
77106+ //tsk->acl = current->acl;
77107+ //tsk->role = current->role;
77108+ tsk->signal->curr_ip = current->signal->curr_ip;
77109+ tsk->signal->saved_ip = current->signal->saved_ip;
77110+ if (current->exec_file)
77111+ get_file(current->exec_file);
77112+ //tsk->exec_file = current->exec_file;
77113+ //tsk->is_writable = current->is_writable;
77114+ if (unlikely(current->signal->used_accept)) {
77115+ current->signal->curr_ip = 0;
77116+ current->signal->saved_ip = 0;
77117+ }
77118+
77119+ return;
77120+}
77121+
77122+static void
77123+gr_set_proc_res(struct task_struct *task)
77124+{
77125+ struct acl_subject_label *proc;
77126+ unsigned short i;
77127+
77128+ proc = task->acl;
77129+
77130+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
77131+ return;
77132+
77133+ for (i = 0; i < RLIM_NLIMITS; i++) {
77134+ if (!(proc->resmask & (1 << i)))
77135+ continue;
77136+
77137+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
77138+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
77139+ }
77140+
77141+ return;
77142+}
77143+
77144+extern int __gr_process_user_ban(struct user_struct *user);
77145+
77146+int
77147+gr_check_user_change(int real, int effective, int fs)
77148+{
77149+ unsigned int i;
77150+ __u16 num;
77151+ uid_t *uidlist;
77152+ int curuid;
77153+ int realok = 0;
77154+ int effectiveok = 0;
77155+ int fsok = 0;
77156+
77157+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
77158+ struct user_struct *user;
77159+
77160+ if (real == -1)
77161+ goto skipit;
77162+
77163+ user = find_user(real);
77164+ if (user == NULL)
77165+ goto skipit;
77166+
77167+ if (__gr_process_user_ban(user)) {
77168+ /* for find_user */
77169+ free_uid(user);
77170+ return 1;
77171+ }
77172+
77173+ /* for find_user */
77174+ free_uid(user);
77175+
77176+skipit:
77177+#endif
77178+
77179+ if (unlikely(!(gr_status & GR_READY)))
77180+ return 0;
77181+
77182+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77183+ gr_log_learn_id_change('u', real, effective, fs);
77184+
77185+ num = current->acl->user_trans_num;
77186+ uidlist = current->acl->user_transitions;
77187+
77188+ if (uidlist == NULL)
77189+ return 0;
77190+
77191+ if (real == -1)
77192+ realok = 1;
77193+ if (effective == -1)
77194+ effectiveok = 1;
77195+ if (fs == -1)
77196+ fsok = 1;
77197+
77198+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
77199+ for (i = 0; i < num; i++) {
77200+ curuid = (int)uidlist[i];
77201+ if (real == curuid)
77202+ realok = 1;
77203+ if (effective == curuid)
77204+ effectiveok = 1;
77205+ if (fs == curuid)
77206+ fsok = 1;
77207+ }
77208+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
77209+ for (i = 0; i < num; i++) {
77210+ curuid = (int)uidlist[i];
77211+ if (real == curuid)
77212+ break;
77213+ if (effective == curuid)
77214+ break;
77215+ if (fs == curuid)
77216+ break;
77217+ }
77218+ /* not in deny list */
77219+ if (i == num) {
77220+ realok = 1;
77221+ effectiveok = 1;
77222+ fsok = 1;
77223+ }
77224+ }
77225+
77226+ if (realok && effectiveok && fsok)
77227+ return 0;
77228+ else {
77229+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77230+ return 1;
77231+ }
77232+}
77233+
77234+int
77235+gr_check_group_change(int real, int effective, int fs)
77236+{
77237+ unsigned int i;
77238+ __u16 num;
77239+ gid_t *gidlist;
77240+ int curgid;
77241+ int realok = 0;
77242+ int effectiveok = 0;
77243+ int fsok = 0;
77244+
77245+ if (unlikely(!(gr_status & GR_READY)))
77246+ return 0;
77247+
77248+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
77249+ gr_log_learn_id_change('g', real, effective, fs);
77250+
77251+ num = current->acl->group_trans_num;
77252+ gidlist = current->acl->group_transitions;
77253+
77254+ if (gidlist == NULL)
77255+ return 0;
77256+
77257+ if (real == -1)
77258+ realok = 1;
77259+ if (effective == -1)
77260+ effectiveok = 1;
77261+ if (fs == -1)
77262+ fsok = 1;
77263+
77264+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
77265+ for (i = 0; i < num; i++) {
77266+ curgid = (int)gidlist[i];
77267+ if (real == curgid)
77268+ realok = 1;
77269+ if (effective == curgid)
77270+ effectiveok = 1;
77271+ if (fs == curgid)
77272+ fsok = 1;
77273+ }
77274+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
77275+ for (i = 0; i < num; i++) {
77276+ curgid = (int)gidlist[i];
77277+ if (real == curgid)
77278+ break;
77279+ if (effective == curgid)
77280+ break;
77281+ if (fs == curgid)
77282+ break;
77283+ }
77284+ /* not in deny list */
77285+ if (i == num) {
77286+ realok = 1;
77287+ effectiveok = 1;
77288+ fsok = 1;
77289+ }
77290+ }
77291+
77292+ if (realok && effectiveok && fsok)
77293+ return 0;
77294+ else {
77295+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
77296+ return 1;
77297+ }
77298+}
77299+
77300+extern int gr_acl_is_capable(const int cap);
77301+
77302+void
77303+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
77304+{
77305+ struct acl_role_label *role = task->role;
77306+ struct acl_subject_label *subj = NULL;
77307+ struct acl_object_label *obj;
77308+ struct file *filp;
77309+
77310+ if (unlikely(!(gr_status & GR_READY)))
77311+ return;
77312+
77313+ filp = task->exec_file;
77314+
77315+ /* kernel process, we'll give them the kernel role */
77316+ if (unlikely(!filp)) {
77317+ task->role = kernel_role;
77318+ task->acl = kernel_role->root_label;
77319+ return;
77320+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
77321+ role = lookup_acl_role_label(task, uid, gid);
77322+
77323+ /* don't change the role if we're not a privileged process */
77324+ if (role && task->role != role &&
77325+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
77326+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
77327+ return;
77328+
77329+ /* perform subject lookup in possibly new role
77330+ we can use this result below in the case where role == task->role
77331+ */
77332+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
77333+
77334+ /* if we changed uid/gid, but result in the same role
77335+ and are using inheritance, don't lose the inherited subject
77336+ if current subject is other than what normal lookup
77337+ would result in, we arrived via inheritance, don't
77338+ lose subject
77339+ */
77340+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
77341+ (subj == task->acl)))
77342+ task->acl = subj;
77343+
77344+ task->role = role;
77345+
77346+ task->is_writable = 0;
77347+
77348+ /* ignore additional mmap checks for processes that are writable
77349+ by the default ACL */
77350+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77351+ if (unlikely(obj->mode & GR_WRITE))
77352+ task->is_writable = 1;
77353+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
77354+ if (unlikely(obj->mode & GR_WRITE))
77355+ task->is_writable = 1;
77356+
77357+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77358+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77359+#endif
77360+
77361+ gr_set_proc_res(task);
77362+
77363+ return;
77364+}
77365+
77366+int
77367+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77368+ const int unsafe_flags)
77369+{
77370+ struct task_struct *task = current;
77371+ struct acl_subject_label *newacl;
77372+ struct acl_object_label *obj;
77373+ __u32 retmode;
77374+
77375+ if (unlikely(!(gr_status & GR_READY)))
77376+ return 0;
77377+
77378+ newacl = chk_subj_label(dentry, mnt, task->role);
77379+
77380+ task_lock(task);
77381+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
77382+ !(task->role->roletype & GR_ROLE_GOD) &&
77383+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
77384+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
77385+ task_unlock(task);
77386+ if (unsafe_flags & LSM_UNSAFE_SHARE)
77387+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
77388+ else
77389+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
77390+ return -EACCES;
77391+ }
77392+ task_unlock(task);
77393+
77394+ obj = chk_obj_label(dentry, mnt, task->acl);
77395+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
77396+
77397+ if (!(task->acl->mode & GR_INHERITLEARN) &&
77398+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
77399+ if (obj->nested)
77400+ task->acl = obj->nested;
77401+ else
77402+ task->acl = newacl;
77403+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
77404+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
77405+
77406+ task->is_writable = 0;
77407+
77408+ /* ignore additional mmap checks for processes that are writable
77409+ by the default ACL */
77410+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
77411+ if (unlikely(obj->mode & GR_WRITE))
77412+ task->is_writable = 1;
77413+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
77414+ if (unlikely(obj->mode & GR_WRITE))
77415+ task->is_writable = 1;
77416+
77417+ gr_set_proc_res(task);
77418+
77419+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77420+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77421+#endif
77422+ return 0;
77423+}
77424+
77425+/* always called with valid inodev ptr */
77426+static void
77427+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
77428+{
77429+ struct acl_object_label *matchpo;
77430+ struct acl_subject_label *matchps;
77431+ struct acl_subject_label *subj;
77432+ struct acl_role_label *role;
77433+ unsigned int x;
77434+
77435+ FOR_EACH_ROLE_START(role)
77436+ FOR_EACH_SUBJECT_START(role, subj, x)
77437+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
77438+ matchpo->mode |= GR_DELETED;
77439+ FOR_EACH_SUBJECT_END(subj,x)
77440+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77441+ if (subj->inode == ino && subj->device == dev)
77442+ subj->mode |= GR_DELETED;
77443+ FOR_EACH_NESTED_SUBJECT_END(subj)
77444+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
77445+ matchps->mode |= GR_DELETED;
77446+ FOR_EACH_ROLE_END(role)
77447+
77448+ inodev->nentry->deleted = 1;
77449+
77450+ return;
77451+}
77452+
77453+void
77454+gr_handle_delete(const ino_t ino, const dev_t dev)
77455+{
77456+ struct inodev_entry *inodev;
77457+
77458+ if (unlikely(!(gr_status & GR_READY)))
77459+ return;
77460+
77461+ write_lock(&gr_inode_lock);
77462+ inodev = lookup_inodev_entry(ino, dev);
77463+ if (inodev != NULL)
77464+ do_handle_delete(inodev, ino, dev);
77465+ write_unlock(&gr_inode_lock);
77466+
77467+ return;
77468+}
77469+
77470+static void
77471+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
77472+ const ino_t newinode, const dev_t newdevice,
77473+ struct acl_subject_label *subj)
77474+{
77475+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
77476+ struct acl_object_label *match;
77477+
77478+ match = subj->obj_hash[index];
77479+
77480+ while (match && (match->inode != oldinode ||
77481+ match->device != olddevice ||
77482+ !(match->mode & GR_DELETED)))
77483+ match = match->next;
77484+
77485+ if (match && (match->inode == oldinode)
77486+ && (match->device == olddevice)
77487+ && (match->mode & GR_DELETED)) {
77488+ if (match->prev == NULL) {
77489+ subj->obj_hash[index] = match->next;
77490+ if (match->next != NULL)
77491+ match->next->prev = NULL;
77492+ } else {
77493+ match->prev->next = match->next;
77494+ if (match->next != NULL)
77495+ match->next->prev = match->prev;
77496+ }
77497+ match->prev = NULL;
77498+ match->next = NULL;
77499+ match->inode = newinode;
77500+ match->device = newdevice;
77501+ match->mode &= ~GR_DELETED;
77502+
77503+ insert_acl_obj_label(match, subj);
77504+ }
77505+
77506+ return;
77507+}
77508+
77509+static void
77510+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
77511+ const ino_t newinode, const dev_t newdevice,
77512+ struct acl_role_label *role)
77513+{
77514+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
77515+ struct acl_subject_label *match;
77516+
77517+ match = role->subj_hash[index];
77518+
77519+ while (match && (match->inode != oldinode ||
77520+ match->device != olddevice ||
77521+ !(match->mode & GR_DELETED)))
77522+ match = match->next;
77523+
77524+ if (match && (match->inode == oldinode)
77525+ && (match->device == olddevice)
77526+ && (match->mode & GR_DELETED)) {
77527+ if (match->prev == NULL) {
77528+ role->subj_hash[index] = match->next;
77529+ if (match->next != NULL)
77530+ match->next->prev = NULL;
77531+ } else {
77532+ match->prev->next = match->next;
77533+ if (match->next != NULL)
77534+ match->next->prev = match->prev;
77535+ }
77536+ match->prev = NULL;
77537+ match->next = NULL;
77538+ match->inode = newinode;
77539+ match->device = newdevice;
77540+ match->mode &= ~GR_DELETED;
77541+
77542+ insert_acl_subj_label(match, role);
77543+ }
77544+
77545+ return;
77546+}
77547+
77548+static void
77549+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
77550+ const ino_t newinode, const dev_t newdevice)
77551+{
77552+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
77553+ struct inodev_entry *match;
77554+
77555+ match = inodev_set.i_hash[index];
77556+
77557+ while (match && (match->nentry->inode != oldinode ||
77558+ match->nentry->device != olddevice || !match->nentry->deleted))
77559+ match = match->next;
77560+
77561+ if (match && (match->nentry->inode == oldinode)
77562+ && (match->nentry->device == olddevice) &&
77563+ match->nentry->deleted) {
77564+ if (match->prev == NULL) {
77565+ inodev_set.i_hash[index] = match->next;
77566+ if (match->next != NULL)
77567+ match->next->prev = NULL;
77568+ } else {
77569+ match->prev->next = match->next;
77570+ if (match->next != NULL)
77571+ match->next->prev = match->prev;
77572+ }
77573+ match->prev = NULL;
77574+ match->next = NULL;
77575+ match->nentry->inode = newinode;
77576+ match->nentry->device = newdevice;
77577+ match->nentry->deleted = 0;
77578+
77579+ insert_inodev_entry(match);
77580+ }
77581+
77582+ return;
77583+}
77584+
77585+static void
77586+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
77587+{
77588+ struct acl_subject_label *subj;
77589+ struct acl_role_label *role;
77590+ unsigned int x;
77591+
77592+ FOR_EACH_ROLE_START(role)
77593+ update_acl_subj_label(matchn->inode, matchn->device,
77594+ inode, dev, role);
77595+
77596+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77597+ if ((subj->inode == inode) && (subj->device == dev)) {
77598+ subj->inode = inode;
77599+ subj->device = dev;
77600+ }
77601+ FOR_EACH_NESTED_SUBJECT_END(subj)
77602+ FOR_EACH_SUBJECT_START(role, subj, x)
77603+ update_acl_obj_label(matchn->inode, matchn->device,
77604+ inode, dev, subj);
77605+ FOR_EACH_SUBJECT_END(subj,x)
77606+ FOR_EACH_ROLE_END(role)
77607+
77608+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
77609+
77610+ return;
77611+}
77612+
77613+static void
77614+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
77615+ const struct vfsmount *mnt)
77616+{
77617+ ino_t ino = dentry->d_inode->i_ino;
77618+ dev_t dev = __get_dev(dentry);
77619+
77620+ __do_handle_create(matchn, ino, dev);
77621+
77622+ return;
77623+}
77624+
77625+void
77626+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77627+{
77628+ struct name_entry *matchn;
77629+
77630+ if (unlikely(!(gr_status & GR_READY)))
77631+ return;
77632+
77633+ preempt_disable();
77634+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
77635+
77636+ if (unlikely((unsigned long)matchn)) {
77637+ write_lock(&gr_inode_lock);
77638+ do_handle_create(matchn, dentry, mnt);
77639+ write_unlock(&gr_inode_lock);
77640+ }
77641+ preempt_enable();
77642+
77643+ return;
77644+}
77645+
77646+void
77647+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77648+{
77649+ struct name_entry *matchn;
77650+
77651+ if (unlikely(!(gr_status & GR_READY)))
77652+ return;
77653+
77654+ preempt_disable();
77655+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
77656+
77657+ if (unlikely((unsigned long)matchn)) {
77658+ write_lock(&gr_inode_lock);
77659+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
77660+ write_unlock(&gr_inode_lock);
77661+ }
77662+ preempt_enable();
77663+
77664+ return;
77665+}
77666+
77667+void
77668+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77669+ struct dentry *old_dentry,
77670+ struct dentry *new_dentry,
77671+ struct vfsmount *mnt, const __u8 replace)
77672+{
77673+ struct name_entry *matchn;
77674+ struct inodev_entry *inodev;
77675+ struct inode *inode = new_dentry->d_inode;
77676+ ino_t oldinode = old_dentry->d_inode->i_ino;
77677+ dev_t olddev = __get_dev(old_dentry);
77678+
77679+ /* vfs_rename swaps the name and parent link for old_dentry and
77680+ new_dentry
77681+ at this point, old_dentry has the new name, parent link, and inode
77682+ for the renamed file
77683+ if a file is being replaced by a rename, new_dentry has the inode
77684+ and name for the replaced file
77685+ */
77686+
77687+ if (unlikely(!(gr_status & GR_READY)))
77688+ return;
77689+
77690+ preempt_disable();
77691+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
77692+
77693+ /* we wouldn't have to check d_inode if it weren't for
77694+ NFS silly-renaming
77695+ */
77696+
77697+ write_lock(&gr_inode_lock);
77698+ if (unlikely(replace && inode)) {
77699+ ino_t newinode = inode->i_ino;
77700+ dev_t newdev = __get_dev(new_dentry);
77701+ inodev = lookup_inodev_entry(newinode, newdev);
77702+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
77703+ do_handle_delete(inodev, newinode, newdev);
77704+ }
77705+
77706+ inodev = lookup_inodev_entry(oldinode, olddev);
77707+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
77708+ do_handle_delete(inodev, oldinode, olddev);
77709+
77710+ if (unlikely((unsigned long)matchn))
77711+ do_handle_create(matchn, old_dentry, mnt);
77712+
77713+ write_unlock(&gr_inode_lock);
77714+ preempt_enable();
77715+
77716+ return;
77717+}
77718+
77719+static int
77720+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
77721+ unsigned char **sum)
77722+{
77723+ struct acl_role_label *r;
77724+ struct role_allowed_ip *ipp;
77725+ struct role_transition *trans;
77726+ unsigned int i;
77727+ int found = 0;
77728+ u32 curr_ip = current->signal->curr_ip;
77729+
77730+ current->signal->saved_ip = curr_ip;
77731+
77732+ /* check transition table */
77733+
77734+ for (trans = current->role->transitions; trans; trans = trans->next) {
77735+ if (!strcmp(rolename, trans->rolename)) {
77736+ found = 1;
77737+ break;
77738+ }
77739+ }
77740+
77741+ if (!found)
77742+ return 0;
77743+
77744+ /* handle special roles that do not require authentication
77745+ and check ip */
77746+
77747+ FOR_EACH_ROLE_START(r)
77748+ if (!strcmp(rolename, r->rolename) &&
77749+ (r->roletype & GR_ROLE_SPECIAL)) {
77750+ found = 0;
77751+ if (r->allowed_ips != NULL) {
77752+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
77753+ if ((ntohl(curr_ip) & ipp->netmask) ==
77754+ (ntohl(ipp->addr) & ipp->netmask))
77755+ found = 1;
77756+ }
77757+ } else
77758+ found = 2;
77759+ if (!found)
77760+ return 0;
77761+
77762+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
77763+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
77764+ *salt = NULL;
77765+ *sum = NULL;
77766+ return 1;
77767+ }
77768+ }
77769+ FOR_EACH_ROLE_END(r)
77770+
77771+ for (i = 0; i < num_sprole_pws; i++) {
77772+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
77773+ *salt = acl_special_roles[i]->salt;
77774+ *sum = acl_special_roles[i]->sum;
77775+ return 1;
77776+ }
77777+ }
77778+
77779+ return 0;
77780+}
77781+
77782+static void
77783+assign_special_role(char *rolename)
77784+{
77785+ struct acl_object_label *obj;
77786+ struct acl_role_label *r;
77787+ struct acl_role_label *assigned = NULL;
77788+ struct task_struct *tsk;
77789+ struct file *filp;
77790+
77791+ FOR_EACH_ROLE_START(r)
77792+ if (!strcmp(rolename, r->rolename) &&
77793+ (r->roletype & GR_ROLE_SPECIAL)) {
77794+ assigned = r;
77795+ break;
77796+ }
77797+ FOR_EACH_ROLE_END(r)
77798+
77799+ if (!assigned)
77800+ return;
77801+
77802+ read_lock(&tasklist_lock);
77803+ read_lock(&grsec_exec_file_lock);
77804+
77805+ tsk = current->real_parent;
77806+ if (tsk == NULL)
77807+ goto out_unlock;
77808+
77809+ filp = tsk->exec_file;
77810+ if (filp == NULL)
77811+ goto out_unlock;
77812+
77813+ tsk->is_writable = 0;
77814+
77815+ tsk->acl_sp_role = 1;
77816+ tsk->acl_role_id = ++acl_sp_role_value;
77817+ tsk->role = assigned;
77818+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
77819+
77820+ /* ignore additional mmap checks for processes that are writable
77821+ by the default ACL */
77822+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77823+ if (unlikely(obj->mode & GR_WRITE))
77824+ tsk->is_writable = 1;
77825+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
77826+ if (unlikely(obj->mode & GR_WRITE))
77827+ tsk->is_writable = 1;
77828+
77829+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77830+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
77831+#endif
77832+
77833+out_unlock:
77834+ read_unlock(&grsec_exec_file_lock);
77835+ read_unlock(&tasklist_lock);
77836+ return;
77837+}
77838+
77839+int gr_check_secure_terminal(struct task_struct *task)
77840+{
77841+ struct task_struct *p, *p2, *p3;
77842+ struct files_struct *files;
77843+ struct fdtable *fdt;
77844+ struct file *our_file = NULL, *file;
77845+ int i;
77846+
77847+ if (task->signal->tty == NULL)
77848+ return 1;
77849+
77850+ files = get_files_struct(task);
77851+ if (files != NULL) {
77852+ rcu_read_lock();
77853+ fdt = files_fdtable(files);
77854+ for (i=0; i < fdt->max_fds; i++) {
77855+ file = fcheck_files(files, i);
77856+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
77857+ get_file(file);
77858+ our_file = file;
77859+ }
77860+ }
77861+ rcu_read_unlock();
77862+ put_files_struct(files);
77863+ }
77864+
77865+ if (our_file == NULL)
77866+ return 1;
77867+
77868+ read_lock(&tasklist_lock);
77869+ do_each_thread(p2, p) {
77870+ files = get_files_struct(p);
77871+ if (files == NULL ||
77872+ (p->signal && p->signal->tty == task->signal->tty)) {
77873+ if (files != NULL)
77874+ put_files_struct(files);
77875+ continue;
77876+ }
77877+ rcu_read_lock();
77878+ fdt = files_fdtable(files);
77879+ for (i=0; i < fdt->max_fds; i++) {
77880+ file = fcheck_files(files, i);
77881+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
77882+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
77883+ p3 = task;
77884+ while (p3->pid > 0) {
77885+ if (p3 == p)
77886+ break;
77887+ p3 = p3->real_parent;
77888+ }
77889+ if (p3 == p)
77890+ break;
77891+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
77892+ gr_handle_alertkill(p);
77893+ rcu_read_unlock();
77894+ put_files_struct(files);
77895+ read_unlock(&tasklist_lock);
77896+ fput(our_file);
77897+ return 0;
77898+ }
77899+ }
77900+ rcu_read_unlock();
77901+ put_files_struct(files);
77902+ } while_each_thread(p2, p);
77903+ read_unlock(&tasklist_lock);
77904+
77905+ fput(our_file);
77906+ return 1;
77907+}
77908+
77909+ssize_t
77910+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
77911+{
77912+ struct gr_arg_wrapper uwrap;
77913+ unsigned char *sprole_salt = NULL;
77914+ unsigned char *sprole_sum = NULL;
77915+ int error = sizeof (struct gr_arg_wrapper);
77916+ int error2 = 0;
77917+
77918+ mutex_lock(&gr_dev_mutex);
77919+
77920+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
77921+ error = -EPERM;
77922+ goto out;
77923+ }
77924+
77925+ if (count != sizeof (struct gr_arg_wrapper)) {
77926+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
77927+ error = -EINVAL;
77928+ goto out;
77929+ }
77930+
77931+
77932+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
77933+ gr_auth_expires = 0;
77934+ gr_auth_attempts = 0;
77935+ }
77936+
77937+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
77938+ error = -EFAULT;
77939+ goto out;
77940+ }
77941+
77942+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
77943+ error = -EINVAL;
77944+ goto out;
77945+ }
77946+
77947+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
77948+ error = -EFAULT;
77949+ goto out;
77950+ }
77951+
77952+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
77953+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
77954+ time_after(gr_auth_expires, get_seconds())) {
77955+ error = -EBUSY;
77956+ goto out;
77957+ }
77958+
77959+ /* if non-root trying to do anything other than use a special role,
77960+ do not attempt authentication, do not count towards authentication
77961+ locking
77962+ */
77963+
77964+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
77965+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
77966+ current_uid()) {
77967+ error = -EPERM;
77968+ goto out;
77969+ }
77970+
77971+ /* ensure pw and special role name are null terminated */
77972+
77973+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
77974+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
77975+
77976+ /* Okay.
77977+ * We have our enough of the argument structure..(we have yet
77978+ * to copy_from_user the tables themselves) . Copy the tables
77979+ * only if we need them, i.e. for loading operations. */
77980+
77981+ switch (gr_usermode->mode) {
77982+ case GR_STATUS:
77983+ if (gr_status & GR_READY) {
77984+ error = 1;
77985+ if (!gr_check_secure_terminal(current))
77986+ error = 3;
77987+ } else
77988+ error = 2;
77989+ goto out;
77990+ case GR_SHUTDOWN:
77991+ if ((gr_status & GR_READY)
77992+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
77993+ pax_open_kernel();
77994+ gr_status &= ~GR_READY;
77995+ pax_close_kernel();
77996+
77997+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
77998+ free_variables();
77999+ memset(gr_usermode, 0, sizeof (struct gr_arg));
78000+ memset(gr_system_salt, 0, GR_SALT_LEN);
78001+ memset(gr_system_sum, 0, GR_SHA_LEN);
78002+ } else if (gr_status & GR_READY) {
78003+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
78004+ error = -EPERM;
78005+ } else {
78006+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
78007+ error = -EAGAIN;
78008+ }
78009+ break;
78010+ case GR_ENABLE:
78011+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
78012+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
78013+ else {
78014+ if (gr_status & GR_READY)
78015+ error = -EAGAIN;
78016+ else
78017+ error = error2;
78018+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
78019+ }
78020+ break;
78021+ case GR_RELOAD:
78022+ if (!(gr_status & GR_READY)) {
78023+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
78024+ error = -EAGAIN;
78025+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78026+ lock_kernel();
78027+
78028+ pax_open_kernel();
78029+ gr_status &= ~GR_READY;
78030+ pax_close_kernel();
78031+
78032+ free_variables();
78033+ if (!(error2 = gracl_init(gr_usermode))) {
78034+ unlock_kernel();
78035+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
78036+ } else {
78037+ unlock_kernel();
78038+ error = error2;
78039+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78040+ }
78041+ } else {
78042+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
78043+ error = -EPERM;
78044+ }
78045+ break;
78046+ case GR_SEGVMOD:
78047+ if (unlikely(!(gr_status & GR_READY))) {
78048+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
78049+ error = -EAGAIN;
78050+ break;
78051+ }
78052+
78053+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
78054+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
78055+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
78056+ struct acl_subject_label *segvacl;
78057+ segvacl =
78058+ lookup_acl_subj_label(gr_usermode->segv_inode,
78059+ gr_usermode->segv_device,
78060+ current->role);
78061+ if (segvacl) {
78062+ segvacl->crashes = 0;
78063+ segvacl->expires = 0;
78064+ }
78065+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
78066+ gr_remove_uid(gr_usermode->segv_uid);
78067+ }
78068+ } else {
78069+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
78070+ error = -EPERM;
78071+ }
78072+ break;
78073+ case GR_SPROLE:
78074+ case GR_SPROLEPAM:
78075+ if (unlikely(!(gr_status & GR_READY))) {
78076+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
78077+ error = -EAGAIN;
78078+ break;
78079+ }
78080+
78081+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
78082+ current->role->expires = 0;
78083+ current->role->auth_attempts = 0;
78084+ }
78085+
78086+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
78087+ time_after(current->role->expires, get_seconds())) {
78088+ error = -EBUSY;
78089+ goto out;
78090+ }
78091+
78092+ if (lookup_special_role_auth
78093+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
78094+ && ((!sprole_salt && !sprole_sum)
78095+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
78096+ char *p = "";
78097+ assign_special_role(gr_usermode->sp_role);
78098+ read_lock(&tasklist_lock);
78099+ if (current->real_parent)
78100+ p = current->real_parent->role->rolename;
78101+ read_unlock(&tasklist_lock);
78102+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
78103+ p, acl_sp_role_value);
78104+ } else {
78105+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
78106+ error = -EPERM;
78107+ if(!(current->role->auth_attempts++))
78108+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78109+
78110+ goto out;
78111+ }
78112+ break;
78113+ case GR_UNSPROLE:
78114+ if (unlikely(!(gr_status & GR_READY))) {
78115+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
78116+ error = -EAGAIN;
78117+ break;
78118+ }
78119+
78120+ if (current->role->roletype & GR_ROLE_SPECIAL) {
78121+ char *p = "";
78122+ int i = 0;
78123+
78124+ read_lock(&tasklist_lock);
78125+ if (current->real_parent) {
78126+ p = current->real_parent->role->rolename;
78127+ i = current->real_parent->acl_role_id;
78128+ }
78129+ read_unlock(&tasklist_lock);
78130+
78131+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
78132+ gr_set_acls(1);
78133+ } else {
78134+ error = -EPERM;
78135+ goto out;
78136+ }
78137+ break;
78138+ default:
78139+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
78140+ error = -EINVAL;
78141+ break;
78142+ }
78143+
78144+ if (error != -EPERM)
78145+ goto out;
78146+
78147+ if(!(gr_auth_attempts++))
78148+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
78149+
78150+ out:
78151+ mutex_unlock(&gr_dev_mutex);
78152+ return error;
78153+}
78154+
78155+/* must be called with
78156+ rcu_read_lock();
78157+ read_lock(&tasklist_lock);
78158+ read_lock(&grsec_exec_file_lock);
78159+*/
78160+int gr_apply_subject_to_task(struct task_struct *task)
78161+{
78162+ struct acl_object_label *obj;
78163+ char *tmpname;
78164+ struct acl_subject_label *tmpsubj;
78165+ struct file *filp;
78166+ struct name_entry *nmatch;
78167+
78168+ filp = task->exec_file;
78169+ if (filp == NULL)
78170+ return 0;
78171+
78172+ /* the following is to apply the correct subject
78173+ on binaries running when the RBAC system
78174+ is enabled, when the binaries have been
78175+ replaced or deleted since their execution
78176+ -----
78177+ when the RBAC system starts, the inode/dev
78178+ from exec_file will be one the RBAC system
78179+ is unaware of. It only knows the inode/dev
78180+ of the present file on disk, or the absence
78181+ of it.
78182+ */
78183+ preempt_disable();
78184+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78185+
78186+ nmatch = lookup_name_entry(tmpname);
78187+ preempt_enable();
78188+ tmpsubj = NULL;
78189+ if (nmatch) {
78190+ if (nmatch->deleted)
78191+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78192+ else
78193+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78194+ if (tmpsubj != NULL)
78195+ task->acl = tmpsubj;
78196+ }
78197+ if (tmpsubj == NULL)
78198+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
78199+ task->role);
78200+ if (task->acl) {
78201+ task->is_writable = 0;
78202+ /* ignore additional mmap checks for processes that are writable
78203+ by the default ACL */
78204+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78205+ if (unlikely(obj->mode & GR_WRITE))
78206+ task->is_writable = 1;
78207+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78208+ if (unlikely(obj->mode & GR_WRITE))
78209+ task->is_writable = 1;
78210+
78211+ gr_set_proc_res(task);
78212+
78213+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78214+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
78215+#endif
78216+ } else {
78217+ return 1;
78218+ }
78219+
78220+ return 0;
78221+}
78222+
78223+int
78224+gr_set_acls(const int type)
78225+{
78226+ struct task_struct *task, *task2;
78227+ struct acl_role_label *role = current->role;
78228+ __u16 acl_role_id = current->acl_role_id;
78229+ const struct cred *cred;
78230+ int ret;
78231+
78232+ rcu_read_lock();
78233+ read_lock(&tasklist_lock);
78234+ read_lock(&grsec_exec_file_lock);
78235+ do_each_thread(task2, task) {
78236+ /* check to see if we're called from the exit handler,
78237+ if so, only replace ACLs that have inherited the admin
78238+ ACL */
78239+
78240+ if (type && (task->role != role ||
78241+ task->acl_role_id != acl_role_id))
78242+ continue;
78243+
78244+ task->acl_role_id = 0;
78245+ task->acl_sp_role = 0;
78246+
78247+ if (task->exec_file) {
78248+ cred = __task_cred(task);
78249+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
78250+
78251+ ret = gr_apply_subject_to_task(task);
78252+ if (ret) {
78253+ read_unlock(&grsec_exec_file_lock);
78254+ read_unlock(&tasklist_lock);
78255+ rcu_read_unlock();
78256+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
78257+ return ret;
78258+ }
78259+ } else {
78260+ // it's a kernel process
78261+ task->role = kernel_role;
78262+ task->acl = kernel_role->root_label;
78263+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
78264+ task->acl->mode &= ~GR_PROCFIND;
78265+#endif
78266+ }
78267+ } while_each_thread(task2, task);
78268+ read_unlock(&grsec_exec_file_lock);
78269+ read_unlock(&tasklist_lock);
78270+ rcu_read_unlock();
78271+
78272+ return 0;
78273+}
78274+
78275+void
78276+gr_learn_resource(const struct task_struct *task,
78277+ const int res, const unsigned long wanted, const int gt)
78278+{
78279+ struct acl_subject_label *acl;
78280+ const struct cred *cred;
78281+
78282+ if (unlikely((gr_status & GR_READY) &&
78283+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
78284+ goto skip_reslog;
78285+
78286+#ifdef CONFIG_GRKERNSEC_RESLOG
78287+ gr_log_resource(task, res, wanted, gt);
78288+#endif
78289+ skip_reslog:
78290+
78291+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
78292+ return;
78293+
78294+ acl = task->acl;
78295+
78296+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
78297+ !(acl->resmask & (1 << (unsigned short) res))))
78298+ return;
78299+
78300+ if (wanted >= acl->res[res].rlim_cur) {
78301+ unsigned long res_add;
78302+
78303+ res_add = wanted;
78304+ switch (res) {
78305+ case RLIMIT_CPU:
78306+ res_add += GR_RLIM_CPU_BUMP;
78307+ break;
78308+ case RLIMIT_FSIZE:
78309+ res_add += GR_RLIM_FSIZE_BUMP;
78310+ break;
78311+ case RLIMIT_DATA:
78312+ res_add += GR_RLIM_DATA_BUMP;
78313+ break;
78314+ case RLIMIT_STACK:
78315+ res_add += GR_RLIM_STACK_BUMP;
78316+ break;
78317+ case RLIMIT_CORE:
78318+ res_add += GR_RLIM_CORE_BUMP;
78319+ break;
78320+ case RLIMIT_RSS:
78321+ res_add += GR_RLIM_RSS_BUMP;
78322+ break;
78323+ case RLIMIT_NPROC:
78324+ res_add += GR_RLIM_NPROC_BUMP;
78325+ break;
78326+ case RLIMIT_NOFILE:
78327+ res_add += GR_RLIM_NOFILE_BUMP;
78328+ break;
78329+ case RLIMIT_MEMLOCK:
78330+ res_add += GR_RLIM_MEMLOCK_BUMP;
78331+ break;
78332+ case RLIMIT_AS:
78333+ res_add += GR_RLIM_AS_BUMP;
78334+ break;
78335+ case RLIMIT_LOCKS:
78336+ res_add += GR_RLIM_LOCKS_BUMP;
78337+ break;
78338+ case RLIMIT_SIGPENDING:
78339+ res_add += GR_RLIM_SIGPENDING_BUMP;
78340+ break;
78341+ case RLIMIT_MSGQUEUE:
78342+ res_add += GR_RLIM_MSGQUEUE_BUMP;
78343+ break;
78344+ case RLIMIT_NICE:
78345+ res_add += GR_RLIM_NICE_BUMP;
78346+ break;
78347+ case RLIMIT_RTPRIO:
78348+ res_add += GR_RLIM_RTPRIO_BUMP;
78349+ break;
78350+ case RLIMIT_RTTIME:
78351+ res_add += GR_RLIM_RTTIME_BUMP;
78352+ break;
78353+ }
78354+
78355+ acl->res[res].rlim_cur = res_add;
78356+
78357+ if (wanted > acl->res[res].rlim_max)
78358+ acl->res[res].rlim_max = res_add;
78359+
78360+ /* only log the subject filename, since resource logging is supported for
78361+ single-subject learning only */
78362+ rcu_read_lock();
78363+ cred = __task_cred(task);
78364+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
78365+ task->role->roletype, cred->uid, cred->gid, acl->filename,
78366+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
78367+ "", (unsigned long) res, &task->signal->saved_ip);
78368+ rcu_read_unlock();
78369+ }
78370+
78371+ return;
78372+}
78373+
78374+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
78375+void
78376+pax_set_initial_flags(struct linux_binprm *bprm)
78377+{
78378+ struct task_struct *task = current;
78379+ struct acl_subject_label *proc;
78380+ unsigned long flags;
78381+
78382+ if (unlikely(!(gr_status & GR_READY)))
78383+ return;
78384+
78385+ flags = pax_get_flags(task);
78386+
78387+ proc = task->acl;
78388+
78389+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
78390+ flags &= ~MF_PAX_PAGEEXEC;
78391+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
78392+ flags &= ~MF_PAX_SEGMEXEC;
78393+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
78394+ flags &= ~MF_PAX_RANDMMAP;
78395+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
78396+ flags &= ~MF_PAX_EMUTRAMP;
78397+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
78398+ flags &= ~MF_PAX_MPROTECT;
78399+
78400+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
78401+ flags |= MF_PAX_PAGEEXEC;
78402+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
78403+ flags |= MF_PAX_SEGMEXEC;
78404+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
78405+ flags |= MF_PAX_RANDMMAP;
78406+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
78407+ flags |= MF_PAX_EMUTRAMP;
78408+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
78409+ flags |= MF_PAX_MPROTECT;
78410+
78411+ pax_set_flags(task, flags);
78412+
78413+ return;
78414+}
78415+#endif
78416+
78417+#ifdef CONFIG_SYSCTL
78418+/* Eric Biederman likes breaking userland ABI and every inode-based security
78419+ system to save 35kb of memory */
78420+
78421+/* we modify the passed in filename, but adjust it back before returning */
78422+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
78423+{
78424+ struct name_entry *nmatch;
78425+ char *p, *lastp = NULL;
78426+ struct acl_object_label *obj = NULL, *tmp;
78427+ struct acl_subject_label *tmpsubj;
78428+ char c = '\0';
78429+
78430+ read_lock(&gr_inode_lock);
78431+
78432+ p = name + len - 1;
78433+ do {
78434+ nmatch = lookup_name_entry(name);
78435+ if (lastp != NULL)
78436+ *lastp = c;
78437+
78438+ if (nmatch == NULL)
78439+ goto next_component;
78440+ tmpsubj = current->acl;
78441+ do {
78442+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
78443+ if (obj != NULL) {
78444+ tmp = obj->globbed;
78445+ while (tmp) {
78446+ if (!glob_match(tmp->filename, name)) {
78447+ obj = tmp;
78448+ goto found_obj;
78449+ }
78450+ tmp = tmp->next;
78451+ }
78452+ goto found_obj;
78453+ }
78454+ } while ((tmpsubj = tmpsubj->parent_subject));
78455+next_component:
78456+ /* end case */
78457+ if (p == name)
78458+ break;
78459+
78460+ while (*p != '/')
78461+ p--;
78462+ if (p == name)
78463+ lastp = p + 1;
78464+ else {
78465+ lastp = p;
78466+ p--;
78467+ }
78468+ c = *lastp;
78469+ *lastp = '\0';
78470+ } while (1);
78471+found_obj:
78472+ read_unlock(&gr_inode_lock);
78473+ /* obj returned will always be non-null */
78474+ return obj;
78475+}
78476+
78477+/* returns 0 when allowing, non-zero on error
78478+ op of 0 is used for readdir, so we don't log the names of hidden files
78479+*/
78480+__u32
78481+gr_handle_sysctl(const struct ctl_table *table, const int op)
78482+{
78483+ ctl_table *tmp;
78484+ const char *proc_sys = "/proc/sys";
78485+ char *path;
78486+ struct acl_object_label *obj;
78487+ unsigned short len = 0, pos = 0, depth = 0, i;
78488+ __u32 err = 0;
78489+ __u32 mode = 0;
78490+
78491+ if (unlikely(!(gr_status & GR_READY)))
78492+ return 0;
78493+
78494+ /* for now, ignore operations on non-sysctl entries if it's not a
78495+ readdir*/
78496+ if (table->child != NULL && op != 0)
78497+ return 0;
78498+
78499+ mode |= GR_FIND;
78500+ /* it's only a read if it's an entry, read on dirs is for readdir */
78501+ if (op & MAY_READ)
78502+ mode |= GR_READ;
78503+ if (op & MAY_WRITE)
78504+ mode |= GR_WRITE;
78505+
78506+ preempt_disable();
78507+
78508+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
78509+
78510+ /* it's only a read/write if it's an actual entry, not a dir
78511+ (which are opened for readdir)
78512+ */
78513+
78514+ /* convert the requested sysctl entry into a pathname */
78515+
78516+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78517+ len += strlen(tmp->procname);
78518+ len++;
78519+ depth++;
78520+ }
78521+
78522+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
78523+ /* deny */
78524+ goto out;
78525+ }
78526+
78527+ memset(path, 0, PAGE_SIZE);
78528+
78529+ memcpy(path, proc_sys, strlen(proc_sys));
78530+
78531+ pos += strlen(proc_sys);
78532+
78533+ for (; depth > 0; depth--) {
78534+ path[pos] = '/';
78535+ pos++;
78536+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78537+ if (depth == i) {
78538+ memcpy(path + pos, tmp->procname,
78539+ strlen(tmp->procname));
78540+ pos += strlen(tmp->procname);
78541+ }
78542+ i++;
78543+ }
78544+ }
78545+
78546+ obj = gr_lookup_by_name(path, pos);
78547+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
78548+
78549+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
78550+ ((err & mode) != mode))) {
78551+ __u32 new_mode = mode;
78552+
78553+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78554+
78555+ err = 0;
78556+ gr_log_learn_sysctl(path, new_mode);
78557+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
78558+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
78559+ err = -ENOENT;
78560+ } else if (!(err & GR_FIND)) {
78561+ err = -ENOENT;
78562+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
78563+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
78564+ path, (mode & GR_READ) ? " reading" : "",
78565+ (mode & GR_WRITE) ? " writing" : "");
78566+ err = -EACCES;
78567+ } else if ((err & mode) != mode) {
78568+ err = -EACCES;
78569+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
78570+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
78571+ path, (mode & GR_READ) ? " reading" : "",
78572+ (mode & GR_WRITE) ? " writing" : "");
78573+ err = 0;
78574+ } else
78575+ err = 0;
78576+
78577+ out:
78578+ preempt_enable();
78579+
78580+ return err;
78581+}
78582+#endif
78583+
78584+int
78585+gr_handle_proc_ptrace(struct task_struct *task)
78586+{
78587+ struct file *filp;
78588+ struct task_struct *tmp = task;
78589+ struct task_struct *curtemp = current;
78590+ __u32 retmode;
78591+
78592+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78593+ if (unlikely(!(gr_status & GR_READY)))
78594+ return 0;
78595+#endif
78596+
78597+ read_lock(&tasklist_lock);
78598+ read_lock(&grsec_exec_file_lock);
78599+ filp = task->exec_file;
78600+
78601+ while (tmp->pid > 0) {
78602+ if (tmp == curtemp)
78603+ break;
78604+ tmp = tmp->real_parent;
78605+ }
78606+
78607+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78608+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
78609+ read_unlock(&grsec_exec_file_lock);
78610+ read_unlock(&tasklist_lock);
78611+ return 1;
78612+ }
78613+
78614+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78615+ if (!(gr_status & GR_READY)) {
78616+ read_unlock(&grsec_exec_file_lock);
78617+ read_unlock(&tasklist_lock);
78618+ return 0;
78619+ }
78620+#endif
78621+
78622+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
78623+ read_unlock(&grsec_exec_file_lock);
78624+ read_unlock(&tasklist_lock);
78625+
78626+ if (retmode & GR_NOPTRACE)
78627+ return 1;
78628+
78629+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
78630+ && (current->acl != task->acl || (current->acl != current->role->root_label
78631+ && current->pid != task->pid)))
78632+ return 1;
78633+
78634+ return 0;
78635+}
78636+
78637+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
78638+{
78639+ if (unlikely(!(gr_status & GR_READY)))
78640+ return;
78641+
78642+ if (!(current->role->roletype & GR_ROLE_GOD))
78643+ return;
78644+
78645+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
78646+ p->role->rolename, gr_task_roletype_to_char(p),
78647+ p->acl->filename);
78648+}
78649+
78650+int
78651+gr_handle_ptrace(struct task_struct *task, const long request)
78652+{
78653+ struct task_struct *tmp = task;
78654+ struct task_struct *curtemp = current;
78655+ __u32 retmode;
78656+
78657+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78658+ if (unlikely(!(gr_status & GR_READY)))
78659+ return 0;
78660+#endif
78661+
78662+ read_lock(&tasklist_lock);
78663+ while (tmp->pid > 0) {
78664+ if (tmp == curtemp)
78665+ break;
78666+ tmp = tmp->real_parent;
78667+ }
78668+
78669+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78670+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
78671+ read_unlock(&tasklist_lock);
78672+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78673+ return 1;
78674+ }
78675+ read_unlock(&tasklist_lock);
78676+
78677+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78678+ if (!(gr_status & GR_READY))
78679+ return 0;
78680+#endif
78681+
78682+ read_lock(&grsec_exec_file_lock);
78683+ if (unlikely(!task->exec_file)) {
78684+ read_unlock(&grsec_exec_file_lock);
78685+ return 0;
78686+ }
78687+
78688+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
78689+ read_unlock(&grsec_exec_file_lock);
78690+
78691+ if (retmode & GR_NOPTRACE) {
78692+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78693+ return 1;
78694+ }
78695+
78696+ if (retmode & GR_PTRACERD) {
78697+ switch (request) {
78698+ case PTRACE_POKETEXT:
78699+ case PTRACE_POKEDATA:
78700+ case PTRACE_POKEUSR:
78701+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
78702+ case PTRACE_SETREGS:
78703+ case PTRACE_SETFPREGS:
78704+#endif
78705+#ifdef CONFIG_X86
78706+ case PTRACE_SETFPXREGS:
78707+#endif
78708+#ifdef CONFIG_ALTIVEC
78709+ case PTRACE_SETVRREGS:
78710+#endif
78711+ return 1;
78712+ default:
78713+ return 0;
78714+ }
78715+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
78716+ !(current->role->roletype & GR_ROLE_GOD) &&
78717+ (current->acl != task->acl)) {
78718+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78719+ return 1;
78720+ }
78721+
78722+ return 0;
78723+}
78724+
78725+static int is_writable_mmap(const struct file *filp)
78726+{
78727+ struct task_struct *task = current;
78728+ struct acl_object_label *obj, *obj2;
78729+
78730+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
78731+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
78732+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78733+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
78734+ task->role->root_label);
78735+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
78736+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
78737+ return 1;
78738+ }
78739+ }
78740+ return 0;
78741+}
78742+
78743+int
78744+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
78745+{
78746+ __u32 mode;
78747+
78748+ if (unlikely(!file || !(prot & PROT_EXEC)))
78749+ return 1;
78750+
78751+ if (is_writable_mmap(file))
78752+ return 0;
78753+
78754+ mode =
78755+ gr_search_file(file->f_path.dentry,
78756+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78757+ file->f_path.mnt);
78758+
78759+ if (!gr_tpe_allow(file))
78760+ return 0;
78761+
78762+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78763+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78764+ return 0;
78765+ } else if (unlikely(!(mode & GR_EXEC))) {
78766+ return 0;
78767+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78768+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78769+ return 1;
78770+ }
78771+
78772+ return 1;
78773+}
78774+
78775+int
78776+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78777+{
78778+ __u32 mode;
78779+
78780+ if (unlikely(!file || !(prot & PROT_EXEC)))
78781+ return 1;
78782+
78783+ if (is_writable_mmap(file))
78784+ return 0;
78785+
78786+ mode =
78787+ gr_search_file(file->f_path.dentry,
78788+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78789+ file->f_path.mnt);
78790+
78791+ if (!gr_tpe_allow(file))
78792+ return 0;
78793+
78794+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78795+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78796+ return 0;
78797+ } else if (unlikely(!(mode & GR_EXEC))) {
78798+ return 0;
78799+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78800+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78801+ return 1;
78802+ }
78803+
78804+ return 1;
78805+}
78806+
78807+void
78808+gr_acl_handle_psacct(struct task_struct *task, const long code)
78809+{
78810+ unsigned long runtime;
78811+ unsigned long cputime;
78812+ unsigned int wday, cday;
78813+ __u8 whr, chr;
78814+ __u8 wmin, cmin;
78815+ __u8 wsec, csec;
78816+ struct timespec timeval;
78817+
78818+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
78819+ !(task->acl->mode & GR_PROCACCT)))
78820+ return;
78821+
78822+ do_posix_clock_monotonic_gettime(&timeval);
78823+ runtime = timeval.tv_sec - task->start_time.tv_sec;
78824+ wday = runtime / (3600 * 24);
78825+ runtime -= wday * (3600 * 24);
78826+ whr = runtime / 3600;
78827+ runtime -= whr * 3600;
78828+ wmin = runtime / 60;
78829+ runtime -= wmin * 60;
78830+ wsec = runtime;
78831+
78832+ cputime = (task->utime + task->stime) / HZ;
78833+ cday = cputime / (3600 * 24);
78834+ cputime -= cday * (3600 * 24);
78835+ chr = cputime / 3600;
78836+ cputime -= chr * 3600;
78837+ cmin = cputime / 60;
78838+ cputime -= cmin * 60;
78839+ csec = cputime;
78840+
78841+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
78842+
78843+ return;
78844+}
78845+
78846+void gr_set_kernel_label(struct task_struct *task)
78847+{
78848+ if (gr_status & GR_READY) {
78849+ task->role = kernel_role;
78850+ task->acl = kernel_role->root_label;
78851+ }
78852+ return;
78853+}
78854+
78855+#ifdef CONFIG_TASKSTATS
78856+int gr_is_taskstats_denied(int pid)
78857+{
78858+ struct task_struct *task;
78859+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78860+ const struct cred *cred;
78861+#endif
78862+ int ret = 0;
78863+
78864+ /* restrict taskstats viewing to un-chrooted root users
78865+ who have the 'view' subject flag if the RBAC system is enabled
78866+ */
78867+
78868+ rcu_read_lock();
78869+ read_lock(&tasklist_lock);
78870+ task = find_task_by_vpid(pid);
78871+ if (task) {
78872+#ifdef CONFIG_GRKERNSEC_CHROOT
78873+ if (proc_is_chrooted(task))
78874+ ret = -EACCES;
78875+#endif
78876+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78877+ cred = __task_cred(task);
78878+#ifdef CONFIG_GRKERNSEC_PROC_USER
78879+ if (cred->uid != 0)
78880+ ret = -EACCES;
78881+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78882+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
78883+ ret = -EACCES;
78884+#endif
78885+#endif
78886+ if (gr_status & GR_READY) {
78887+ if (!(task->acl->mode & GR_VIEW))
78888+ ret = -EACCES;
78889+ }
78890+ } else
78891+ ret = -ENOENT;
78892+
78893+ read_unlock(&tasklist_lock);
78894+ rcu_read_unlock();
78895+
78896+ return ret;
78897+}
78898+#endif
78899+
78900+/* AUXV entries are filled via a descendant of search_binary_handler
78901+ after we've already applied the subject for the target
78902+*/
78903+int gr_acl_enable_at_secure(void)
78904+{
78905+ if (unlikely(!(gr_status & GR_READY)))
78906+ return 0;
78907+
78908+ if (current->acl->mode & GR_ATSECURE)
78909+ return 1;
78910+
78911+ return 0;
78912+}
78913+
78914+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
78915+{
78916+ struct task_struct *task = current;
78917+ struct dentry *dentry = file->f_path.dentry;
78918+ struct vfsmount *mnt = file->f_path.mnt;
78919+ struct acl_object_label *obj, *tmp;
78920+ struct acl_subject_label *subj;
78921+ unsigned int bufsize;
78922+ int is_not_root;
78923+ char *path;
78924+ dev_t dev = __get_dev(dentry);
78925+
78926+ if (unlikely(!(gr_status & GR_READY)))
78927+ return 1;
78928+
78929+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
78930+ return 1;
78931+
78932+ /* ignore Eric Biederman */
78933+ if (IS_PRIVATE(dentry->d_inode))
78934+ return 1;
78935+
78936+ subj = task->acl;
78937+ do {
78938+ obj = lookup_acl_obj_label(ino, dev, subj);
78939+ if (obj != NULL)
78940+ return (obj->mode & GR_FIND) ? 1 : 0;
78941+ } while ((subj = subj->parent_subject));
78942+
78943+ /* this is purely an optimization since we're looking for an object
78944+ for the directory we're doing a readdir on
78945+ if it's possible for any globbed object to match the entry we're
78946+ filling into the directory, then the object we find here will be
78947+ an anchor point with attached globbed objects
78948+ */
78949+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
78950+ if (obj->globbed == NULL)
78951+ return (obj->mode & GR_FIND) ? 1 : 0;
78952+
78953+ is_not_root = ((obj->filename[0] == '/') &&
78954+ (obj->filename[1] == '\0')) ? 0 : 1;
78955+ bufsize = PAGE_SIZE - namelen - is_not_root;
78956+
78957+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
78958+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
78959+ return 1;
78960+
78961+ preempt_disable();
78962+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
78963+ bufsize);
78964+
78965+ bufsize = strlen(path);
78966+
78967+ /* if base is "/", don't append an additional slash */
78968+ if (is_not_root)
78969+ *(path + bufsize) = '/';
78970+ memcpy(path + bufsize + is_not_root, name, namelen);
78971+ *(path + bufsize + namelen + is_not_root) = '\0';
78972+
78973+ tmp = obj->globbed;
78974+ while (tmp) {
78975+ if (!glob_match(tmp->filename, path)) {
78976+ preempt_enable();
78977+ return (tmp->mode & GR_FIND) ? 1 : 0;
78978+ }
78979+ tmp = tmp->next;
78980+ }
78981+ preempt_enable();
78982+ return (obj->mode & GR_FIND) ? 1 : 0;
78983+}
78984+
78985+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
78986+EXPORT_SYMBOL(gr_acl_is_enabled);
78987+#endif
78988+EXPORT_SYMBOL(gr_learn_resource);
78989+EXPORT_SYMBOL(gr_set_kernel_label);
78990+#ifdef CONFIG_SECURITY
78991+EXPORT_SYMBOL(gr_check_user_change);
78992+EXPORT_SYMBOL(gr_check_group_change);
78993+#endif
78994+
78995diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
78996new file mode 100644
78997index 0000000..34fefda
78998--- /dev/null
78999+++ b/grsecurity/gracl_alloc.c
79000@@ -0,0 +1,105 @@
79001+#include <linux/kernel.h>
79002+#include <linux/mm.h>
79003+#include <linux/slab.h>
79004+#include <linux/vmalloc.h>
79005+#include <linux/gracl.h>
79006+#include <linux/grsecurity.h>
79007+
79008+static unsigned long alloc_stack_next = 1;
79009+static unsigned long alloc_stack_size = 1;
79010+static void **alloc_stack;
79011+
79012+static __inline__ int
79013+alloc_pop(void)
79014+{
79015+ if (alloc_stack_next == 1)
79016+ return 0;
79017+
79018+ kfree(alloc_stack[alloc_stack_next - 2]);
79019+
79020+ alloc_stack_next--;
79021+
79022+ return 1;
79023+}
79024+
79025+static __inline__ int
79026+alloc_push(void *buf)
79027+{
79028+ if (alloc_stack_next >= alloc_stack_size)
79029+ return 1;
79030+
79031+ alloc_stack[alloc_stack_next - 1] = buf;
79032+
79033+ alloc_stack_next++;
79034+
79035+ return 0;
79036+}
79037+
79038+void *
79039+acl_alloc(unsigned long len)
79040+{
79041+ void *ret = NULL;
79042+
79043+ if (!len || len > PAGE_SIZE)
79044+ goto out;
79045+
79046+ ret = kmalloc(len, GFP_KERNEL);
79047+
79048+ if (ret) {
79049+ if (alloc_push(ret)) {
79050+ kfree(ret);
79051+ ret = NULL;
79052+ }
79053+ }
79054+
79055+out:
79056+ return ret;
79057+}
79058+
79059+void *
79060+acl_alloc_num(unsigned long num, unsigned long len)
79061+{
79062+ if (!len || (num > (PAGE_SIZE / len)))
79063+ return NULL;
79064+
79065+ return acl_alloc(num * len);
79066+}
79067+
79068+void
79069+acl_free_all(void)
79070+{
79071+ if (gr_acl_is_enabled() || !alloc_stack)
79072+ return;
79073+
79074+ while (alloc_pop()) ;
79075+
79076+ if (alloc_stack) {
79077+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
79078+ kfree(alloc_stack);
79079+ else
79080+ vfree(alloc_stack);
79081+ }
79082+
79083+ alloc_stack = NULL;
79084+ alloc_stack_size = 1;
79085+ alloc_stack_next = 1;
79086+
79087+ return;
79088+}
79089+
79090+int
79091+acl_alloc_stack_init(unsigned long size)
79092+{
79093+ if ((size * sizeof (void *)) <= PAGE_SIZE)
79094+ alloc_stack =
79095+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
79096+ else
79097+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
79098+
79099+ alloc_stack_size = size;
79100+
79101+ if (!alloc_stack)
79102+ return 0;
79103+ else
79104+ return 1;
79105+}
79106diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
79107new file mode 100644
79108index 0000000..955ddfb
79109--- /dev/null
79110+++ b/grsecurity/gracl_cap.c
79111@@ -0,0 +1,101 @@
79112+#include <linux/kernel.h>
79113+#include <linux/module.h>
79114+#include <linux/sched.h>
79115+#include <linux/gracl.h>
79116+#include <linux/grsecurity.h>
79117+#include <linux/grinternal.h>
79118+
79119+extern const char *captab_log[];
79120+extern int captab_log_entries;
79121+
79122+int
79123+gr_acl_is_capable(const int cap)
79124+{
79125+ struct task_struct *task = current;
79126+ const struct cred *cred = current_cred();
79127+ struct acl_subject_label *curracl;
79128+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79129+ kernel_cap_t cap_audit = __cap_empty_set;
79130+
79131+ if (!gr_acl_is_enabled())
79132+ return 1;
79133+
79134+ curracl = task->acl;
79135+
79136+ cap_drop = curracl->cap_lower;
79137+ cap_mask = curracl->cap_mask;
79138+ cap_audit = curracl->cap_invert_audit;
79139+
79140+ while ((curracl = curracl->parent_subject)) {
79141+ /* if the cap isn't specified in the current computed mask but is specified in the
79142+ current level subject, and is lowered in the current level subject, then add
79143+ it to the set of dropped capabilities
79144+ otherwise, add the current level subject's mask to the current computed mask
79145+ */
79146+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79147+ cap_raise(cap_mask, cap);
79148+ if (cap_raised(curracl->cap_lower, cap))
79149+ cap_raise(cap_drop, cap);
79150+ if (cap_raised(curracl->cap_invert_audit, cap))
79151+ cap_raise(cap_audit, cap);
79152+ }
79153+ }
79154+
79155+ if (!cap_raised(cap_drop, cap)) {
79156+ if (cap_raised(cap_audit, cap))
79157+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
79158+ return 1;
79159+ }
79160+
79161+ curracl = task->acl;
79162+
79163+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
79164+ && cap_raised(cred->cap_effective, cap)) {
79165+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79166+ task->role->roletype, cred->uid,
79167+ cred->gid, task->exec_file ?
79168+ gr_to_filename(task->exec_file->f_path.dentry,
79169+ task->exec_file->f_path.mnt) : curracl->filename,
79170+ curracl->filename, 0UL,
79171+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
79172+ return 1;
79173+ }
79174+
79175+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
79176+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
79177+ return 0;
79178+}
79179+
79180+int
79181+gr_acl_is_capable_nolog(const int cap)
79182+{
79183+ struct acl_subject_label *curracl;
79184+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
79185+
79186+ if (!gr_acl_is_enabled())
79187+ return 1;
79188+
79189+ curracl = current->acl;
79190+
79191+ cap_drop = curracl->cap_lower;
79192+ cap_mask = curracl->cap_mask;
79193+
79194+ while ((curracl = curracl->parent_subject)) {
79195+ /* if the cap isn't specified in the current computed mask but is specified in the
79196+ current level subject, and is lowered in the current level subject, then add
79197+ it to the set of dropped capabilities
79198+ otherwise, add the current level subject's mask to the current computed mask
79199+ */
79200+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
79201+ cap_raise(cap_mask, cap);
79202+ if (cap_raised(curracl->cap_lower, cap))
79203+ cap_raise(cap_drop, cap);
79204+ }
79205+ }
79206+
79207+ if (!cap_raised(cap_drop, cap))
79208+ return 1;
79209+
79210+ return 0;
79211+}
79212+
79213diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
79214new file mode 100644
79215index 0000000..8c4595a
79216--- /dev/null
79217+++ b/grsecurity/gracl_fs.c
79218@@ -0,0 +1,435 @@
79219+#include <linux/kernel.h>
79220+#include <linux/sched.h>
79221+#include <linux/types.h>
79222+#include <linux/fs.h>
79223+#include <linux/file.h>
79224+#include <linux/stat.h>
79225+#include <linux/grsecurity.h>
79226+#include <linux/grinternal.h>
79227+#include <linux/gracl.h>
79228+
79229+umode_t
79230+gr_acl_umask(void)
79231+{
79232+ if (unlikely(!gr_acl_is_enabled()))
79233+ return 0;
79234+
79235+ return current->role->umask;
79236+}
79237+
79238+__u32
79239+gr_acl_handle_hidden_file(const struct dentry * dentry,
79240+ const struct vfsmount * mnt)
79241+{
79242+ __u32 mode;
79243+
79244+ if (unlikely(!dentry->d_inode))
79245+ return GR_FIND;
79246+
79247+ mode =
79248+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
79249+
79250+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
79251+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79252+ return mode;
79253+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
79254+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
79255+ return 0;
79256+ } else if (unlikely(!(mode & GR_FIND)))
79257+ return 0;
79258+
79259+ return GR_FIND;
79260+}
79261+
79262+__u32
79263+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
79264+ int acc_mode)
79265+{
79266+ __u32 reqmode = GR_FIND;
79267+ __u32 mode;
79268+
79269+ if (unlikely(!dentry->d_inode))
79270+ return reqmode;
79271+
79272+ if (acc_mode & MAY_APPEND)
79273+ reqmode |= GR_APPEND;
79274+ else if (acc_mode & MAY_WRITE)
79275+ reqmode |= GR_WRITE;
79276+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
79277+ reqmode |= GR_READ;
79278+
79279+ mode =
79280+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79281+ mnt);
79282+
79283+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79284+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79285+ reqmode & GR_READ ? " reading" : "",
79286+ reqmode & GR_WRITE ? " writing" : reqmode &
79287+ GR_APPEND ? " appending" : "");
79288+ return reqmode;
79289+ } else
79290+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79291+ {
79292+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
79293+ reqmode & GR_READ ? " reading" : "",
79294+ reqmode & GR_WRITE ? " writing" : reqmode &
79295+ GR_APPEND ? " appending" : "");
79296+ return 0;
79297+ } else if (unlikely((mode & reqmode) != reqmode))
79298+ return 0;
79299+
79300+ return reqmode;
79301+}
79302+
79303+__u32
79304+gr_acl_handle_creat(const struct dentry * dentry,
79305+ const struct dentry * p_dentry,
79306+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
79307+ const int imode)
79308+{
79309+ __u32 reqmode = GR_WRITE | GR_CREATE;
79310+ __u32 mode;
79311+
79312+ if (acc_mode & MAY_APPEND)
79313+ reqmode |= GR_APPEND;
79314+ // if a directory was required or the directory already exists, then
79315+ // don't count this open as a read
79316+ if ((acc_mode & MAY_READ) &&
79317+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
79318+ reqmode |= GR_READ;
79319+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
79320+ reqmode |= GR_SETID;
79321+
79322+ mode =
79323+ gr_check_create(dentry, p_dentry, p_mnt,
79324+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79325+
79326+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79327+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79328+ reqmode & GR_READ ? " reading" : "",
79329+ reqmode & GR_WRITE ? " writing" : reqmode &
79330+ GR_APPEND ? " appending" : "");
79331+ return reqmode;
79332+ } else
79333+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79334+ {
79335+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
79336+ reqmode & GR_READ ? " reading" : "",
79337+ reqmode & GR_WRITE ? " writing" : reqmode &
79338+ GR_APPEND ? " appending" : "");
79339+ return 0;
79340+ } else if (unlikely((mode & reqmode) != reqmode))
79341+ return 0;
79342+
79343+ return reqmode;
79344+}
79345+
79346+__u32
79347+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
79348+ const int fmode)
79349+{
79350+ __u32 mode, reqmode = GR_FIND;
79351+
79352+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
79353+ reqmode |= GR_EXEC;
79354+ if (fmode & S_IWOTH)
79355+ reqmode |= GR_WRITE;
79356+ if (fmode & S_IROTH)
79357+ reqmode |= GR_READ;
79358+
79359+ mode =
79360+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
79361+ mnt);
79362+
79363+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
79364+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79365+ reqmode & GR_READ ? " reading" : "",
79366+ reqmode & GR_WRITE ? " writing" : "",
79367+ reqmode & GR_EXEC ? " executing" : "");
79368+ return reqmode;
79369+ } else
79370+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
79371+ {
79372+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
79373+ reqmode & GR_READ ? " reading" : "",
79374+ reqmode & GR_WRITE ? " writing" : "",
79375+ reqmode & GR_EXEC ? " executing" : "");
79376+ return 0;
79377+ } else if (unlikely((mode & reqmode) != reqmode))
79378+ return 0;
79379+
79380+ return reqmode;
79381+}
79382+
79383+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
79384+{
79385+ __u32 mode;
79386+
79387+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
79388+
79389+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79390+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
79391+ return mode;
79392+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79393+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
79394+ return 0;
79395+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79396+ return 0;
79397+
79398+ return (reqmode);
79399+}
79400+
79401+__u32
79402+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
79403+{
79404+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
79405+}
79406+
79407+__u32
79408+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
79409+{
79410+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
79411+}
79412+
79413+__u32
79414+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
79415+{
79416+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
79417+}
79418+
79419+__u32
79420+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
79421+{
79422+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
79423+}
79424+
79425+__u32
79426+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
79427+ umode_t *modeptr)
79428+{
79429+ umode_t mode;
79430+
79431+ *modeptr &= ~gr_acl_umask();
79432+ mode = *modeptr;
79433+
79434+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
79435+ return 1;
79436+
79437+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
79438+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
79439+ GR_CHMOD_ACL_MSG);
79440+ } else {
79441+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
79442+ }
79443+}
79444+
79445+__u32
79446+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
79447+{
79448+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
79449+}
79450+
79451+__u32
79452+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
79453+{
79454+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
79455+}
79456+
79457+__u32
79458+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
79459+{
79460+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
79461+}
79462+
79463+__u32
79464+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
79465+{
79466+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
79467+ GR_UNIXCONNECT_ACL_MSG);
79468+}
79469+
79470+/* hardlinks require at minimum create and link permission,
79471+ any additional privilege required is based on the
79472+ privilege of the file being linked to
79473+*/
79474+__u32
79475+gr_acl_handle_link(const struct dentry * new_dentry,
79476+ const struct dentry * parent_dentry,
79477+ const struct vfsmount * parent_mnt,
79478+ const struct dentry * old_dentry,
79479+ const struct vfsmount * old_mnt, const char *to)
79480+{
79481+ __u32 mode;
79482+ __u32 needmode = GR_CREATE | GR_LINK;
79483+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
79484+
79485+ mode =
79486+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
79487+ old_mnt);
79488+
79489+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
79490+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79491+ return mode;
79492+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79493+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79494+ return 0;
79495+ } else if (unlikely((mode & needmode) != needmode))
79496+ return 0;
79497+
79498+ return 1;
79499+}
79500+
79501+__u32
79502+gr_acl_handle_symlink(const struct dentry * new_dentry,
79503+ const struct dentry * parent_dentry,
79504+ const struct vfsmount * parent_mnt, const char *from)
79505+{
79506+ __u32 needmode = GR_WRITE | GR_CREATE;
79507+ __u32 mode;
79508+
79509+ mode =
79510+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
79511+ GR_CREATE | GR_AUDIT_CREATE |
79512+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
79513+
79514+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
79515+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79516+ return mode;
79517+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79518+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79519+ return 0;
79520+ } else if (unlikely((mode & needmode) != needmode))
79521+ return 0;
79522+
79523+ return (GR_WRITE | GR_CREATE);
79524+}
79525+
79526+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
79527+{
79528+ __u32 mode;
79529+
79530+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79531+
79532+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79533+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
79534+ return mode;
79535+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79536+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
79537+ return 0;
79538+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79539+ return 0;
79540+
79541+ return (reqmode);
79542+}
79543+
79544+__u32
79545+gr_acl_handle_mknod(const struct dentry * new_dentry,
79546+ const struct dentry * parent_dentry,
79547+ const struct vfsmount * parent_mnt,
79548+ const int mode)
79549+{
79550+ __u32 reqmode = GR_WRITE | GR_CREATE;
79551+ if (unlikely(mode & (S_ISUID | S_ISGID)))
79552+ reqmode |= GR_SETID;
79553+
79554+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79555+ reqmode, GR_MKNOD_ACL_MSG);
79556+}
79557+
79558+__u32
79559+gr_acl_handle_mkdir(const struct dentry *new_dentry,
79560+ const struct dentry *parent_dentry,
79561+ const struct vfsmount *parent_mnt)
79562+{
79563+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79564+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
79565+}
79566+
79567+#define RENAME_CHECK_SUCCESS(old, new) \
79568+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
79569+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
79570+
79571+int
79572+gr_acl_handle_rename(struct dentry *new_dentry,
79573+ struct dentry *parent_dentry,
79574+ const struct vfsmount *parent_mnt,
79575+ struct dentry *old_dentry,
79576+ struct inode *old_parent_inode,
79577+ struct vfsmount *old_mnt, const char *newname)
79578+{
79579+ __u32 comp1, comp2;
79580+ int error = 0;
79581+
79582+ if (unlikely(!gr_acl_is_enabled()))
79583+ return 0;
79584+
79585+ if (!new_dentry->d_inode) {
79586+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
79587+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
79588+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
79589+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
79590+ GR_DELETE | GR_AUDIT_DELETE |
79591+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79592+ GR_SUPPRESS, old_mnt);
79593+ } else {
79594+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
79595+ GR_CREATE | GR_DELETE |
79596+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
79597+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79598+ GR_SUPPRESS, parent_mnt);
79599+ comp2 =
79600+ gr_search_file(old_dentry,
79601+ GR_READ | GR_WRITE | GR_AUDIT_READ |
79602+ GR_DELETE | GR_AUDIT_DELETE |
79603+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
79604+ }
79605+
79606+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
79607+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
79608+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79609+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
79610+ && !(comp2 & GR_SUPPRESS)) {
79611+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79612+ error = -EACCES;
79613+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
79614+ error = -EACCES;
79615+
79616+ return error;
79617+}
79618+
79619+void
79620+gr_acl_handle_exit(void)
79621+{
79622+ u16 id;
79623+ char *rolename;
79624+ struct file *exec_file;
79625+
79626+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
79627+ !(current->role->roletype & GR_ROLE_PERSIST))) {
79628+ id = current->acl_role_id;
79629+ rolename = current->role->rolename;
79630+ gr_set_acls(1);
79631+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
79632+ }
79633+
79634+ write_lock(&grsec_exec_file_lock);
79635+ exec_file = current->exec_file;
79636+ current->exec_file = NULL;
79637+ write_unlock(&grsec_exec_file_lock);
79638+
79639+ if (exec_file)
79640+ fput(exec_file);
79641+}
79642+
79643+int
79644+gr_acl_handle_procpidmem(const struct task_struct *task)
79645+{
79646+ if (unlikely(!gr_acl_is_enabled()))
79647+ return 0;
79648+
79649+ if (task != current && task->acl->mode & GR_PROTPROCFD)
79650+ return -EACCES;
79651+
79652+ return 0;
79653+}
79654diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
79655new file mode 100644
79656index 0000000..cd07b96
79657--- /dev/null
79658+++ b/grsecurity/gracl_ip.c
79659@@ -0,0 +1,382 @@
79660+#include <linux/kernel.h>
79661+#include <asm/uaccess.h>
79662+#include <asm/errno.h>
79663+#include <net/sock.h>
79664+#include <linux/file.h>
79665+#include <linux/fs.h>
79666+#include <linux/net.h>
79667+#include <linux/in.h>
79668+#include <linux/skbuff.h>
79669+#include <linux/ip.h>
79670+#include <linux/udp.h>
79671+#include <linux/smp_lock.h>
79672+#include <linux/types.h>
79673+#include <linux/sched.h>
79674+#include <linux/netdevice.h>
79675+#include <linux/inetdevice.h>
79676+#include <linux/gracl.h>
79677+#include <linux/grsecurity.h>
79678+#include <linux/grinternal.h>
79679+
79680+#define GR_BIND 0x01
79681+#define GR_CONNECT 0x02
79682+#define GR_INVERT 0x04
79683+#define GR_BINDOVERRIDE 0x08
79684+#define GR_CONNECTOVERRIDE 0x10
79685+#define GR_SOCK_FAMILY 0x20
79686+
79687+static const char * gr_protocols[IPPROTO_MAX] = {
79688+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
79689+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
79690+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
79691+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
79692+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
79693+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
79694+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
79695+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
79696+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
79697+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
79698+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
79699+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
79700+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
79701+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
79702+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
79703+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
79704+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
79705+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
79706+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
79707+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
79708+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
79709+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
79710+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
79711+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
79712+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
79713+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
79714+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
79715+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
79716+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
79717+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
79718+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
79719+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
79720+ };
79721+
79722+static const char * gr_socktypes[SOCK_MAX] = {
79723+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
79724+ "unknown:7", "unknown:8", "unknown:9", "packet"
79725+ };
79726+
79727+static const char * gr_sockfamilies[AF_MAX+1] = {
79728+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
79729+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
79730+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
79731+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
79732+ };
79733+
79734+const char *
79735+gr_proto_to_name(unsigned char proto)
79736+{
79737+ return gr_protocols[proto];
79738+}
79739+
79740+const char *
79741+gr_socktype_to_name(unsigned char type)
79742+{
79743+ return gr_socktypes[type];
79744+}
79745+
79746+const char *
79747+gr_sockfamily_to_name(unsigned char family)
79748+{
79749+ return gr_sockfamilies[family];
79750+}
79751+
79752+int
79753+gr_search_socket(const int domain, const int type, const int protocol)
79754+{
79755+ struct acl_subject_label *curr;
79756+ const struct cred *cred = current_cred();
79757+
79758+ if (unlikely(!gr_acl_is_enabled()))
79759+ goto exit;
79760+
79761+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
79762+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
79763+ goto exit; // let the kernel handle it
79764+
79765+ curr = current->acl;
79766+
79767+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
79768+ /* the family is allowed, if this is PF_INET allow it only if
79769+ the extra sock type/protocol checks pass */
79770+ if (domain == PF_INET)
79771+ goto inet_check;
79772+ goto exit;
79773+ } else {
79774+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79775+ __u32 fakeip = 0;
79776+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79777+ current->role->roletype, cred->uid,
79778+ cred->gid, current->exec_file ?
79779+ gr_to_filename(current->exec_file->f_path.dentry,
79780+ current->exec_file->f_path.mnt) :
79781+ curr->filename, curr->filename,
79782+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
79783+ &current->signal->saved_ip);
79784+ goto exit;
79785+ }
79786+ goto exit_fail;
79787+ }
79788+
79789+inet_check:
79790+ /* the rest of this checking is for IPv4 only */
79791+ if (!curr->ips)
79792+ goto exit;
79793+
79794+ if ((curr->ip_type & (1 << type)) &&
79795+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
79796+ goto exit;
79797+
79798+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79799+ /* we don't place acls on raw sockets , and sometimes
79800+ dgram/ip sockets are opened for ioctl and not
79801+ bind/connect, so we'll fake a bind learn log */
79802+ if (type == SOCK_RAW || type == SOCK_PACKET) {
79803+ __u32 fakeip = 0;
79804+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79805+ current->role->roletype, cred->uid,
79806+ cred->gid, current->exec_file ?
79807+ gr_to_filename(current->exec_file->f_path.dentry,
79808+ current->exec_file->f_path.mnt) :
79809+ curr->filename, curr->filename,
79810+ &fakeip, 0, type,
79811+ protocol, GR_CONNECT, &current->signal->saved_ip);
79812+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
79813+ __u32 fakeip = 0;
79814+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79815+ current->role->roletype, cred->uid,
79816+ cred->gid, current->exec_file ?
79817+ gr_to_filename(current->exec_file->f_path.dentry,
79818+ current->exec_file->f_path.mnt) :
79819+ curr->filename, curr->filename,
79820+ &fakeip, 0, type,
79821+ protocol, GR_BIND, &current->signal->saved_ip);
79822+ }
79823+ /* we'll log when they use connect or bind */
79824+ goto exit;
79825+ }
79826+
79827+exit_fail:
79828+ if (domain == PF_INET)
79829+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
79830+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
79831+ else
79832+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
79833+ gr_socktype_to_name(type), protocol);
79834+
79835+ return 0;
79836+exit:
79837+ return 1;
79838+}
79839+
79840+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
79841+{
79842+ if ((ip->mode & mode) &&
79843+ (ip_port >= ip->low) &&
79844+ (ip_port <= ip->high) &&
79845+ ((ntohl(ip_addr) & our_netmask) ==
79846+ (ntohl(our_addr) & our_netmask))
79847+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
79848+ && (ip->type & (1 << type))) {
79849+ if (ip->mode & GR_INVERT)
79850+ return 2; // specifically denied
79851+ else
79852+ return 1; // allowed
79853+ }
79854+
79855+ return 0; // not specifically allowed, may continue parsing
79856+}
79857+
79858+static int
79859+gr_search_connectbind(const int full_mode, struct sock *sk,
79860+ struct sockaddr_in *addr, const int type)
79861+{
79862+ char iface[IFNAMSIZ] = {0};
79863+ struct acl_subject_label *curr;
79864+ struct acl_ip_label *ip;
79865+ struct inet_sock *isk;
79866+ struct net_device *dev;
79867+ struct in_device *idev;
79868+ unsigned long i;
79869+ int ret;
79870+ int mode = full_mode & (GR_BIND | GR_CONNECT);
79871+ __u32 ip_addr = 0;
79872+ __u32 our_addr;
79873+ __u32 our_netmask;
79874+ char *p;
79875+ __u16 ip_port = 0;
79876+ const struct cred *cred = current_cred();
79877+
79878+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
79879+ return 0;
79880+
79881+ curr = current->acl;
79882+ isk = inet_sk(sk);
79883+
79884+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
79885+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
79886+ addr->sin_addr.s_addr = curr->inaddr_any_override;
79887+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
79888+ struct sockaddr_in saddr;
79889+ int err;
79890+
79891+ saddr.sin_family = AF_INET;
79892+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
79893+ saddr.sin_port = isk->sport;
79894+
79895+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79896+ if (err)
79897+ return err;
79898+
79899+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79900+ if (err)
79901+ return err;
79902+ }
79903+
79904+ if (!curr->ips)
79905+ return 0;
79906+
79907+ ip_addr = addr->sin_addr.s_addr;
79908+ ip_port = ntohs(addr->sin_port);
79909+
79910+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79911+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79912+ current->role->roletype, cred->uid,
79913+ cred->gid, current->exec_file ?
79914+ gr_to_filename(current->exec_file->f_path.dentry,
79915+ current->exec_file->f_path.mnt) :
79916+ curr->filename, curr->filename,
79917+ &ip_addr, ip_port, type,
79918+ sk->sk_protocol, mode, &current->signal->saved_ip);
79919+ return 0;
79920+ }
79921+
79922+ for (i = 0; i < curr->ip_num; i++) {
79923+ ip = *(curr->ips + i);
79924+ if (ip->iface != NULL) {
79925+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
79926+ p = strchr(iface, ':');
79927+ if (p != NULL)
79928+ *p = '\0';
79929+ dev = dev_get_by_name(sock_net(sk), iface);
79930+ if (dev == NULL)
79931+ continue;
79932+ idev = in_dev_get(dev);
79933+ if (idev == NULL) {
79934+ dev_put(dev);
79935+ continue;
79936+ }
79937+ rcu_read_lock();
79938+ for_ifa(idev) {
79939+ if (!strcmp(ip->iface, ifa->ifa_label)) {
79940+ our_addr = ifa->ifa_address;
79941+ our_netmask = 0xffffffff;
79942+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
79943+ if (ret == 1) {
79944+ rcu_read_unlock();
79945+ in_dev_put(idev);
79946+ dev_put(dev);
79947+ return 0;
79948+ } else if (ret == 2) {
79949+ rcu_read_unlock();
79950+ in_dev_put(idev);
79951+ dev_put(dev);
79952+ goto denied;
79953+ }
79954+ }
79955+ } endfor_ifa(idev);
79956+ rcu_read_unlock();
79957+ in_dev_put(idev);
79958+ dev_put(dev);
79959+ } else {
79960+ our_addr = ip->addr;
79961+ our_netmask = ip->netmask;
79962+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
79963+ if (ret == 1)
79964+ return 0;
79965+ else if (ret == 2)
79966+ goto denied;
79967+ }
79968+ }
79969+
79970+denied:
79971+ if (mode == GR_BIND)
79972+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
79973+ else if (mode == GR_CONNECT)
79974+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
79975+
79976+ return -EACCES;
79977+}
79978+
79979+int
79980+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
79981+{
79982+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
79983+}
79984+
79985+int
79986+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
79987+{
79988+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
79989+}
79990+
79991+int gr_search_listen(struct socket *sock)
79992+{
79993+ struct sock *sk = sock->sk;
79994+ struct sockaddr_in addr;
79995+
79996+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
79997+ addr.sin_port = inet_sk(sk)->sport;
79998+
79999+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80000+}
80001+
80002+int gr_search_accept(struct socket *sock)
80003+{
80004+ struct sock *sk = sock->sk;
80005+ struct sockaddr_in addr;
80006+
80007+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
80008+ addr.sin_port = inet_sk(sk)->sport;
80009+
80010+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
80011+}
80012+
80013+int
80014+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
80015+{
80016+ if (addr)
80017+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
80018+ else {
80019+ struct sockaddr_in sin;
80020+ const struct inet_sock *inet = inet_sk(sk);
80021+
80022+ sin.sin_addr.s_addr = inet->daddr;
80023+ sin.sin_port = inet->dport;
80024+
80025+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80026+ }
80027+}
80028+
80029+int
80030+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
80031+{
80032+ struct sockaddr_in sin;
80033+
80034+ if (unlikely(skb->len < sizeof (struct udphdr)))
80035+ return 0; // skip this packet
80036+
80037+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
80038+ sin.sin_port = udp_hdr(skb)->source;
80039+
80040+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
80041+}
80042diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
80043new file mode 100644
80044index 0000000..34bdd46
80045--- /dev/null
80046+++ b/grsecurity/gracl_learn.c
80047@@ -0,0 +1,208 @@
80048+#include <linux/kernel.h>
80049+#include <linux/mm.h>
80050+#include <linux/sched.h>
80051+#include <linux/poll.h>
80052+#include <linux/smp_lock.h>
80053+#include <linux/string.h>
80054+#include <linux/file.h>
80055+#include <linux/types.h>
80056+#include <linux/vmalloc.h>
80057+#include <linux/grinternal.h>
80058+
80059+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
80060+ size_t count, loff_t *ppos);
80061+extern int gr_acl_is_enabled(void);
80062+
80063+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
80064+static int gr_learn_attached;
80065+
80066+/* use a 512k buffer */
80067+#define LEARN_BUFFER_SIZE (512 * 1024)
80068+
80069+static DEFINE_SPINLOCK(gr_learn_lock);
80070+static DEFINE_MUTEX(gr_learn_user_mutex);
80071+
80072+/* we need to maintain two buffers, so that the kernel context of grlearn
80073+ uses a semaphore around the userspace copying, and the other kernel contexts
80074+ use a spinlock when copying into the buffer, since they cannot sleep
80075+*/
80076+static char *learn_buffer;
80077+static char *learn_buffer_user;
80078+static int learn_buffer_len;
80079+static int learn_buffer_user_len;
80080+
80081+static ssize_t
80082+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
80083+{
80084+ DECLARE_WAITQUEUE(wait, current);
80085+ ssize_t retval = 0;
80086+
80087+ add_wait_queue(&learn_wait, &wait);
80088+ set_current_state(TASK_INTERRUPTIBLE);
80089+ do {
80090+ mutex_lock(&gr_learn_user_mutex);
80091+ spin_lock(&gr_learn_lock);
80092+ if (learn_buffer_len)
80093+ break;
80094+ spin_unlock(&gr_learn_lock);
80095+ mutex_unlock(&gr_learn_user_mutex);
80096+ if (file->f_flags & O_NONBLOCK) {
80097+ retval = -EAGAIN;
80098+ goto out;
80099+ }
80100+ if (signal_pending(current)) {
80101+ retval = -ERESTARTSYS;
80102+ goto out;
80103+ }
80104+
80105+ schedule();
80106+ } while (1);
80107+
80108+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
80109+ learn_buffer_user_len = learn_buffer_len;
80110+ retval = learn_buffer_len;
80111+ learn_buffer_len = 0;
80112+
80113+ spin_unlock(&gr_learn_lock);
80114+
80115+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
80116+ retval = -EFAULT;
80117+
80118+ mutex_unlock(&gr_learn_user_mutex);
80119+out:
80120+ set_current_state(TASK_RUNNING);
80121+ remove_wait_queue(&learn_wait, &wait);
80122+ return retval;
80123+}
80124+
80125+static unsigned int
80126+poll_learn(struct file * file, poll_table * wait)
80127+{
80128+ poll_wait(file, &learn_wait, wait);
80129+
80130+ if (learn_buffer_len)
80131+ return (POLLIN | POLLRDNORM);
80132+
80133+ return 0;
80134+}
80135+
80136+void
80137+gr_clear_learn_entries(void)
80138+{
80139+ char *tmp;
80140+
80141+ mutex_lock(&gr_learn_user_mutex);
80142+ spin_lock(&gr_learn_lock);
80143+ tmp = learn_buffer;
80144+ learn_buffer = NULL;
80145+ spin_unlock(&gr_learn_lock);
80146+ if (tmp)
80147+ vfree(tmp);
80148+ if (learn_buffer_user != NULL) {
80149+ vfree(learn_buffer_user);
80150+ learn_buffer_user = NULL;
80151+ }
80152+ learn_buffer_len = 0;
80153+ mutex_unlock(&gr_learn_user_mutex);
80154+
80155+ return;
80156+}
80157+
80158+void
80159+gr_add_learn_entry(const char *fmt, ...)
80160+{
80161+ va_list args;
80162+ unsigned int len;
80163+
80164+ if (!gr_learn_attached)
80165+ return;
80166+
80167+ spin_lock(&gr_learn_lock);
80168+
80169+ /* leave a gap at the end so we know when it's "full" but don't have to
80170+ compute the exact length of the string we're trying to append
80171+ */
80172+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
80173+ spin_unlock(&gr_learn_lock);
80174+ wake_up_interruptible(&learn_wait);
80175+ return;
80176+ }
80177+ if (learn_buffer == NULL) {
80178+ spin_unlock(&gr_learn_lock);
80179+ return;
80180+ }
80181+
80182+ va_start(args, fmt);
80183+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
80184+ va_end(args);
80185+
80186+ learn_buffer_len += len + 1;
80187+
80188+ spin_unlock(&gr_learn_lock);
80189+ wake_up_interruptible(&learn_wait);
80190+
80191+ return;
80192+}
80193+
80194+static int
80195+open_learn(struct inode *inode, struct file *file)
80196+{
80197+ if (file->f_mode & FMODE_READ && gr_learn_attached)
80198+ return -EBUSY;
80199+ if (file->f_mode & FMODE_READ) {
80200+ int retval = 0;
80201+ mutex_lock(&gr_learn_user_mutex);
80202+ if (learn_buffer == NULL)
80203+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
80204+ if (learn_buffer_user == NULL)
80205+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
80206+ if (learn_buffer == NULL) {
80207+ retval = -ENOMEM;
80208+ goto out_error;
80209+ }
80210+ if (learn_buffer_user == NULL) {
80211+ retval = -ENOMEM;
80212+ goto out_error;
80213+ }
80214+ learn_buffer_len = 0;
80215+ learn_buffer_user_len = 0;
80216+ gr_learn_attached = 1;
80217+out_error:
80218+ mutex_unlock(&gr_learn_user_mutex);
80219+ return retval;
80220+ }
80221+ return 0;
80222+}
80223+
80224+static int
80225+close_learn(struct inode *inode, struct file *file)
80226+{
80227+ if (file->f_mode & FMODE_READ) {
80228+ char *tmp = NULL;
80229+ mutex_lock(&gr_learn_user_mutex);
80230+ spin_lock(&gr_learn_lock);
80231+ tmp = learn_buffer;
80232+ learn_buffer = NULL;
80233+ spin_unlock(&gr_learn_lock);
80234+ if (tmp)
80235+ vfree(tmp);
80236+ if (learn_buffer_user != NULL) {
80237+ vfree(learn_buffer_user);
80238+ learn_buffer_user = NULL;
80239+ }
80240+ learn_buffer_len = 0;
80241+ learn_buffer_user_len = 0;
80242+ gr_learn_attached = 0;
80243+ mutex_unlock(&gr_learn_user_mutex);
80244+ }
80245+
80246+ return 0;
80247+}
80248+
80249+const struct file_operations grsec_fops = {
80250+ .read = read_learn,
80251+ .write = write_grsec_handler,
80252+ .open = open_learn,
80253+ .release = close_learn,
80254+ .poll = poll_learn,
80255+};
80256diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
80257new file mode 100644
80258index 0000000..70b2179
80259--- /dev/null
80260+++ b/grsecurity/gracl_res.c
80261@@ -0,0 +1,67 @@
80262+#include <linux/kernel.h>
80263+#include <linux/sched.h>
80264+#include <linux/gracl.h>
80265+#include <linux/grinternal.h>
80266+
80267+static const char *restab_log[] = {
80268+ [RLIMIT_CPU] = "RLIMIT_CPU",
80269+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
80270+ [RLIMIT_DATA] = "RLIMIT_DATA",
80271+ [RLIMIT_STACK] = "RLIMIT_STACK",
80272+ [RLIMIT_CORE] = "RLIMIT_CORE",
80273+ [RLIMIT_RSS] = "RLIMIT_RSS",
80274+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
80275+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
80276+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
80277+ [RLIMIT_AS] = "RLIMIT_AS",
80278+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
80279+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
80280+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
80281+ [RLIMIT_NICE] = "RLIMIT_NICE",
80282+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
80283+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
80284+ [GR_CRASH_RES] = "RLIMIT_CRASH"
80285+};
80286+
80287+void
80288+gr_log_resource(const struct task_struct *task,
80289+ const int res, const unsigned long wanted, const int gt)
80290+{
80291+ const struct cred *cred;
80292+ unsigned long rlim;
80293+
80294+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
80295+ return;
80296+
80297+ // not yet supported resource
80298+ if (unlikely(!restab_log[res]))
80299+ return;
80300+
80301+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
80302+ rlim = task->signal->rlim[res].rlim_max;
80303+ else
80304+ rlim = task->signal->rlim[res].rlim_cur;
80305+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
80306+ return;
80307+
80308+ rcu_read_lock();
80309+ cred = __task_cred(task);
80310+
80311+ if (res == RLIMIT_NPROC &&
80312+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
80313+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
80314+ goto out_rcu_unlock;
80315+ else if (res == RLIMIT_MEMLOCK &&
80316+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
80317+ goto out_rcu_unlock;
80318+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
80319+ goto out_rcu_unlock;
80320+ rcu_read_unlock();
80321+
80322+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
80323+
80324+ return;
80325+out_rcu_unlock:
80326+ rcu_read_unlock();
80327+ return;
80328+}
80329diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
80330new file mode 100644
80331index 0000000..1d1b734
80332--- /dev/null
80333+++ b/grsecurity/gracl_segv.c
80334@@ -0,0 +1,284 @@
80335+#include <linux/kernel.h>
80336+#include <linux/mm.h>
80337+#include <asm/uaccess.h>
80338+#include <asm/errno.h>
80339+#include <asm/mman.h>
80340+#include <net/sock.h>
80341+#include <linux/file.h>
80342+#include <linux/fs.h>
80343+#include <linux/net.h>
80344+#include <linux/in.h>
80345+#include <linux/smp_lock.h>
80346+#include <linux/slab.h>
80347+#include <linux/types.h>
80348+#include <linux/sched.h>
80349+#include <linux/timer.h>
80350+#include <linux/gracl.h>
80351+#include <linux/grsecurity.h>
80352+#include <linux/grinternal.h>
80353+
80354+static struct crash_uid *uid_set;
80355+static unsigned short uid_used;
80356+static DEFINE_SPINLOCK(gr_uid_lock);
80357+extern rwlock_t gr_inode_lock;
80358+extern struct acl_subject_label *
80359+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
80360+ struct acl_role_label *role);
80361+extern int gr_fake_force_sig(int sig, struct task_struct *t);
80362+
80363+int
80364+gr_init_uidset(void)
80365+{
80366+ uid_set =
80367+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
80368+ uid_used = 0;
80369+
80370+ return uid_set ? 1 : 0;
80371+}
80372+
80373+void
80374+gr_free_uidset(void)
80375+{
80376+ if (uid_set)
80377+ kfree(uid_set);
80378+
80379+ return;
80380+}
80381+
80382+int
80383+gr_find_uid(const uid_t uid)
80384+{
80385+ struct crash_uid *tmp = uid_set;
80386+ uid_t buid;
80387+ int low = 0, high = uid_used - 1, mid;
80388+
80389+ while (high >= low) {
80390+ mid = (low + high) >> 1;
80391+ buid = tmp[mid].uid;
80392+ if (buid == uid)
80393+ return mid;
80394+ if (buid > uid)
80395+ high = mid - 1;
80396+ if (buid < uid)
80397+ low = mid + 1;
80398+ }
80399+
80400+ return -1;
80401+}
80402+
80403+static __inline__ void
80404+gr_insertsort(void)
80405+{
80406+ unsigned short i, j;
80407+ struct crash_uid index;
80408+
80409+ for (i = 1; i < uid_used; i++) {
80410+ index = uid_set[i];
80411+ j = i;
80412+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
80413+ uid_set[j] = uid_set[j - 1];
80414+ j--;
80415+ }
80416+ uid_set[j] = index;
80417+ }
80418+
80419+ return;
80420+}
80421+
80422+static __inline__ void
80423+gr_insert_uid(const uid_t uid, const unsigned long expires)
80424+{
80425+ int loc;
80426+
80427+ if (uid_used == GR_UIDTABLE_MAX)
80428+ return;
80429+
80430+ loc = gr_find_uid(uid);
80431+
80432+ if (loc >= 0) {
80433+ uid_set[loc].expires = expires;
80434+ return;
80435+ }
80436+
80437+ uid_set[uid_used].uid = uid;
80438+ uid_set[uid_used].expires = expires;
80439+ uid_used++;
80440+
80441+ gr_insertsort();
80442+
80443+ return;
80444+}
80445+
80446+void
80447+gr_remove_uid(const unsigned short loc)
80448+{
80449+ unsigned short i;
80450+
80451+ for (i = loc + 1; i < uid_used; i++)
80452+ uid_set[i - 1] = uid_set[i];
80453+
80454+ uid_used--;
80455+
80456+ return;
80457+}
80458+
80459+int
80460+gr_check_crash_uid(const uid_t uid)
80461+{
80462+ int loc;
80463+ int ret = 0;
80464+
80465+ if (unlikely(!gr_acl_is_enabled()))
80466+ return 0;
80467+
80468+ spin_lock(&gr_uid_lock);
80469+ loc = gr_find_uid(uid);
80470+
80471+ if (loc < 0)
80472+ goto out_unlock;
80473+
80474+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
80475+ gr_remove_uid(loc);
80476+ else
80477+ ret = 1;
80478+
80479+out_unlock:
80480+ spin_unlock(&gr_uid_lock);
80481+ return ret;
80482+}
80483+
80484+static __inline__ int
80485+proc_is_setxid(const struct cred *cred)
80486+{
80487+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
80488+ cred->uid != cred->fsuid)
80489+ return 1;
80490+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
80491+ cred->gid != cred->fsgid)
80492+ return 1;
80493+
80494+ return 0;
80495+}
80496+
80497+void
80498+gr_handle_crash(struct task_struct *task, const int sig)
80499+{
80500+ struct acl_subject_label *curr;
80501+ struct task_struct *tsk, *tsk2;
80502+ const struct cred *cred;
80503+ const struct cred *cred2;
80504+
80505+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
80506+ return;
80507+
80508+ if (unlikely(!gr_acl_is_enabled()))
80509+ return;
80510+
80511+ curr = task->acl;
80512+
80513+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
80514+ return;
80515+
80516+ if (time_before_eq(curr->expires, get_seconds())) {
80517+ curr->expires = 0;
80518+ curr->crashes = 0;
80519+ }
80520+
80521+ curr->crashes++;
80522+
80523+ if (!curr->expires)
80524+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
80525+
80526+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80527+ time_after(curr->expires, get_seconds())) {
80528+ rcu_read_lock();
80529+ cred = __task_cred(task);
80530+ if (cred->uid && proc_is_setxid(cred)) {
80531+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80532+ spin_lock(&gr_uid_lock);
80533+ gr_insert_uid(cred->uid, curr->expires);
80534+ spin_unlock(&gr_uid_lock);
80535+ curr->expires = 0;
80536+ curr->crashes = 0;
80537+ read_lock(&tasklist_lock);
80538+ do_each_thread(tsk2, tsk) {
80539+ cred2 = __task_cred(tsk);
80540+ if (tsk != task && cred2->uid == cred->uid)
80541+ gr_fake_force_sig(SIGKILL, tsk);
80542+ } while_each_thread(tsk2, tsk);
80543+ read_unlock(&tasklist_lock);
80544+ } else {
80545+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80546+ read_lock(&tasklist_lock);
80547+ read_lock(&grsec_exec_file_lock);
80548+ do_each_thread(tsk2, tsk) {
80549+ if (likely(tsk != task)) {
80550+ // if this thread has the same subject as the one that triggered
80551+ // RES_CRASH and it's the same binary, kill it
80552+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
80553+ gr_fake_force_sig(SIGKILL, tsk);
80554+ }
80555+ } while_each_thread(tsk2, tsk);
80556+ read_unlock(&grsec_exec_file_lock);
80557+ read_unlock(&tasklist_lock);
80558+ }
80559+ rcu_read_unlock();
80560+ }
80561+
80562+ return;
80563+}
80564+
80565+int
80566+gr_check_crash_exec(const struct file *filp)
80567+{
80568+ struct acl_subject_label *curr;
80569+
80570+ if (unlikely(!gr_acl_is_enabled()))
80571+ return 0;
80572+
80573+ read_lock(&gr_inode_lock);
80574+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
80575+ filp->f_path.dentry->d_inode->i_sb->s_dev,
80576+ current->role);
80577+ read_unlock(&gr_inode_lock);
80578+
80579+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
80580+ (!curr->crashes && !curr->expires))
80581+ return 0;
80582+
80583+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80584+ time_after(curr->expires, get_seconds()))
80585+ return 1;
80586+ else if (time_before_eq(curr->expires, get_seconds())) {
80587+ curr->crashes = 0;
80588+ curr->expires = 0;
80589+ }
80590+
80591+ return 0;
80592+}
80593+
80594+void
80595+gr_handle_alertkill(struct task_struct *task)
80596+{
80597+ struct acl_subject_label *curracl;
80598+ __u32 curr_ip;
80599+ struct task_struct *p, *p2;
80600+
80601+ if (unlikely(!gr_acl_is_enabled()))
80602+ return;
80603+
80604+ curracl = task->acl;
80605+ curr_ip = task->signal->curr_ip;
80606+
80607+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
80608+ read_lock(&tasklist_lock);
80609+ do_each_thread(p2, p) {
80610+ if (p->signal->curr_ip == curr_ip)
80611+ gr_fake_force_sig(SIGKILL, p);
80612+ } while_each_thread(p2, p);
80613+ read_unlock(&tasklist_lock);
80614+ } else if (curracl->mode & GR_KILLPROC)
80615+ gr_fake_force_sig(SIGKILL, task);
80616+
80617+ return;
80618+}
80619diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
80620new file mode 100644
80621index 0000000..9d83a69
80622--- /dev/null
80623+++ b/grsecurity/gracl_shm.c
80624@@ -0,0 +1,40 @@
80625+#include <linux/kernel.h>
80626+#include <linux/mm.h>
80627+#include <linux/sched.h>
80628+#include <linux/file.h>
80629+#include <linux/ipc.h>
80630+#include <linux/gracl.h>
80631+#include <linux/grsecurity.h>
80632+#include <linux/grinternal.h>
80633+
80634+int
80635+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80636+ const time_t shm_createtime, const uid_t cuid, const int shmid)
80637+{
80638+ struct task_struct *task;
80639+
80640+ if (!gr_acl_is_enabled())
80641+ return 1;
80642+
80643+ rcu_read_lock();
80644+ read_lock(&tasklist_lock);
80645+
80646+ task = find_task_by_vpid(shm_cprid);
80647+
80648+ if (unlikely(!task))
80649+ task = find_task_by_vpid(shm_lapid);
80650+
80651+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
80652+ (task->pid == shm_lapid)) &&
80653+ (task->acl->mode & GR_PROTSHM) &&
80654+ (task->acl != current->acl))) {
80655+ read_unlock(&tasklist_lock);
80656+ rcu_read_unlock();
80657+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
80658+ return 0;
80659+ }
80660+ read_unlock(&tasklist_lock);
80661+ rcu_read_unlock();
80662+
80663+ return 1;
80664+}
80665diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
80666new file mode 100644
80667index 0000000..bc0be01
80668--- /dev/null
80669+++ b/grsecurity/grsec_chdir.c
80670@@ -0,0 +1,19 @@
80671+#include <linux/kernel.h>
80672+#include <linux/sched.h>
80673+#include <linux/fs.h>
80674+#include <linux/file.h>
80675+#include <linux/grsecurity.h>
80676+#include <linux/grinternal.h>
80677+
80678+void
80679+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
80680+{
80681+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80682+ if ((grsec_enable_chdir && grsec_enable_group &&
80683+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
80684+ !grsec_enable_group)) {
80685+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
80686+ }
80687+#endif
80688+ return;
80689+}
80690diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
80691new file mode 100644
80692index 0000000..197bdd5
80693--- /dev/null
80694+++ b/grsecurity/grsec_chroot.c
80695@@ -0,0 +1,386 @@
80696+#include <linux/kernel.h>
80697+#include <linux/module.h>
80698+#include <linux/sched.h>
80699+#include <linux/file.h>
80700+#include <linux/fs.h>
80701+#include <linux/mount.h>
80702+#include <linux/types.h>
80703+#include <linux/pid_namespace.h>
80704+#include <linux/grsecurity.h>
80705+#include <linux/grinternal.h>
80706+
80707+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
80708+{
80709+#ifdef CONFIG_GRKERNSEC
80710+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
80711+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
80712+ task->gr_is_chrooted = 1;
80713+ else
80714+ task->gr_is_chrooted = 0;
80715+
80716+ task->gr_chroot_dentry = path->dentry;
80717+#endif
80718+ return;
80719+}
80720+
80721+void gr_clear_chroot_entries(struct task_struct *task)
80722+{
80723+#ifdef CONFIG_GRKERNSEC
80724+ task->gr_is_chrooted = 0;
80725+ task->gr_chroot_dentry = NULL;
80726+#endif
80727+ return;
80728+}
80729+
80730+int
80731+gr_handle_chroot_unix(const pid_t pid)
80732+{
80733+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80734+ struct task_struct *p;
80735+
80736+ if (unlikely(!grsec_enable_chroot_unix))
80737+ return 1;
80738+
80739+ if (likely(!proc_is_chrooted(current)))
80740+ return 1;
80741+
80742+ rcu_read_lock();
80743+ read_lock(&tasklist_lock);
80744+
80745+ p = find_task_by_vpid_unrestricted(pid);
80746+ if (unlikely(p && !have_same_root(current, p))) {
80747+ read_unlock(&tasklist_lock);
80748+ rcu_read_unlock();
80749+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
80750+ return 0;
80751+ }
80752+ read_unlock(&tasklist_lock);
80753+ rcu_read_unlock();
80754+#endif
80755+ return 1;
80756+}
80757+
80758+int
80759+gr_handle_chroot_nice(void)
80760+{
80761+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80762+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
80763+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
80764+ return -EPERM;
80765+ }
80766+#endif
80767+ return 0;
80768+}
80769+
80770+int
80771+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
80772+{
80773+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80774+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
80775+ && proc_is_chrooted(current)) {
80776+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
80777+ return -EACCES;
80778+ }
80779+#endif
80780+ return 0;
80781+}
80782+
80783+int
80784+gr_handle_chroot_rawio(const struct inode *inode)
80785+{
80786+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80787+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
80788+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
80789+ return 1;
80790+#endif
80791+ return 0;
80792+}
80793+
80794+int
80795+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
80796+{
80797+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80798+ struct task_struct *p;
80799+ int ret = 0;
80800+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
80801+ return ret;
80802+
80803+ read_lock(&tasklist_lock);
80804+ do_each_pid_task(pid, type, p) {
80805+ if (!have_same_root(current, p)) {
80806+ ret = 1;
80807+ goto out;
80808+ }
80809+ } while_each_pid_task(pid, type, p);
80810+out:
80811+ read_unlock(&tasklist_lock);
80812+ return ret;
80813+#endif
80814+ return 0;
80815+}
80816+
80817+int
80818+gr_pid_is_chrooted(struct task_struct *p)
80819+{
80820+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80821+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
80822+ return 0;
80823+
80824+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
80825+ !have_same_root(current, p)) {
80826+ return 1;
80827+ }
80828+#endif
80829+ return 0;
80830+}
80831+
80832+EXPORT_SYMBOL(gr_pid_is_chrooted);
80833+
80834+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
80835+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
80836+{
80837+ struct dentry *dentry = (struct dentry *)u_dentry;
80838+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
80839+ struct dentry *realroot;
80840+ struct vfsmount *realrootmnt;
80841+ struct dentry *currentroot;
80842+ struct vfsmount *currentmnt;
80843+ struct task_struct *reaper = &init_task;
80844+ int ret = 1;
80845+
80846+ read_lock(&reaper->fs->lock);
80847+ realrootmnt = mntget(reaper->fs->root.mnt);
80848+ realroot = dget(reaper->fs->root.dentry);
80849+ read_unlock(&reaper->fs->lock);
80850+
80851+ read_lock(&current->fs->lock);
80852+ currentmnt = mntget(current->fs->root.mnt);
80853+ currentroot = dget(current->fs->root.dentry);
80854+ read_unlock(&current->fs->lock);
80855+
80856+ spin_lock(&dcache_lock);
80857+ for (;;) {
80858+ if (unlikely((dentry == realroot && mnt == realrootmnt)
80859+ || (dentry == currentroot && mnt == currentmnt)))
80860+ break;
80861+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
80862+ if (mnt->mnt_parent == mnt)
80863+ break;
80864+ dentry = mnt->mnt_mountpoint;
80865+ mnt = mnt->mnt_parent;
80866+ continue;
80867+ }
80868+ dentry = dentry->d_parent;
80869+ }
80870+ spin_unlock(&dcache_lock);
80871+
80872+ dput(currentroot);
80873+ mntput(currentmnt);
80874+
80875+ /* access is outside of chroot */
80876+ if (dentry == realroot && mnt == realrootmnt)
80877+ ret = 0;
80878+
80879+ dput(realroot);
80880+ mntput(realrootmnt);
80881+ return ret;
80882+}
80883+#endif
80884+
80885+int
80886+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
80887+{
80888+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80889+ if (!grsec_enable_chroot_fchdir)
80890+ return 1;
80891+
80892+ if (!proc_is_chrooted(current))
80893+ return 1;
80894+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
80895+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
80896+ return 0;
80897+ }
80898+#endif
80899+ return 1;
80900+}
80901+
80902+int
80903+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80904+ const time_t shm_createtime)
80905+{
80906+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80907+ struct task_struct *p;
80908+ time_t starttime;
80909+
80910+ if (unlikely(!grsec_enable_chroot_shmat))
80911+ return 1;
80912+
80913+ if (likely(!proc_is_chrooted(current)))
80914+ return 1;
80915+
80916+ rcu_read_lock();
80917+ read_lock(&tasklist_lock);
80918+
80919+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
80920+ starttime = p->start_time.tv_sec;
80921+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
80922+ if (have_same_root(current, p)) {
80923+ goto allow;
80924+ } else {
80925+ read_unlock(&tasklist_lock);
80926+ rcu_read_unlock();
80927+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
80928+ return 0;
80929+ }
80930+ }
80931+ /* creator exited, pid reuse, fall through to next check */
80932+ }
80933+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
80934+ if (unlikely(!have_same_root(current, p))) {
80935+ read_unlock(&tasklist_lock);
80936+ rcu_read_unlock();
80937+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
80938+ return 0;
80939+ }
80940+ }
80941+
80942+allow:
80943+ read_unlock(&tasklist_lock);
80944+ rcu_read_unlock();
80945+#endif
80946+ return 1;
80947+}
80948+
80949+void
80950+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
80951+{
80952+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
80953+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
80954+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
80955+#endif
80956+ return;
80957+}
80958+
80959+int
80960+gr_handle_chroot_mknod(const struct dentry *dentry,
80961+ const struct vfsmount *mnt, const int mode)
80962+{
80963+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
80964+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
80965+ proc_is_chrooted(current)) {
80966+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
80967+ return -EPERM;
80968+ }
80969+#endif
80970+ return 0;
80971+}
80972+
80973+int
80974+gr_handle_chroot_mount(const struct dentry *dentry,
80975+ const struct vfsmount *mnt, const char *dev_name)
80976+{
80977+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
80978+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
80979+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
80980+ return -EPERM;
80981+ }
80982+#endif
80983+ return 0;
80984+}
80985+
80986+int
80987+gr_handle_chroot_pivot(void)
80988+{
80989+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
80990+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
80991+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
80992+ return -EPERM;
80993+ }
80994+#endif
80995+ return 0;
80996+}
80997+
80998+int
80999+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
81000+{
81001+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
81002+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
81003+ !gr_is_outside_chroot(dentry, mnt)) {
81004+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
81005+ return -EPERM;
81006+ }
81007+#endif
81008+ return 0;
81009+}
81010+
81011+extern const char *captab_log[];
81012+extern int captab_log_entries;
81013+
81014+int
81015+gr_chroot_is_capable(const int cap)
81016+{
81017+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81018+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81019+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81020+ if (cap_raised(chroot_caps, cap)) {
81021+ const struct cred *creds = current_cred();
81022+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
81023+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
81024+ }
81025+ return 0;
81026+ }
81027+ }
81028+#endif
81029+ return 1;
81030+}
81031+
81032+int
81033+gr_chroot_is_capable_nolog(const int cap)
81034+{
81035+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81036+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
81037+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
81038+ if (cap_raised(chroot_caps, cap)) {
81039+ return 0;
81040+ }
81041+ }
81042+#endif
81043+ return 1;
81044+}
81045+
81046+int
81047+gr_handle_chroot_sysctl(const int op)
81048+{
81049+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
81050+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
81051+ && (op & MAY_WRITE))
81052+ return -EACCES;
81053+#endif
81054+ return 0;
81055+}
81056+
81057+void
81058+gr_handle_chroot_chdir(struct path *path)
81059+{
81060+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
81061+ if (grsec_enable_chroot_chdir)
81062+ set_fs_pwd(current->fs, path);
81063+#endif
81064+ return;
81065+}
81066+
81067+int
81068+gr_handle_chroot_chmod(const struct dentry *dentry,
81069+ const struct vfsmount *mnt, const int mode)
81070+{
81071+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
81072+ /* allow chmod +s on directories, but not on files */
81073+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
81074+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
81075+ proc_is_chrooted(current)) {
81076+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
81077+ return -EPERM;
81078+ }
81079+#endif
81080+ return 0;
81081+}
81082diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
81083new file mode 100644
81084index 0000000..40545bf
81085--- /dev/null
81086+++ b/grsecurity/grsec_disabled.c
81087@@ -0,0 +1,437 @@
81088+#include <linux/kernel.h>
81089+#include <linux/module.h>
81090+#include <linux/sched.h>
81091+#include <linux/file.h>
81092+#include <linux/fs.h>
81093+#include <linux/kdev_t.h>
81094+#include <linux/net.h>
81095+#include <linux/in.h>
81096+#include <linux/ip.h>
81097+#include <linux/skbuff.h>
81098+#include <linux/sysctl.h>
81099+
81100+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
81101+void
81102+pax_set_initial_flags(struct linux_binprm *bprm)
81103+{
81104+ return;
81105+}
81106+#endif
81107+
81108+#ifdef CONFIG_SYSCTL
81109+__u32
81110+gr_handle_sysctl(const struct ctl_table * table, const int op)
81111+{
81112+ return 0;
81113+}
81114+#endif
81115+
81116+#ifdef CONFIG_TASKSTATS
81117+int gr_is_taskstats_denied(int pid)
81118+{
81119+ return 0;
81120+}
81121+#endif
81122+
81123+int
81124+gr_acl_is_enabled(void)
81125+{
81126+ return 0;
81127+}
81128+
81129+void
81130+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
81131+{
81132+ return;
81133+}
81134+
81135+int
81136+gr_handle_rawio(const struct inode *inode)
81137+{
81138+ return 0;
81139+}
81140+
81141+void
81142+gr_acl_handle_psacct(struct task_struct *task, const long code)
81143+{
81144+ return;
81145+}
81146+
81147+int
81148+gr_handle_ptrace(struct task_struct *task, const long request)
81149+{
81150+ return 0;
81151+}
81152+
81153+int
81154+gr_handle_proc_ptrace(struct task_struct *task)
81155+{
81156+ return 0;
81157+}
81158+
81159+void
81160+gr_learn_resource(const struct task_struct *task,
81161+ const int res, const unsigned long wanted, const int gt)
81162+{
81163+ return;
81164+}
81165+
81166+int
81167+gr_set_acls(const int type)
81168+{
81169+ return 0;
81170+}
81171+
81172+int
81173+gr_check_hidden_task(const struct task_struct *tsk)
81174+{
81175+ return 0;
81176+}
81177+
81178+int
81179+gr_check_protected_task(const struct task_struct *task)
81180+{
81181+ return 0;
81182+}
81183+
81184+int
81185+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
81186+{
81187+ return 0;
81188+}
81189+
81190+void
81191+gr_copy_label(struct task_struct *tsk)
81192+{
81193+ return;
81194+}
81195+
81196+void
81197+gr_set_pax_flags(struct task_struct *task)
81198+{
81199+ return;
81200+}
81201+
81202+int
81203+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
81204+ const int unsafe_share)
81205+{
81206+ return 0;
81207+}
81208+
81209+void
81210+gr_handle_delete(const ino_t ino, const dev_t dev)
81211+{
81212+ return;
81213+}
81214+
81215+void
81216+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
81217+{
81218+ return;
81219+}
81220+
81221+void
81222+gr_handle_crash(struct task_struct *task, const int sig)
81223+{
81224+ return;
81225+}
81226+
81227+int
81228+gr_check_crash_exec(const struct file *filp)
81229+{
81230+ return 0;
81231+}
81232+
81233+int
81234+gr_check_crash_uid(const uid_t uid)
81235+{
81236+ return 0;
81237+}
81238+
81239+void
81240+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
81241+ struct dentry *old_dentry,
81242+ struct dentry *new_dentry,
81243+ struct vfsmount *mnt, const __u8 replace)
81244+{
81245+ return;
81246+}
81247+
81248+int
81249+gr_search_socket(const int family, const int type, const int protocol)
81250+{
81251+ return 1;
81252+}
81253+
81254+int
81255+gr_search_connectbind(const int mode, const struct socket *sock,
81256+ const struct sockaddr_in *addr)
81257+{
81258+ return 0;
81259+}
81260+
81261+void
81262+gr_handle_alertkill(struct task_struct *task)
81263+{
81264+ return;
81265+}
81266+
81267+__u32
81268+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
81269+{
81270+ return 1;
81271+}
81272+
81273+__u32
81274+gr_acl_handle_hidden_file(const struct dentry * dentry,
81275+ const struct vfsmount * mnt)
81276+{
81277+ return 1;
81278+}
81279+
81280+__u32
81281+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
81282+ int acc_mode)
81283+{
81284+ return 1;
81285+}
81286+
81287+__u32
81288+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
81289+{
81290+ return 1;
81291+}
81292+
81293+__u32
81294+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
81295+{
81296+ return 1;
81297+}
81298+
81299+int
81300+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
81301+ unsigned int *vm_flags)
81302+{
81303+ return 1;
81304+}
81305+
81306+__u32
81307+gr_acl_handle_truncate(const struct dentry * dentry,
81308+ const struct vfsmount * mnt)
81309+{
81310+ return 1;
81311+}
81312+
81313+__u32
81314+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
81315+{
81316+ return 1;
81317+}
81318+
81319+__u32
81320+gr_acl_handle_access(const struct dentry * dentry,
81321+ const struct vfsmount * mnt, const int fmode)
81322+{
81323+ return 1;
81324+}
81325+
81326+__u32
81327+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
81328+ umode_t *mode)
81329+{
81330+ return 1;
81331+}
81332+
81333+__u32
81334+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
81335+{
81336+ return 1;
81337+}
81338+
81339+__u32
81340+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
81341+{
81342+ return 1;
81343+}
81344+
81345+void
81346+grsecurity_init(void)
81347+{
81348+ return;
81349+}
81350+
81351+umode_t gr_acl_umask(void)
81352+{
81353+ return 0;
81354+}
81355+
81356+__u32
81357+gr_acl_handle_mknod(const struct dentry * new_dentry,
81358+ const struct dentry * parent_dentry,
81359+ const struct vfsmount * parent_mnt,
81360+ const int mode)
81361+{
81362+ return 1;
81363+}
81364+
81365+__u32
81366+gr_acl_handle_mkdir(const struct dentry * new_dentry,
81367+ const struct dentry * parent_dentry,
81368+ const struct vfsmount * parent_mnt)
81369+{
81370+ return 1;
81371+}
81372+
81373+__u32
81374+gr_acl_handle_symlink(const struct dentry * new_dentry,
81375+ const struct dentry * parent_dentry,
81376+ const struct vfsmount * parent_mnt, const char *from)
81377+{
81378+ return 1;
81379+}
81380+
81381+__u32
81382+gr_acl_handle_link(const struct dentry * new_dentry,
81383+ const struct dentry * parent_dentry,
81384+ const struct vfsmount * parent_mnt,
81385+ const struct dentry * old_dentry,
81386+ const struct vfsmount * old_mnt, const char *to)
81387+{
81388+ return 1;
81389+}
81390+
81391+int
81392+gr_acl_handle_rename(const struct dentry *new_dentry,
81393+ const struct dentry *parent_dentry,
81394+ const struct vfsmount *parent_mnt,
81395+ const struct dentry *old_dentry,
81396+ const struct inode *old_parent_inode,
81397+ const struct vfsmount *old_mnt, const char *newname)
81398+{
81399+ return 0;
81400+}
81401+
81402+int
81403+gr_acl_handle_filldir(const struct file *file, const char *name,
81404+ const int namelen, const ino_t ino)
81405+{
81406+ return 1;
81407+}
81408+
81409+int
81410+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81411+ const time_t shm_createtime, const uid_t cuid, const int shmid)
81412+{
81413+ return 1;
81414+}
81415+
81416+int
81417+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
81418+{
81419+ return 0;
81420+}
81421+
81422+int
81423+gr_search_accept(const struct socket *sock)
81424+{
81425+ return 0;
81426+}
81427+
81428+int
81429+gr_search_listen(const struct socket *sock)
81430+{
81431+ return 0;
81432+}
81433+
81434+int
81435+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
81436+{
81437+ return 0;
81438+}
81439+
81440+__u32
81441+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
81442+{
81443+ return 1;
81444+}
81445+
81446+__u32
81447+gr_acl_handle_creat(const struct dentry * dentry,
81448+ const struct dentry * p_dentry,
81449+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
81450+ const int imode)
81451+{
81452+ return 1;
81453+}
81454+
81455+void
81456+gr_acl_handle_exit(void)
81457+{
81458+ return;
81459+}
81460+
81461+int
81462+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
81463+{
81464+ return 1;
81465+}
81466+
81467+void
81468+gr_set_role_label(const uid_t uid, const gid_t gid)
81469+{
81470+ return;
81471+}
81472+
81473+int
81474+gr_acl_handle_procpidmem(const struct task_struct *task)
81475+{
81476+ return 0;
81477+}
81478+
81479+int
81480+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
81481+{
81482+ return 0;
81483+}
81484+
81485+int
81486+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
81487+{
81488+ return 0;
81489+}
81490+
81491+void
81492+gr_set_kernel_label(struct task_struct *task)
81493+{
81494+ return;
81495+}
81496+
81497+int
81498+gr_check_user_change(int real, int effective, int fs)
81499+{
81500+ return 0;
81501+}
81502+
81503+int
81504+gr_check_group_change(int real, int effective, int fs)
81505+{
81506+ return 0;
81507+}
81508+
81509+int gr_acl_enable_at_secure(void)
81510+{
81511+ return 0;
81512+}
81513+
81514+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
81515+{
81516+ return dentry->d_inode->i_sb->s_dev;
81517+}
81518+
81519+EXPORT_SYMBOL(gr_learn_resource);
81520+EXPORT_SYMBOL(gr_set_kernel_label);
81521+#ifdef CONFIG_SECURITY
81522+EXPORT_SYMBOL(gr_check_user_change);
81523+EXPORT_SYMBOL(gr_check_group_change);
81524+#endif
81525diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
81526new file mode 100644
81527index 0000000..a96e155
81528--- /dev/null
81529+++ b/grsecurity/grsec_exec.c
81530@@ -0,0 +1,204 @@
81531+#include <linux/kernel.h>
81532+#include <linux/sched.h>
81533+#include <linux/file.h>
81534+#include <linux/binfmts.h>
81535+#include <linux/smp_lock.h>
81536+#include <linux/fs.h>
81537+#include <linux/types.h>
81538+#include <linux/grdefs.h>
81539+#include <linux/grinternal.h>
81540+#include <linux/capability.h>
81541+#include <linux/compat.h>
81542+#include <linux/module.h>
81543+
81544+#include <asm/uaccess.h>
81545+
81546+#ifdef CONFIG_GRKERNSEC_EXECLOG
81547+static char gr_exec_arg_buf[132];
81548+static DEFINE_MUTEX(gr_exec_arg_mutex);
81549+#endif
81550+
81551+void
81552+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
81553+{
81554+#ifdef CONFIG_GRKERNSEC_EXECLOG
81555+ char *grarg = gr_exec_arg_buf;
81556+ unsigned int i, x, execlen = 0;
81557+ char c;
81558+
81559+ if (!((grsec_enable_execlog && grsec_enable_group &&
81560+ in_group_p(grsec_audit_gid))
81561+ || (grsec_enable_execlog && !grsec_enable_group)))
81562+ return;
81563+
81564+ mutex_lock(&gr_exec_arg_mutex);
81565+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81566+
81567+ if (unlikely(argv == NULL))
81568+ goto log;
81569+
81570+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81571+ const char __user *p;
81572+ unsigned int len;
81573+
81574+ if (copy_from_user(&p, argv + i, sizeof(p)))
81575+ goto log;
81576+ if (!p)
81577+ goto log;
81578+ len = strnlen_user(p, 128 - execlen);
81579+ if (len > 128 - execlen)
81580+ len = 128 - execlen;
81581+ else if (len > 0)
81582+ len--;
81583+ if (copy_from_user(grarg + execlen, p, len))
81584+ goto log;
81585+
81586+ /* rewrite unprintable characters */
81587+ for (x = 0; x < len; x++) {
81588+ c = *(grarg + execlen + x);
81589+ if (c < 32 || c > 126)
81590+ *(grarg + execlen + x) = ' ';
81591+ }
81592+
81593+ execlen += len;
81594+ *(grarg + execlen) = ' ';
81595+ *(grarg + execlen + 1) = '\0';
81596+ execlen++;
81597+ }
81598+
81599+ log:
81600+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81601+ bprm->file->f_path.mnt, grarg);
81602+ mutex_unlock(&gr_exec_arg_mutex);
81603+#endif
81604+ return;
81605+}
81606+
81607+#ifdef CONFIG_COMPAT
81608+void
81609+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
81610+{
81611+#ifdef CONFIG_GRKERNSEC_EXECLOG
81612+ char *grarg = gr_exec_arg_buf;
81613+ unsigned int i, x, execlen = 0;
81614+ char c;
81615+
81616+ if (!((grsec_enable_execlog && grsec_enable_group &&
81617+ in_group_p(grsec_audit_gid))
81618+ || (grsec_enable_execlog && !grsec_enable_group)))
81619+ return;
81620+
81621+ mutex_lock(&gr_exec_arg_mutex);
81622+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81623+
81624+ if (unlikely(argv == NULL))
81625+ goto log;
81626+
81627+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81628+ compat_uptr_t p;
81629+ unsigned int len;
81630+
81631+ if (get_user(p, argv + i))
81632+ goto log;
81633+ len = strnlen_user(compat_ptr(p), 128 - execlen);
81634+ if (len > 128 - execlen)
81635+ len = 128 - execlen;
81636+ else if (len > 0)
81637+ len--;
81638+ else
81639+ goto log;
81640+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
81641+ goto log;
81642+
81643+ /* rewrite unprintable characters */
81644+ for (x = 0; x < len; x++) {
81645+ c = *(grarg + execlen + x);
81646+ if (c < 32 || c > 126)
81647+ *(grarg + execlen + x) = ' ';
81648+ }
81649+
81650+ execlen += len;
81651+ *(grarg + execlen) = ' ';
81652+ *(grarg + execlen + 1) = '\0';
81653+ execlen++;
81654+ }
81655+
81656+ log:
81657+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81658+ bprm->file->f_path.mnt, grarg);
81659+ mutex_unlock(&gr_exec_arg_mutex);
81660+#endif
81661+ return;
81662+}
81663+#endif
81664+
81665+#ifdef CONFIG_GRKERNSEC
81666+extern int gr_acl_is_capable(const int cap);
81667+extern int gr_acl_is_capable_nolog(const int cap);
81668+extern int gr_chroot_is_capable(const int cap);
81669+extern int gr_chroot_is_capable_nolog(const int cap);
81670+#endif
81671+
81672+const char *captab_log[] = {
81673+ "CAP_CHOWN",
81674+ "CAP_DAC_OVERRIDE",
81675+ "CAP_DAC_READ_SEARCH",
81676+ "CAP_FOWNER",
81677+ "CAP_FSETID",
81678+ "CAP_KILL",
81679+ "CAP_SETGID",
81680+ "CAP_SETUID",
81681+ "CAP_SETPCAP",
81682+ "CAP_LINUX_IMMUTABLE",
81683+ "CAP_NET_BIND_SERVICE",
81684+ "CAP_NET_BROADCAST",
81685+ "CAP_NET_ADMIN",
81686+ "CAP_NET_RAW",
81687+ "CAP_IPC_LOCK",
81688+ "CAP_IPC_OWNER",
81689+ "CAP_SYS_MODULE",
81690+ "CAP_SYS_RAWIO",
81691+ "CAP_SYS_CHROOT",
81692+ "CAP_SYS_PTRACE",
81693+ "CAP_SYS_PACCT",
81694+ "CAP_SYS_ADMIN",
81695+ "CAP_SYS_BOOT",
81696+ "CAP_SYS_NICE",
81697+ "CAP_SYS_RESOURCE",
81698+ "CAP_SYS_TIME",
81699+ "CAP_SYS_TTY_CONFIG",
81700+ "CAP_MKNOD",
81701+ "CAP_LEASE",
81702+ "CAP_AUDIT_WRITE",
81703+ "CAP_AUDIT_CONTROL",
81704+ "CAP_SETFCAP",
81705+ "CAP_MAC_OVERRIDE",
81706+ "CAP_MAC_ADMIN"
81707+};
81708+
81709+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
81710+
81711+int gr_is_capable(const int cap)
81712+{
81713+#ifdef CONFIG_GRKERNSEC
81714+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
81715+ return 1;
81716+ return 0;
81717+#else
81718+ return 1;
81719+#endif
81720+}
81721+
81722+int gr_is_capable_nolog(const int cap)
81723+{
81724+#ifdef CONFIG_GRKERNSEC
81725+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
81726+ return 1;
81727+ return 0;
81728+#else
81729+ return 1;
81730+#endif
81731+}
81732+
81733+EXPORT_SYMBOL(gr_is_capable);
81734+EXPORT_SYMBOL(gr_is_capable_nolog);
81735diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
81736new file mode 100644
81737index 0000000..d3ee748
81738--- /dev/null
81739+++ b/grsecurity/grsec_fifo.c
81740@@ -0,0 +1,24 @@
81741+#include <linux/kernel.h>
81742+#include <linux/sched.h>
81743+#include <linux/fs.h>
81744+#include <linux/file.h>
81745+#include <linux/grinternal.h>
81746+
81747+int
81748+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
81749+ const struct dentry *dir, const int flag, const int acc_mode)
81750+{
81751+#ifdef CONFIG_GRKERNSEC_FIFO
81752+ const struct cred *cred = current_cred();
81753+
81754+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
81755+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
81756+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
81757+ (cred->fsuid != dentry->d_inode->i_uid)) {
81758+ if (!inode_permission(dentry->d_inode, acc_mode))
81759+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
81760+ return -EACCES;
81761+ }
81762+#endif
81763+ return 0;
81764+}
81765diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
81766new file mode 100644
81767index 0000000..8ca18bf
81768--- /dev/null
81769+++ b/grsecurity/grsec_fork.c
81770@@ -0,0 +1,23 @@
81771+#include <linux/kernel.h>
81772+#include <linux/sched.h>
81773+#include <linux/grsecurity.h>
81774+#include <linux/grinternal.h>
81775+#include <linux/errno.h>
81776+
81777+void
81778+gr_log_forkfail(const int retval)
81779+{
81780+#ifdef CONFIG_GRKERNSEC_FORKFAIL
81781+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
81782+ switch (retval) {
81783+ case -EAGAIN:
81784+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
81785+ break;
81786+ case -ENOMEM:
81787+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
81788+ break;
81789+ }
81790+ }
81791+#endif
81792+ return;
81793+}
81794diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
81795new file mode 100644
81796index 0000000..1e995d3
81797--- /dev/null
81798+++ b/grsecurity/grsec_init.c
81799@@ -0,0 +1,278 @@
81800+#include <linux/kernel.h>
81801+#include <linux/sched.h>
81802+#include <linux/mm.h>
81803+#include <linux/smp_lock.h>
81804+#include <linux/gracl.h>
81805+#include <linux/slab.h>
81806+#include <linux/vmalloc.h>
81807+#include <linux/percpu.h>
81808+#include <linux/module.h>
81809+
81810+int grsec_enable_ptrace_readexec;
81811+int grsec_enable_setxid;
81812+int grsec_enable_brute;
81813+int grsec_enable_link;
81814+int grsec_enable_dmesg;
81815+int grsec_enable_harden_ptrace;
81816+int grsec_enable_fifo;
81817+int grsec_enable_execlog;
81818+int grsec_enable_signal;
81819+int grsec_enable_forkfail;
81820+int grsec_enable_audit_ptrace;
81821+int grsec_enable_time;
81822+int grsec_enable_audit_textrel;
81823+int grsec_enable_group;
81824+int grsec_audit_gid;
81825+int grsec_enable_chdir;
81826+int grsec_enable_mount;
81827+int grsec_enable_rofs;
81828+int grsec_enable_chroot_findtask;
81829+int grsec_enable_chroot_mount;
81830+int grsec_enable_chroot_shmat;
81831+int grsec_enable_chroot_fchdir;
81832+int grsec_enable_chroot_double;
81833+int grsec_enable_chroot_pivot;
81834+int grsec_enable_chroot_chdir;
81835+int grsec_enable_chroot_chmod;
81836+int grsec_enable_chroot_mknod;
81837+int grsec_enable_chroot_nice;
81838+int grsec_enable_chroot_execlog;
81839+int grsec_enable_chroot_caps;
81840+int grsec_enable_chroot_sysctl;
81841+int grsec_enable_chroot_unix;
81842+int grsec_enable_tpe;
81843+int grsec_tpe_gid;
81844+int grsec_enable_blackhole;
81845+#ifdef CONFIG_IPV6_MODULE
81846+EXPORT_SYMBOL(grsec_enable_blackhole);
81847+#endif
81848+int grsec_lastack_retries;
81849+int grsec_enable_tpe_all;
81850+int grsec_enable_tpe_invert;
81851+int grsec_enable_socket_all;
81852+int grsec_socket_all_gid;
81853+int grsec_enable_socket_client;
81854+int grsec_socket_client_gid;
81855+int grsec_enable_socket_server;
81856+int grsec_socket_server_gid;
81857+int grsec_resource_logging;
81858+int grsec_disable_privio;
81859+int grsec_enable_log_rwxmaps;
81860+int grsec_lock;
81861+
81862+DEFINE_SPINLOCK(grsec_alert_lock);
81863+unsigned long grsec_alert_wtime = 0;
81864+unsigned long grsec_alert_fyet = 0;
81865+
81866+DEFINE_SPINLOCK(grsec_audit_lock);
81867+
81868+DEFINE_RWLOCK(grsec_exec_file_lock);
81869+
81870+char *gr_shared_page[4];
81871+
81872+char *gr_alert_log_fmt;
81873+char *gr_audit_log_fmt;
81874+char *gr_alert_log_buf;
81875+char *gr_audit_log_buf;
81876+
81877+extern struct gr_arg *gr_usermode;
81878+extern unsigned char *gr_system_salt;
81879+extern unsigned char *gr_system_sum;
81880+
81881+void __init
81882+grsecurity_init(void)
81883+{
81884+ int j;
81885+ /* create the per-cpu shared pages */
81886+
81887+#ifdef CONFIG_X86
81888+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
81889+#endif
81890+
81891+ for (j = 0; j < 4; j++) {
81892+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
81893+ if (gr_shared_page[j] == NULL) {
81894+ panic("Unable to allocate grsecurity shared page");
81895+ return;
81896+ }
81897+ }
81898+
81899+ /* allocate log buffers */
81900+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
81901+ if (!gr_alert_log_fmt) {
81902+ panic("Unable to allocate grsecurity alert log format buffer");
81903+ return;
81904+ }
81905+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
81906+ if (!gr_audit_log_fmt) {
81907+ panic("Unable to allocate grsecurity audit log format buffer");
81908+ return;
81909+ }
81910+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81911+ if (!gr_alert_log_buf) {
81912+ panic("Unable to allocate grsecurity alert log buffer");
81913+ return;
81914+ }
81915+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81916+ if (!gr_audit_log_buf) {
81917+ panic("Unable to allocate grsecurity audit log buffer");
81918+ return;
81919+ }
81920+
81921+ /* allocate memory for authentication structure */
81922+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
81923+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
81924+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
81925+
81926+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
81927+ panic("Unable to allocate grsecurity authentication structure");
81928+ return;
81929+ }
81930+
81931+
81932+#ifdef CONFIG_GRKERNSEC_IO
81933+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
81934+ grsec_disable_privio = 1;
81935+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
81936+ grsec_disable_privio = 1;
81937+#else
81938+ grsec_disable_privio = 0;
81939+#endif
81940+#endif
81941+
81942+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
81943+ /* for backward compatibility, tpe_invert always defaults to on if
81944+ enabled in the kernel
81945+ */
81946+ grsec_enable_tpe_invert = 1;
81947+#endif
81948+
81949+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
81950+#ifndef CONFIG_GRKERNSEC_SYSCTL
81951+ grsec_lock = 1;
81952+#endif
81953+
81954+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
81955+ grsec_enable_audit_textrel = 1;
81956+#endif
81957+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
81958+ grsec_enable_log_rwxmaps = 1;
81959+#endif
81960+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
81961+ grsec_enable_group = 1;
81962+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
81963+#endif
81964+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
81965+ grsec_enable_chdir = 1;
81966+#endif
81967+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
81968+ grsec_enable_harden_ptrace = 1;
81969+#endif
81970+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
81971+ grsec_enable_mount = 1;
81972+#endif
81973+#ifdef CONFIG_GRKERNSEC_LINK
81974+ grsec_enable_link = 1;
81975+#endif
81976+#ifdef CONFIG_GRKERNSEC_BRUTE
81977+ grsec_enable_brute = 1;
81978+#endif
81979+#ifdef CONFIG_GRKERNSEC_DMESG
81980+ grsec_enable_dmesg = 1;
81981+#endif
81982+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81983+ grsec_enable_blackhole = 1;
81984+ grsec_lastack_retries = 4;
81985+#endif
81986+#ifdef CONFIG_GRKERNSEC_FIFO
81987+ grsec_enable_fifo = 1;
81988+#endif
81989+#ifdef CONFIG_GRKERNSEC_EXECLOG
81990+ grsec_enable_execlog = 1;
81991+#endif
81992+#ifdef CONFIG_GRKERNSEC_SETXID
81993+ grsec_enable_setxid = 1;
81994+#endif
81995+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
81996+ grsec_enable_ptrace_readexec = 1;
81997+#endif
81998+#ifdef CONFIG_GRKERNSEC_SIGNAL
81999+ grsec_enable_signal = 1;
82000+#endif
82001+#ifdef CONFIG_GRKERNSEC_FORKFAIL
82002+ grsec_enable_forkfail = 1;
82003+#endif
82004+#ifdef CONFIG_GRKERNSEC_TIME
82005+ grsec_enable_time = 1;
82006+#endif
82007+#ifdef CONFIG_GRKERNSEC_RESLOG
82008+ grsec_resource_logging = 1;
82009+#endif
82010+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82011+ grsec_enable_chroot_findtask = 1;
82012+#endif
82013+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
82014+ grsec_enable_chroot_unix = 1;
82015+#endif
82016+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
82017+ grsec_enable_chroot_mount = 1;
82018+#endif
82019+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
82020+ grsec_enable_chroot_fchdir = 1;
82021+#endif
82022+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
82023+ grsec_enable_chroot_shmat = 1;
82024+#endif
82025+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82026+ grsec_enable_audit_ptrace = 1;
82027+#endif
82028+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
82029+ grsec_enable_chroot_double = 1;
82030+#endif
82031+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
82032+ grsec_enable_chroot_pivot = 1;
82033+#endif
82034+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
82035+ grsec_enable_chroot_chdir = 1;
82036+#endif
82037+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
82038+ grsec_enable_chroot_chmod = 1;
82039+#endif
82040+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
82041+ grsec_enable_chroot_mknod = 1;
82042+#endif
82043+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
82044+ grsec_enable_chroot_nice = 1;
82045+#endif
82046+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
82047+ grsec_enable_chroot_execlog = 1;
82048+#endif
82049+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
82050+ grsec_enable_chroot_caps = 1;
82051+#endif
82052+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
82053+ grsec_enable_chroot_sysctl = 1;
82054+#endif
82055+#ifdef CONFIG_GRKERNSEC_TPE
82056+ grsec_enable_tpe = 1;
82057+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
82058+#ifdef CONFIG_GRKERNSEC_TPE_ALL
82059+ grsec_enable_tpe_all = 1;
82060+#endif
82061+#endif
82062+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
82063+ grsec_enable_socket_all = 1;
82064+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
82065+#endif
82066+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
82067+ grsec_enable_socket_client = 1;
82068+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
82069+#endif
82070+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
82071+ grsec_enable_socket_server = 1;
82072+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
82073+#endif
82074+#endif
82075+
82076+ return;
82077+}
82078diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
82079new file mode 100644
82080index 0000000..3efe141
82081--- /dev/null
82082+++ b/grsecurity/grsec_link.c
82083@@ -0,0 +1,43 @@
82084+#include <linux/kernel.h>
82085+#include <linux/sched.h>
82086+#include <linux/fs.h>
82087+#include <linux/file.h>
82088+#include <linux/grinternal.h>
82089+
82090+int
82091+gr_handle_follow_link(const struct inode *parent,
82092+ const struct inode *inode,
82093+ const struct dentry *dentry, const struct vfsmount *mnt)
82094+{
82095+#ifdef CONFIG_GRKERNSEC_LINK
82096+ const struct cred *cred = current_cred();
82097+
82098+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
82099+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
82100+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
82101+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
82102+ return -EACCES;
82103+ }
82104+#endif
82105+ return 0;
82106+}
82107+
82108+int
82109+gr_handle_hardlink(const struct dentry *dentry,
82110+ const struct vfsmount *mnt,
82111+ struct inode *inode, const int mode, const char *to)
82112+{
82113+#ifdef CONFIG_GRKERNSEC_LINK
82114+ const struct cred *cred = current_cred();
82115+
82116+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
82117+ (!S_ISREG(mode) || (mode & S_ISUID) ||
82118+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
82119+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
82120+ !capable(CAP_FOWNER) && cred->uid) {
82121+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
82122+ return -EPERM;
82123+ }
82124+#endif
82125+ return 0;
82126+}
82127diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
82128new file mode 100644
82129index 0000000..a45d2e9
82130--- /dev/null
82131+++ b/grsecurity/grsec_log.c
82132@@ -0,0 +1,322 @@
82133+#include <linux/kernel.h>
82134+#include <linux/sched.h>
82135+#include <linux/file.h>
82136+#include <linux/tty.h>
82137+#include <linux/fs.h>
82138+#include <linux/grinternal.h>
82139+
82140+#ifdef CONFIG_TREE_PREEMPT_RCU
82141+#define DISABLE_PREEMPT() preempt_disable()
82142+#define ENABLE_PREEMPT() preempt_enable()
82143+#else
82144+#define DISABLE_PREEMPT()
82145+#define ENABLE_PREEMPT()
82146+#endif
82147+
82148+#define BEGIN_LOCKS(x) \
82149+ DISABLE_PREEMPT(); \
82150+ rcu_read_lock(); \
82151+ read_lock(&tasklist_lock); \
82152+ read_lock(&grsec_exec_file_lock); \
82153+ if (x != GR_DO_AUDIT) \
82154+ spin_lock(&grsec_alert_lock); \
82155+ else \
82156+ spin_lock(&grsec_audit_lock)
82157+
82158+#define END_LOCKS(x) \
82159+ if (x != GR_DO_AUDIT) \
82160+ spin_unlock(&grsec_alert_lock); \
82161+ else \
82162+ spin_unlock(&grsec_audit_lock); \
82163+ read_unlock(&grsec_exec_file_lock); \
82164+ read_unlock(&tasklist_lock); \
82165+ rcu_read_unlock(); \
82166+ ENABLE_PREEMPT(); \
82167+ if (x == GR_DONT_AUDIT) \
82168+ gr_handle_alertkill(current)
82169+
82170+enum {
82171+ FLOODING,
82172+ NO_FLOODING
82173+};
82174+
82175+extern char *gr_alert_log_fmt;
82176+extern char *gr_audit_log_fmt;
82177+extern char *gr_alert_log_buf;
82178+extern char *gr_audit_log_buf;
82179+
82180+static int gr_log_start(int audit)
82181+{
82182+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
82183+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
82184+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82185+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
82186+ unsigned long curr_secs = get_seconds();
82187+
82188+ if (audit == GR_DO_AUDIT)
82189+ goto set_fmt;
82190+
82191+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
82192+ grsec_alert_wtime = curr_secs;
82193+ grsec_alert_fyet = 0;
82194+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
82195+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
82196+ grsec_alert_fyet++;
82197+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
82198+ grsec_alert_wtime = curr_secs;
82199+ grsec_alert_fyet++;
82200+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
82201+ return FLOODING;
82202+ }
82203+ else return FLOODING;
82204+
82205+set_fmt:
82206+#endif
82207+ memset(buf, 0, PAGE_SIZE);
82208+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
82209+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
82210+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82211+ } else if (current->signal->curr_ip) {
82212+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
82213+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
82214+ } else if (gr_acl_is_enabled()) {
82215+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
82216+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
82217+ } else {
82218+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
82219+ strcpy(buf, fmt);
82220+ }
82221+
82222+ return NO_FLOODING;
82223+}
82224+
82225+static void gr_log_middle(int audit, const char *msg, va_list ap)
82226+ __attribute__ ((format (printf, 2, 0)));
82227+
82228+static void gr_log_middle(int audit, const char *msg, va_list ap)
82229+{
82230+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82231+ unsigned int len = strlen(buf);
82232+
82233+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82234+
82235+ return;
82236+}
82237+
82238+static void gr_log_middle_varargs(int audit, const char *msg, ...)
82239+ __attribute__ ((format (printf, 2, 3)));
82240+
82241+static void gr_log_middle_varargs(int audit, const char *msg, ...)
82242+{
82243+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82244+ unsigned int len = strlen(buf);
82245+ va_list ap;
82246+
82247+ va_start(ap, msg);
82248+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
82249+ va_end(ap);
82250+
82251+ return;
82252+}
82253+
82254+static void gr_log_end(int audit, int append_default)
82255+{
82256+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
82257+
82258+ if (append_default) {
82259+ unsigned int len = strlen(buf);
82260+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
82261+ }
82262+
82263+ printk("%s\n", buf);
82264+
82265+ return;
82266+}
82267+
82268+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
82269+{
82270+ int logtype;
82271+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
82272+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
82273+ void *voidptr = NULL;
82274+ int num1 = 0, num2 = 0;
82275+ unsigned long ulong1 = 0, ulong2 = 0;
82276+ struct dentry *dentry = NULL;
82277+ struct vfsmount *mnt = NULL;
82278+ struct file *file = NULL;
82279+ struct task_struct *task = NULL;
82280+ const struct cred *cred, *pcred;
82281+ va_list ap;
82282+
82283+ BEGIN_LOCKS(audit);
82284+ logtype = gr_log_start(audit);
82285+ if (logtype == FLOODING) {
82286+ END_LOCKS(audit);
82287+ return;
82288+ }
82289+ va_start(ap, argtypes);
82290+ switch (argtypes) {
82291+ case GR_TTYSNIFF:
82292+ task = va_arg(ap, struct task_struct *);
82293+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
82294+ break;
82295+ case GR_SYSCTL_HIDDEN:
82296+ str1 = va_arg(ap, char *);
82297+ gr_log_middle_varargs(audit, msg, result, str1);
82298+ break;
82299+ case GR_RBAC:
82300+ dentry = va_arg(ap, struct dentry *);
82301+ mnt = va_arg(ap, struct vfsmount *);
82302+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
82303+ break;
82304+ case GR_RBAC_STR:
82305+ dentry = va_arg(ap, struct dentry *);
82306+ mnt = va_arg(ap, struct vfsmount *);
82307+ str1 = va_arg(ap, char *);
82308+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
82309+ break;
82310+ case GR_STR_RBAC:
82311+ str1 = va_arg(ap, char *);
82312+ dentry = va_arg(ap, struct dentry *);
82313+ mnt = va_arg(ap, struct vfsmount *);
82314+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
82315+ break;
82316+ case GR_RBAC_MODE2:
82317+ dentry = va_arg(ap, struct dentry *);
82318+ mnt = va_arg(ap, struct vfsmount *);
82319+ str1 = va_arg(ap, char *);
82320+ str2 = va_arg(ap, char *);
82321+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
82322+ break;
82323+ case GR_RBAC_MODE3:
82324+ dentry = va_arg(ap, struct dentry *);
82325+ mnt = va_arg(ap, struct vfsmount *);
82326+ str1 = va_arg(ap, char *);
82327+ str2 = va_arg(ap, char *);
82328+ str3 = va_arg(ap, char *);
82329+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
82330+ break;
82331+ case GR_FILENAME:
82332+ dentry = va_arg(ap, struct dentry *);
82333+ mnt = va_arg(ap, struct vfsmount *);
82334+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
82335+ break;
82336+ case GR_STR_FILENAME:
82337+ str1 = va_arg(ap, char *);
82338+ dentry = va_arg(ap, struct dentry *);
82339+ mnt = va_arg(ap, struct vfsmount *);
82340+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
82341+ break;
82342+ case GR_FILENAME_STR:
82343+ dentry = va_arg(ap, struct dentry *);
82344+ mnt = va_arg(ap, struct vfsmount *);
82345+ str1 = va_arg(ap, char *);
82346+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
82347+ break;
82348+ case GR_FILENAME_TWO_INT:
82349+ dentry = va_arg(ap, struct dentry *);
82350+ mnt = va_arg(ap, struct vfsmount *);
82351+ num1 = va_arg(ap, int);
82352+ num2 = va_arg(ap, int);
82353+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
82354+ break;
82355+ case GR_FILENAME_TWO_INT_STR:
82356+ dentry = va_arg(ap, struct dentry *);
82357+ mnt = va_arg(ap, struct vfsmount *);
82358+ num1 = va_arg(ap, int);
82359+ num2 = va_arg(ap, int);
82360+ str1 = va_arg(ap, char *);
82361+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
82362+ break;
82363+ case GR_TEXTREL:
82364+ file = va_arg(ap, struct file *);
82365+ ulong1 = va_arg(ap, unsigned long);
82366+ ulong2 = va_arg(ap, unsigned long);
82367+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
82368+ break;
82369+ case GR_PTRACE:
82370+ task = va_arg(ap, struct task_struct *);
82371+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
82372+ break;
82373+ case GR_RESOURCE:
82374+ task = va_arg(ap, struct task_struct *);
82375+ cred = __task_cred(task);
82376+ pcred = __task_cred(task->real_parent);
82377+ ulong1 = va_arg(ap, unsigned long);
82378+ str1 = va_arg(ap, char *);
82379+ ulong2 = va_arg(ap, unsigned long);
82380+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82381+ break;
82382+ case GR_CAP:
82383+ task = va_arg(ap, struct task_struct *);
82384+ cred = __task_cred(task);
82385+ pcred = __task_cred(task->real_parent);
82386+ str1 = va_arg(ap, char *);
82387+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82388+ break;
82389+ case GR_SIG:
82390+ str1 = va_arg(ap, char *);
82391+ voidptr = va_arg(ap, void *);
82392+ gr_log_middle_varargs(audit, msg, str1, voidptr);
82393+ break;
82394+ case GR_SIG2:
82395+ task = va_arg(ap, struct task_struct *);
82396+ cred = __task_cred(task);
82397+ pcred = __task_cred(task->real_parent);
82398+ num1 = va_arg(ap, int);
82399+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82400+ break;
82401+ case GR_CRASH1:
82402+ task = va_arg(ap, struct task_struct *);
82403+ cred = __task_cred(task);
82404+ pcred = __task_cred(task->real_parent);
82405+ ulong1 = va_arg(ap, unsigned long);
82406+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
82407+ break;
82408+ case GR_CRASH2:
82409+ task = va_arg(ap, struct task_struct *);
82410+ cred = __task_cred(task);
82411+ pcred = __task_cred(task->real_parent);
82412+ ulong1 = va_arg(ap, unsigned long);
82413+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
82414+ break;
82415+ case GR_RWXMAP:
82416+ file = va_arg(ap, struct file *);
82417+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
82418+ break;
82419+ case GR_PSACCT:
82420+ {
82421+ unsigned int wday, cday;
82422+ __u8 whr, chr;
82423+ __u8 wmin, cmin;
82424+ __u8 wsec, csec;
82425+ char cur_tty[64] = { 0 };
82426+ char parent_tty[64] = { 0 };
82427+
82428+ task = va_arg(ap, struct task_struct *);
82429+ wday = va_arg(ap, unsigned int);
82430+ cday = va_arg(ap, unsigned int);
82431+ whr = va_arg(ap, int);
82432+ chr = va_arg(ap, int);
82433+ wmin = va_arg(ap, int);
82434+ cmin = va_arg(ap, int);
82435+ wsec = va_arg(ap, int);
82436+ csec = va_arg(ap, int);
82437+ ulong1 = va_arg(ap, unsigned long);
82438+ cred = __task_cred(task);
82439+ pcred = __task_cred(task->real_parent);
82440+
82441+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
82442+ }
82443+ break;
82444+ default:
82445+ gr_log_middle(audit, msg, ap);
82446+ }
82447+ va_end(ap);
82448+ // these don't need DEFAULTSECARGS printed on the end
82449+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
82450+ gr_log_end(audit, 0);
82451+ else
82452+ gr_log_end(audit, 1);
82453+ END_LOCKS(audit);
82454+}
82455diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
82456new file mode 100644
82457index 0000000..f536303
82458--- /dev/null
82459+++ b/grsecurity/grsec_mem.c
82460@@ -0,0 +1,40 @@
82461+#include <linux/kernel.h>
82462+#include <linux/sched.h>
82463+#include <linux/mm.h>
82464+#include <linux/mman.h>
82465+#include <linux/grinternal.h>
82466+
82467+void
82468+gr_handle_ioperm(void)
82469+{
82470+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
82471+ return;
82472+}
82473+
82474+void
82475+gr_handle_iopl(void)
82476+{
82477+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
82478+ return;
82479+}
82480+
82481+void
82482+gr_handle_mem_readwrite(u64 from, u64 to)
82483+{
82484+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
82485+ return;
82486+}
82487+
82488+void
82489+gr_handle_vm86(void)
82490+{
82491+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
82492+ return;
82493+}
82494+
82495+void
82496+gr_log_badprocpid(const char *entry)
82497+{
82498+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
82499+ return;
82500+}
82501diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
82502new file mode 100644
82503index 0000000..2131422
82504--- /dev/null
82505+++ b/grsecurity/grsec_mount.c
82506@@ -0,0 +1,62 @@
82507+#include <linux/kernel.h>
82508+#include <linux/sched.h>
82509+#include <linux/mount.h>
82510+#include <linux/grsecurity.h>
82511+#include <linux/grinternal.h>
82512+
82513+void
82514+gr_log_remount(const char *devname, const int retval)
82515+{
82516+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82517+ if (grsec_enable_mount && (retval >= 0))
82518+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
82519+#endif
82520+ return;
82521+}
82522+
82523+void
82524+gr_log_unmount(const char *devname, const int retval)
82525+{
82526+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82527+ if (grsec_enable_mount && (retval >= 0))
82528+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
82529+#endif
82530+ return;
82531+}
82532+
82533+void
82534+gr_log_mount(const char *from, const char *to, const int retval)
82535+{
82536+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82537+ if (grsec_enable_mount && (retval >= 0))
82538+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
82539+#endif
82540+ return;
82541+}
82542+
82543+int
82544+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
82545+{
82546+#ifdef CONFIG_GRKERNSEC_ROFS
82547+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
82548+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
82549+ return -EPERM;
82550+ } else
82551+ return 0;
82552+#endif
82553+ return 0;
82554+}
82555+
82556+int
82557+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
82558+{
82559+#ifdef CONFIG_GRKERNSEC_ROFS
82560+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
82561+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
82562+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
82563+ return -EPERM;
82564+ } else
82565+ return 0;
82566+#endif
82567+ return 0;
82568+}
82569diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
82570new file mode 100644
82571index 0000000..a3b12a0
82572--- /dev/null
82573+++ b/grsecurity/grsec_pax.c
82574@@ -0,0 +1,36 @@
82575+#include <linux/kernel.h>
82576+#include <linux/sched.h>
82577+#include <linux/mm.h>
82578+#include <linux/file.h>
82579+#include <linux/grinternal.h>
82580+#include <linux/grsecurity.h>
82581+
82582+void
82583+gr_log_textrel(struct vm_area_struct * vma)
82584+{
82585+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82586+ if (grsec_enable_audit_textrel)
82587+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
82588+#endif
82589+ return;
82590+}
82591+
82592+void
82593+gr_log_rwxmmap(struct file *file)
82594+{
82595+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82596+ if (grsec_enable_log_rwxmaps)
82597+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
82598+#endif
82599+ return;
82600+}
82601+
82602+void
82603+gr_log_rwxmprotect(struct file *file)
82604+{
82605+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82606+ if (grsec_enable_log_rwxmaps)
82607+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
82608+#endif
82609+ return;
82610+}
82611diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
82612new file mode 100644
82613index 0000000..78f8733
82614--- /dev/null
82615+++ b/grsecurity/grsec_ptrace.c
82616@@ -0,0 +1,30 @@
82617+#include <linux/kernel.h>
82618+#include <linux/sched.h>
82619+#include <linux/grinternal.h>
82620+#include <linux/security.h>
82621+
82622+void
82623+gr_audit_ptrace(struct task_struct *task)
82624+{
82625+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82626+ if (grsec_enable_audit_ptrace)
82627+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
82628+#endif
82629+ return;
82630+}
82631+
82632+int
82633+gr_ptrace_readexec(struct file *file, int unsafe_flags)
82634+{
82635+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82636+ const struct dentry *dentry = file->f_path.dentry;
82637+ const struct vfsmount *mnt = file->f_path.mnt;
82638+
82639+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
82640+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
82641+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
82642+ return -EACCES;
82643+ }
82644+#endif
82645+ return 0;
82646+}
82647diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
82648new file mode 100644
82649index 0000000..c648492
82650--- /dev/null
82651+++ b/grsecurity/grsec_sig.c
82652@@ -0,0 +1,206 @@
82653+#include <linux/kernel.h>
82654+#include <linux/sched.h>
82655+#include <linux/delay.h>
82656+#include <linux/grsecurity.h>
82657+#include <linux/grinternal.h>
82658+#include <linux/hardirq.h>
82659+
82660+char *signames[] = {
82661+ [SIGSEGV] = "Segmentation fault",
82662+ [SIGILL] = "Illegal instruction",
82663+ [SIGABRT] = "Abort",
82664+ [SIGBUS] = "Invalid alignment/Bus error"
82665+};
82666+
82667+void
82668+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
82669+{
82670+#ifdef CONFIG_GRKERNSEC_SIGNAL
82671+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
82672+ (sig == SIGABRT) || (sig == SIGBUS))) {
82673+ if (t->pid == current->pid) {
82674+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
82675+ } else {
82676+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
82677+ }
82678+ }
82679+#endif
82680+ return;
82681+}
82682+
82683+int
82684+gr_handle_signal(const struct task_struct *p, const int sig)
82685+{
82686+#ifdef CONFIG_GRKERNSEC
82687+ /* ignore the 0 signal for protected task checks */
82688+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
82689+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
82690+ return -EPERM;
82691+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
82692+ return -EPERM;
82693+ }
82694+#endif
82695+ return 0;
82696+}
82697+
82698+#ifdef CONFIG_GRKERNSEC
82699+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
82700+
82701+int gr_fake_force_sig(int sig, struct task_struct *t)
82702+{
82703+ unsigned long int flags;
82704+ int ret, blocked, ignored;
82705+ struct k_sigaction *action;
82706+
82707+ spin_lock_irqsave(&t->sighand->siglock, flags);
82708+ action = &t->sighand->action[sig-1];
82709+ ignored = action->sa.sa_handler == SIG_IGN;
82710+ blocked = sigismember(&t->blocked, sig);
82711+ if (blocked || ignored) {
82712+ action->sa.sa_handler = SIG_DFL;
82713+ if (blocked) {
82714+ sigdelset(&t->blocked, sig);
82715+ recalc_sigpending_and_wake(t);
82716+ }
82717+ }
82718+ if (action->sa.sa_handler == SIG_DFL)
82719+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
82720+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
82721+
82722+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
82723+
82724+ return ret;
82725+}
82726+#endif
82727+
82728+#ifdef CONFIG_GRKERNSEC_BRUTE
82729+#define GR_USER_BAN_TIME (15 * 60)
82730+
82731+static int __get_dumpable(unsigned long mm_flags)
82732+{
82733+ int ret;
82734+
82735+ ret = mm_flags & MMF_DUMPABLE_MASK;
82736+ return (ret >= 2) ? 2 : ret;
82737+}
82738+#endif
82739+
82740+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
82741+{
82742+#ifdef CONFIG_GRKERNSEC_BRUTE
82743+ uid_t uid = 0;
82744+
82745+ if (!grsec_enable_brute)
82746+ return;
82747+
82748+ rcu_read_lock();
82749+ read_lock(&tasklist_lock);
82750+ read_lock(&grsec_exec_file_lock);
82751+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
82752+ p->real_parent->brute = 1;
82753+ else {
82754+ const struct cred *cred = __task_cred(p), *cred2;
82755+ struct task_struct *tsk, *tsk2;
82756+
82757+ if (!__get_dumpable(mm_flags) && cred->uid) {
82758+ struct user_struct *user;
82759+
82760+ uid = cred->uid;
82761+
82762+ /* this is put upon execution past expiration */
82763+ user = find_user(uid);
82764+ if (user == NULL)
82765+ goto unlock;
82766+ user->banned = 1;
82767+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
82768+ if (user->ban_expires == ~0UL)
82769+ user->ban_expires--;
82770+
82771+ do_each_thread(tsk2, tsk) {
82772+ cred2 = __task_cred(tsk);
82773+ if (tsk != p && cred2->uid == uid)
82774+ gr_fake_force_sig(SIGKILL, tsk);
82775+ } while_each_thread(tsk2, tsk);
82776+ }
82777+ }
82778+unlock:
82779+ read_unlock(&grsec_exec_file_lock);
82780+ read_unlock(&tasklist_lock);
82781+ rcu_read_unlock();
82782+
82783+ if (uid)
82784+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
82785+#endif
82786+ return;
82787+}
82788+
82789+void gr_handle_brute_check(void)
82790+{
82791+#ifdef CONFIG_GRKERNSEC_BRUTE
82792+ if (current->brute)
82793+ msleep(30 * 1000);
82794+#endif
82795+ return;
82796+}
82797+
82798+void gr_handle_kernel_exploit(void)
82799+{
82800+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
82801+ const struct cred *cred;
82802+ struct task_struct *tsk, *tsk2;
82803+ struct user_struct *user;
82804+ uid_t uid;
82805+
82806+ if (in_irq() || in_serving_softirq() || in_nmi())
82807+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
82808+
82809+ uid = current_uid();
82810+
82811+ if (uid == 0)
82812+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
82813+ else {
82814+ /* kill all the processes of this user, hold a reference
82815+ to their creds struct, and prevent them from creating
82816+ another process until system reset
82817+ */
82818+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
82819+ /* we intentionally leak this ref */
82820+ user = get_uid(current->cred->user);
82821+ if (user) {
82822+ user->banned = 1;
82823+ user->ban_expires = ~0UL;
82824+ }
82825+
82826+ read_lock(&tasklist_lock);
82827+ do_each_thread(tsk2, tsk) {
82828+ cred = __task_cred(tsk);
82829+ if (cred->uid == uid)
82830+ gr_fake_force_sig(SIGKILL, tsk);
82831+ } while_each_thread(tsk2, tsk);
82832+ read_unlock(&tasklist_lock);
82833+ }
82834+#endif
82835+}
82836+
82837+int __gr_process_user_ban(struct user_struct *user)
82838+{
82839+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82840+ if (unlikely(user->banned)) {
82841+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
82842+ user->banned = 0;
82843+ user->ban_expires = 0;
82844+ free_uid(user);
82845+ } else
82846+ return -EPERM;
82847+ }
82848+#endif
82849+ return 0;
82850+}
82851+
82852+int gr_process_user_ban(void)
82853+{
82854+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82855+ return __gr_process_user_ban(current->cred->user);
82856+#endif
82857+ return 0;
82858+}
82859diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
82860new file mode 100644
82861index 0000000..7512ea9
82862--- /dev/null
82863+++ b/grsecurity/grsec_sock.c
82864@@ -0,0 +1,275 @@
82865+#include <linux/kernel.h>
82866+#include <linux/module.h>
82867+#include <linux/sched.h>
82868+#include <linux/file.h>
82869+#include <linux/net.h>
82870+#include <linux/in.h>
82871+#include <linux/ip.h>
82872+#include <net/sock.h>
82873+#include <net/inet_sock.h>
82874+#include <linux/grsecurity.h>
82875+#include <linux/grinternal.h>
82876+#include <linux/gracl.h>
82877+
82878+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
82879+EXPORT_SYMBOL(gr_cap_rtnetlink);
82880+
82881+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
82882+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
82883+
82884+EXPORT_SYMBOL(gr_search_udp_recvmsg);
82885+EXPORT_SYMBOL(gr_search_udp_sendmsg);
82886+
82887+#ifdef CONFIG_UNIX_MODULE
82888+EXPORT_SYMBOL(gr_acl_handle_unix);
82889+EXPORT_SYMBOL(gr_acl_handle_mknod);
82890+EXPORT_SYMBOL(gr_handle_chroot_unix);
82891+EXPORT_SYMBOL(gr_handle_create);
82892+#endif
82893+
82894+#ifdef CONFIG_GRKERNSEC
82895+#define gr_conn_table_size 32749
82896+struct conn_table_entry {
82897+ struct conn_table_entry *next;
82898+ struct signal_struct *sig;
82899+};
82900+
82901+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
82902+DEFINE_SPINLOCK(gr_conn_table_lock);
82903+
82904+extern const char * gr_socktype_to_name(unsigned char type);
82905+extern const char * gr_proto_to_name(unsigned char proto);
82906+extern const char * gr_sockfamily_to_name(unsigned char family);
82907+
82908+static __inline__ int
82909+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
82910+{
82911+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
82912+}
82913+
82914+static __inline__ int
82915+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
82916+ __u16 sport, __u16 dport)
82917+{
82918+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
82919+ sig->gr_sport == sport && sig->gr_dport == dport))
82920+ return 1;
82921+ else
82922+ return 0;
82923+}
82924+
82925+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
82926+{
82927+ struct conn_table_entry **match;
82928+ unsigned int index;
82929+
82930+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
82931+ sig->gr_sport, sig->gr_dport,
82932+ gr_conn_table_size);
82933+
82934+ newent->sig = sig;
82935+
82936+ match = &gr_conn_table[index];
82937+ newent->next = *match;
82938+ *match = newent;
82939+
82940+ return;
82941+}
82942+
82943+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
82944+{
82945+ struct conn_table_entry *match, *last = NULL;
82946+ unsigned int index;
82947+
82948+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
82949+ sig->gr_sport, sig->gr_dport,
82950+ gr_conn_table_size);
82951+
82952+ match = gr_conn_table[index];
82953+ while (match && !conn_match(match->sig,
82954+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
82955+ sig->gr_dport)) {
82956+ last = match;
82957+ match = match->next;
82958+ }
82959+
82960+ if (match) {
82961+ if (last)
82962+ last->next = match->next;
82963+ else
82964+ gr_conn_table[index] = NULL;
82965+ kfree(match);
82966+ }
82967+
82968+ return;
82969+}
82970+
82971+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
82972+ __u16 sport, __u16 dport)
82973+{
82974+ struct conn_table_entry *match;
82975+ unsigned int index;
82976+
82977+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
82978+
82979+ match = gr_conn_table[index];
82980+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
82981+ match = match->next;
82982+
82983+ if (match)
82984+ return match->sig;
82985+ else
82986+ return NULL;
82987+}
82988+
82989+#endif
82990+
82991+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
82992+{
82993+#ifdef CONFIG_GRKERNSEC
82994+ struct signal_struct *sig = task->signal;
82995+ struct conn_table_entry *newent;
82996+
82997+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
82998+ if (newent == NULL)
82999+ return;
83000+ /* no bh lock needed since we are called with bh disabled */
83001+ spin_lock(&gr_conn_table_lock);
83002+ gr_del_task_from_ip_table_nolock(sig);
83003+ sig->gr_saddr = inet->rcv_saddr;
83004+ sig->gr_daddr = inet->daddr;
83005+ sig->gr_sport = inet->sport;
83006+ sig->gr_dport = inet->dport;
83007+ gr_add_to_task_ip_table_nolock(sig, newent);
83008+ spin_unlock(&gr_conn_table_lock);
83009+#endif
83010+ return;
83011+}
83012+
83013+void gr_del_task_from_ip_table(struct task_struct *task)
83014+{
83015+#ifdef CONFIG_GRKERNSEC
83016+ spin_lock_bh(&gr_conn_table_lock);
83017+ gr_del_task_from_ip_table_nolock(task->signal);
83018+ spin_unlock_bh(&gr_conn_table_lock);
83019+#endif
83020+ return;
83021+}
83022+
83023+void
83024+gr_attach_curr_ip(const struct sock *sk)
83025+{
83026+#ifdef CONFIG_GRKERNSEC
83027+ struct signal_struct *p, *set;
83028+ const struct inet_sock *inet = inet_sk(sk);
83029+
83030+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
83031+ return;
83032+
83033+ set = current->signal;
83034+
83035+ spin_lock_bh(&gr_conn_table_lock);
83036+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
83037+ inet->dport, inet->sport);
83038+ if (unlikely(p != NULL)) {
83039+ set->curr_ip = p->curr_ip;
83040+ set->used_accept = 1;
83041+ gr_del_task_from_ip_table_nolock(p);
83042+ spin_unlock_bh(&gr_conn_table_lock);
83043+ return;
83044+ }
83045+ spin_unlock_bh(&gr_conn_table_lock);
83046+
83047+ set->curr_ip = inet->daddr;
83048+ set->used_accept = 1;
83049+#endif
83050+ return;
83051+}
83052+
83053+int
83054+gr_handle_sock_all(const int family, const int type, const int protocol)
83055+{
83056+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83057+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
83058+ (family != AF_UNIX)) {
83059+ if (family == AF_INET)
83060+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
83061+ else
83062+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
83063+ return -EACCES;
83064+ }
83065+#endif
83066+ return 0;
83067+}
83068+
83069+int
83070+gr_handle_sock_server(const struct sockaddr *sck)
83071+{
83072+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83073+ if (grsec_enable_socket_server &&
83074+ in_group_p(grsec_socket_server_gid) &&
83075+ sck && (sck->sa_family != AF_UNIX) &&
83076+ (sck->sa_family != AF_LOCAL)) {
83077+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83078+ return -EACCES;
83079+ }
83080+#endif
83081+ return 0;
83082+}
83083+
83084+int
83085+gr_handle_sock_server_other(const struct sock *sck)
83086+{
83087+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83088+ if (grsec_enable_socket_server &&
83089+ in_group_p(grsec_socket_server_gid) &&
83090+ sck && (sck->sk_family != AF_UNIX) &&
83091+ (sck->sk_family != AF_LOCAL)) {
83092+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
83093+ return -EACCES;
83094+ }
83095+#endif
83096+ return 0;
83097+}
83098+
83099+int
83100+gr_handle_sock_client(const struct sockaddr *sck)
83101+{
83102+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83103+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
83104+ sck && (sck->sa_family != AF_UNIX) &&
83105+ (sck->sa_family != AF_LOCAL)) {
83106+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
83107+ return -EACCES;
83108+ }
83109+#endif
83110+ return 0;
83111+}
83112+
83113+kernel_cap_t
83114+gr_cap_rtnetlink(struct sock *sock)
83115+{
83116+#ifdef CONFIG_GRKERNSEC
83117+ if (!gr_acl_is_enabled())
83118+ return current_cap();
83119+ else if (sock->sk_protocol == NETLINK_ISCSI &&
83120+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
83121+ gr_is_capable(CAP_SYS_ADMIN))
83122+ return current_cap();
83123+ else if (sock->sk_protocol == NETLINK_AUDIT &&
83124+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
83125+ gr_is_capable(CAP_AUDIT_WRITE) &&
83126+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
83127+ gr_is_capable(CAP_AUDIT_CONTROL))
83128+ return current_cap();
83129+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
83130+ ((sock->sk_protocol == NETLINK_ROUTE) ?
83131+ gr_is_capable_nolog(CAP_NET_ADMIN) :
83132+ gr_is_capable(CAP_NET_ADMIN)))
83133+ return current_cap();
83134+ else
83135+ return __cap_empty_set;
83136+#else
83137+ return current_cap();
83138+#endif
83139+}
83140diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
83141new file mode 100644
83142index 0000000..31f3258
83143--- /dev/null
83144+++ b/grsecurity/grsec_sysctl.c
83145@@ -0,0 +1,499 @@
83146+#include <linux/kernel.h>
83147+#include <linux/sched.h>
83148+#include <linux/sysctl.h>
83149+#include <linux/grsecurity.h>
83150+#include <linux/grinternal.h>
83151+
83152+int
83153+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
83154+{
83155+#ifdef CONFIG_GRKERNSEC_SYSCTL
83156+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
83157+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
83158+ return -EACCES;
83159+ }
83160+#endif
83161+ return 0;
83162+}
83163+
83164+#ifdef CONFIG_GRKERNSEC_ROFS
83165+static int __maybe_unused one = 1;
83166+#endif
83167+
83168+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
83169+ctl_table grsecurity_table[] = {
83170+#ifdef CONFIG_GRKERNSEC_SYSCTL
83171+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
83172+#ifdef CONFIG_GRKERNSEC_IO
83173+ {
83174+ .ctl_name = CTL_UNNUMBERED,
83175+ .procname = "disable_priv_io",
83176+ .data = &grsec_disable_privio,
83177+ .maxlen = sizeof(int),
83178+ .mode = 0600,
83179+ .proc_handler = &proc_dointvec,
83180+ },
83181+#endif
83182+#endif
83183+#ifdef CONFIG_GRKERNSEC_LINK
83184+ {
83185+ .ctl_name = CTL_UNNUMBERED,
83186+ .procname = "linking_restrictions",
83187+ .data = &grsec_enable_link,
83188+ .maxlen = sizeof(int),
83189+ .mode = 0600,
83190+ .proc_handler = &proc_dointvec,
83191+ },
83192+#endif
83193+#ifdef CONFIG_GRKERNSEC_BRUTE
83194+ {
83195+ .ctl_name = CTL_UNNUMBERED,
83196+ .procname = "deter_bruteforce",
83197+ .data = &grsec_enable_brute,
83198+ .maxlen = sizeof(int),
83199+ .mode = 0600,
83200+ .proc_handler = &proc_dointvec,
83201+ },
83202+#endif
83203+#ifdef CONFIG_GRKERNSEC_FIFO
83204+ {
83205+ .ctl_name = CTL_UNNUMBERED,
83206+ .procname = "fifo_restrictions",
83207+ .data = &grsec_enable_fifo,
83208+ .maxlen = sizeof(int),
83209+ .mode = 0600,
83210+ .proc_handler = &proc_dointvec,
83211+ },
83212+#endif
83213+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
83214+ {
83215+ .ctl_name = CTL_UNNUMBERED,
83216+ .procname = "ptrace_readexec",
83217+ .data = &grsec_enable_ptrace_readexec,
83218+ .maxlen = sizeof(int),
83219+ .mode = 0600,
83220+ .proc_handler = &proc_dointvec,
83221+ },
83222+#endif
83223+#ifdef CONFIG_GRKERNSEC_SETXID
83224+ {
83225+ .ctl_name = CTL_UNNUMBERED,
83226+ .procname = "consistent_setxid",
83227+ .data = &grsec_enable_setxid,
83228+ .maxlen = sizeof(int),
83229+ .mode = 0600,
83230+ .proc_handler = &proc_dointvec,
83231+ },
83232+#endif
83233+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
83234+ {
83235+ .ctl_name = CTL_UNNUMBERED,
83236+ .procname = "ip_blackhole",
83237+ .data = &grsec_enable_blackhole,
83238+ .maxlen = sizeof(int),
83239+ .mode = 0600,
83240+ .proc_handler = &proc_dointvec,
83241+ },
83242+ {
83243+ .ctl_name = CTL_UNNUMBERED,
83244+ .procname = "lastack_retries",
83245+ .data = &grsec_lastack_retries,
83246+ .maxlen = sizeof(int),
83247+ .mode = 0600,
83248+ .proc_handler = &proc_dointvec,
83249+ },
83250+#endif
83251+#ifdef CONFIG_GRKERNSEC_EXECLOG
83252+ {
83253+ .ctl_name = CTL_UNNUMBERED,
83254+ .procname = "exec_logging",
83255+ .data = &grsec_enable_execlog,
83256+ .maxlen = sizeof(int),
83257+ .mode = 0600,
83258+ .proc_handler = &proc_dointvec,
83259+ },
83260+#endif
83261+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
83262+ {
83263+ .ctl_name = CTL_UNNUMBERED,
83264+ .procname = "rwxmap_logging",
83265+ .data = &grsec_enable_log_rwxmaps,
83266+ .maxlen = sizeof(int),
83267+ .mode = 0600,
83268+ .proc_handler = &proc_dointvec,
83269+ },
83270+#endif
83271+#ifdef CONFIG_GRKERNSEC_SIGNAL
83272+ {
83273+ .ctl_name = CTL_UNNUMBERED,
83274+ .procname = "signal_logging",
83275+ .data = &grsec_enable_signal,
83276+ .maxlen = sizeof(int),
83277+ .mode = 0600,
83278+ .proc_handler = &proc_dointvec,
83279+ },
83280+#endif
83281+#ifdef CONFIG_GRKERNSEC_FORKFAIL
83282+ {
83283+ .ctl_name = CTL_UNNUMBERED,
83284+ .procname = "forkfail_logging",
83285+ .data = &grsec_enable_forkfail,
83286+ .maxlen = sizeof(int),
83287+ .mode = 0600,
83288+ .proc_handler = &proc_dointvec,
83289+ },
83290+#endif
83291+#ifdef CONFIG_GRKERNSEC_TIME
83292+ {
83293+ .ctl_name = CTL_UNNUMBERED,
83294+ .procname = "timechange_logging",
83295+ .data = &grsec_enable_time,
83296+ .maxlen = sizeof(int),
83297+ .mode = 0600,
83298+ .proc_handler = &proc_dointvec,
83299+ },
83300+#endif
83301+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
83302+ {
83303+ .ctl_name = CTL_UNNUMBERED,
83304+ .procname = "chroot_deny_shmat",
83305+ .data = &grsec_enable_chroot_shmat,
83306+ .maxlen = sizeof(int),
83307+ .mode = 0600,
83308+ .proc_handler = &proc_dointvec,
83309+ },
83310+#endif
83311+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
83312+ {
83313+ .ctl_name = CTL_UNNUMBERED,
83314+ .procname = "chroot_deny_unix",
83315+ .data = &grsec_enable_chroot_unix,
83316+ .maxlen = sizeof(int),
83317+ .mode = 0600,
83318+ .proc_handler = &proc_dointvec,
83319+ },
83320+#endif
83321+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
83322+ {
83323+ .ctl_name = CTL_UNNUMBERED,
83324+ .procname = "chroot_deny_mount",
83325+ .data = &grsec_enable_chroot_mount,
83326+ .maxlen = sizeof(int),
83327+ .mode = 0600,
83328+ .proc_handler = &proc_dointvec,
83329+ },
83330+#endif
83331+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
83332+ {
83333+ .ctl_name = CTL_UNNUMBERED,
83334+ .procname = "chroot_deny_fchdir",
83335+ .data = &grsec_enable_chroot_fchdir,
83336+ .maxlen = sizeof(int),
83337+ .mode = 0600,
83338+ .proc_handler = &proc_dointvec,
83339+ },
83340+#endif
83341+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
83342+ {
83343+ .ctl_name = CTL_UNNUMBERED,
83344+ .procname = "chroot_deny_chroot",
83345+ .data = &grsec_enable_chroot_double,
83346+ .maxlen = sizeof(int),
83347+ .mode = 0600,
83348+ .proc_handler = &proc_dointvec,
83349+ },
83350+#endif
83351+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
83352+ {
83353+ .ctl_name = CTL_UNNUMBERED,
83354+ .procname = "chroot_deny_pivot",
83355+ .data = &grsec_enable_chroot_pivot,
83356+ .maxlen = sizeof(int),
83357+ .mode = 0600,
83358+ .proc_handler = &proc_dointvec,
83359+ },
83360+#endif
83361+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
83362+ {
83363+ .ctl_name = CTL_UNNUMBERED,
83364+ .procname = "chroot_enforce_chdir",
83365+ .data = &grsec_enable_chroot_chdir,
83366+ .maxlen = sizeof(int),
83367+ .mode = 0600,
83368+ .proc_handler = &proc_dointvec,
83369+ },
83370+#endif
83371+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
83372+ {
83373+ .ctl_name = CTL_UNNUMBERED,
83374+ .procname = "chroot_deny_chmod",
83375+ .data = &grsec_enable_chroot_chmod,
83376+ .maxlen = sizeof(int),
83377+ .mode = 0600,
83378+ .proc_handler = &proc_dointvec,
83379+ },
83380+#endif
83381+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
83382+ {
83383+ .ctl_name = CTL_UNNUMBERED,
83384+ .procname = "chroot_deny_mknod",
83385+ .data = &grsec_enable_chroot_mknod,
83386+ .maxlen = sizeof(int),
83387+ .mode = 0600,
83388+ .proc_handler = &proc_dointvec,
83389+ },
83390+#endif
83391+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
83392+ {
83393+ .ctl_name = CTL_UNNUMBERED,
83394+ .procname = "chroot_restrict_nice",
83395+ .data = &grsec_enable_chroot_nice,
83396+ .maxlen = sizeof(int),
83397+ .mode = 0600,
83398+ .proc_handler = &proc_dointvec,
83399+ },
83400+#endif
83401+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
83402+ {
83403+ .ctl_name = CTL_UNNUMBERED,
83404+ .procname = "chroot_execlog",
83405+ .data = &grsec_enable_chroot_execlog,
83406+ .maxlen = sizeof(int),
83407+ .mode = 0600,
83408+ .proc_handler = &proc_dointvec,
83409+ },
83410+#endif
83411+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
83412+ {
83413+ .ctl_name = CTL_UNNUMBERED,
83414+ .procname = "chroot_caps",
83415+ .data = &grsec_enable_chroot_caps,
83416+ .maxlen = sizeof(int),
83417+ .mode = 0600,
83418+ .proc_handler = &proc_dointvec,
83419+ },
83420+#endif
83421+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
83422+ {
83423+ .ctl_name = CTL_UNNUMBERED,
83424+ .procname = "chroot_deny_sysctl",
83425+ .data = &grsec_enable_chroot_sysctl,
83426+ .maxlen = sizeof(int),
83427+ .mode = 0600,
83428+ .proc_handler = &proc_dointvec,
83429+ },
83430+#endif
83431+#ifdef CONFIG_GRKERNSEC_TPE
83432+ {
83433+ .ctl_name = CTL_UNNUMBERED,
83434+ .procname = "tpe",
83435+ .data = &grsec_enable_tpe,
83436+ .maxlen = sizeof(int),
83437+ .mode = 0600,
83438+ .proc_handler = &proc_dointvec,
83439+ },
83440+ {
83441+ .ctl_name = CTL_UNNUMBERED,
83442+ .procname = "tpe_gid",
83443+ .data = &grsec_tpe_gid,
83444+ .maxlen = sizeof(int),
83445+ .mode = 0600,
83446+ .proc_handler = &proc_dointvec,
83447+ },
83448+#endif
83449+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83450+ {
83451+ .ctl_name = CTL_UNNUMBERED,
83452+ .procname = "tpe_invert",
83453+ .data = &grsec_enable_tpe_invert,
83454+ .maxlen = sizeof(int),
83455+ .mode = 0600,
83456+ .proc_handler = &proc_dointvec,
83457+ },
83458+#endif
83459+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83460+ {
83461+ .ctl_name = CTL_UNNUMBERED,
83462+ .procname = "tpe_restrict_all",
83463+ .data = &grsec_enable_tpe_all,
83464+ .maxlen = sizeof(int),
83465+ .mode = 0600,
83466+ .proc_handler = &proc_dointvec,
83467+ },
83468+#endif
83469+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83470+ {
83471+ .ctl_name = CTL_UNNUMBERED,
83472+ .procname = "socket_all",
83473+ .data = &grsec_enable_socket_all,
83474+ .maxlen = sizeof(int),
83475+ .mode = 0600,
83476+ .proc_handler = &proc_dointvec,
83477+ },
83478+ {
83479+ .ctl_name = CTL_UNNUMBERED,
83480+ .procname = "socket_all_gid",
83481+ .data = &grsec_socket_all_gid,
83482+ .maxlen = sizeof(int),
83483+ .mode = 0600,
83484+ .proc_handler = &proc_dointvec,
83485+ },
83486+#endif
83487+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83488+ {
83489+ .ctl_name = CTL_UNNUMBERED,
83490+ .procname = "socket_client",
83491+ .data = &grsec_enable_socket_client,
83492+ .maxlen = sizeof(int),
83493+ .mode = 0600,
83494+ .proc_handler = &proc_dointvec,
83495+ },
83496+ {
83497+ .ctl_name = CTL_UNNUMBERED,
83498+ .procname = "socket_client_gid",
83499+ .data = &grsec_socket_client_gid,
83500+ .maxlen = sizeof(int),
83501+ .mode = 0600,
83502+ .proc_handler = &proc_dointvec,
83503+ },
83504+#endif
83505+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83506+ {
83507+ .ctl_name = CTL_UNNUMBERED,
83508+ .procname = "socket_server",
83509+ .data = &grsec_enable_socket_server,
83510+ .maxlen = sizeof(int),
83511+ .mode = 0600,
83512+ .proc_handler = &proc_dointvec,
83513+ },
83514+ {
83515+ .ctl_name = CTL_UNNUMBERED,
83516+ .procname = "socket_server_gid",
83517+ .data = &grsec_socket_server_gid,
83518+ .maxlen = sizeof(int),
83519+ .mode = 0600,
83520+ .proc_handler = &proc_dointvec,
83521+ },
83522+#endif
83523+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
83524+ {
83525+ .ctl_name = CTL_UNNUMBERED,
83526+ .procname = "audit_group",
83527+ .data = &grsec_enable_group,
83528+ .maxlen = sizeof(int),
83529+ .mode = 0600,
83530+ .proc_handler = &proc_dointvec,
83531+ },
83532+ {
83533+ .ctl_name = CTL_UNNUMBERED,
83534+ .procname = "audit_gid",
83535+ .data = &grsec_audit_gid,
83536+ .maxlen = sizeof(int),
83537+ .mode = 0600,
83538+ .proc_handler = &proc_dointvec,
83539+ },
83540+#endif
83541+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
83542+ {
83543+ .ctl_name = CTL_UNNUMBERED,
83544+ .procname = "audit_chdir",
83545+ .data = &grsec_enable_chdir,
83546+ .maxlen = sizeof(int),
83547+ .mode = 0600,
83548+ .proc_handler = &proc_dointvec,
83549+ },
83550+#endif
83551+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
83552+ {
83553+ .ctl_name = CTL_UNNUMBERED,
83554+ .procname = "audit_mount",
83555+ .data = &grsec_enable_mount,
83556+ .maxlen = sizeof(int),
83557+ .mode = 0600,
83558+ .proc_handler = &proc_dointvec,
83559+ },
83560+#endif
83561+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
83562+ {
83563+ .ctl_name = CTL_UNNUMBERED,
83564+ .procname = "audit_textrel",
83565+ .data = &grsec_enable_audit_textrel,
83566+ .maxlen = sizeof(int),
83567+ .mode = 0600,
83568+ .proc_handler = &proc_dointvec,
83569+ },
83570+#endif
83571+#ifdef CONFIG_GRKERNSEC_DMESG
83572+ {
83573+ .ctl_name = CTL_UNNUMBERED,
83574+ .procname = "dmesg",
83575+ .data = &grsec_enable_dmesg,
83576+ .maxlen = sizeof(int),
83577+ .mode = 0600,
83578+ .proc_handler = &proc_dointvec,
83579+ },
83580+#endif
83581+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83582+ {
83583+ .ctl_name = CTL_UNNUMBERED,
83584+ .procname = "chroot_findtask",
83585+ .data = &grsec_enable_chroot_findtask,
83586+ .maxlen = sizeof(int),
83587+ .mode = 0600,
83588+ .proc_handler = &proc_dointvec,
83589+ },
83590+#endif
83591+#ifdef CONFIG_GRKERNSEC_RESLOG
83592+ {
83593+ .ctl_name = CTL_UNNUMBERED,
83594+ .procname = "resource_logging",
83595+ .data = &grsec_resource_logging,
83596+ .maxlen = sizeof(int),
83597+ .mode = 0600,
83598+ .proc_handler = &proc_dointvec,
83599+ },
83600+#endif
83601+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
83602+ {
83603+ .ctl_name = CTL_UNNUMBERED,
83604+ .procname = "audit_ptrace",
83605+ .data = &grsec_enable_audit_ptrace,
83606+ .maxlen = sizeof(int),
83607+ .mode = 0600,
83608+ .proc_handler = &proc_dointvec,
83609+ },
83610+#endif
83611+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
83612+ {
83613+ .ctl_name = CTL_UNNUMBERED,
83614+ .procname = "harden_ptrace",
83615+ .data = &grsec_enable_harden_ptrace,
83616+ .maxlen = sizeof(int),
83617+ .mode = 0600,
83618+ .proc_handler = &proc_dointvec,
83619+ },
83620+#endif
83621+ {
83622+ .ctl_name = CTL_UNNUMBERED,
83623+ .procname = "grsec_lock",
83624+ .data = &grsec_lock,
83625+ .maxlen = sizeof(int),
83626+ .mode = 0600,
83627+ .proc_handler = &proc_dointvec,
83628+ },
83629+#endif
83630+#ifdef CONFIG_GRKERNSEC_ROFS
83631+ {
83632+ .ctl_name = CTL_UNNUMBERED,
83633+ .procname = "romount_protect",
83634+ .data = &grsec_enable_rofs,
83635+ .maxlen = sizeof(int),
83636+ .mode = 0600,
83637+ .proc_handler = &proc_dointvec_minmax,
83638+ .extra1 = &one,
83639+ .extra2 = &one,
83640+ },
83641+#endif
83642+ { .ctl_name = 0 }
83643+};
83644+#endif
83645diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
83646new file mode 100644
83647index 0000000..0dc13c3
83648--- /dev/null
83649+++ b/grsecurity/grsec_time.c
83650@@ -0,0 +1,16 @@
83651+#include <linux/kernel.h>
83652+#include <linux/sched.h>
83653+#include <linux/grinternal.h>
83654+#include <linux/module.h>
83655+
83656+void
83657+gr_log_timechange(void)
83658+{
83659+#ifdef CONFIG_GRKERNSEC_TIME
83660+ if (grsec_enable_time)
83661+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
83662+#endif
83663+ return;
83664+}
83665+
83666+EXPORT_SYMBOL(gr_log_timechange);
83667diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
83668new file mode 100644
83669index 0000000..07e0dc0
83670--- /dev/null
83671+++ b/grsecurity/grsec_tpe.c
83672@@ -0,0 +1,73 @@
83673+#include <linux/kernel.h>
83674+#include <linux/sched.h>
83675+#include <linux/file.h>
83676+#include <linux/fs.h>
83677+#include <linux/grinternal.h>
83678+
83679+extern int gr_acl_tpe_check(void);
83680+
83681+int
83682+gr_tpe_allow(const struct file *file)
83683+{
83684+#ifdef CONFIG_GRKERNSEC
83685+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
83686+ const struct cred *cred = current_cred();
83687+ char *msg = NULL;
83688+ char *msg2 = NULL;
83689+
83690+ // never restrict root
83691+ if (!cred->uid)
83692+ return 1;
83693+
83694+ if (grsec_enable_tpe) {
83695+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83696+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
83697+ msg = "not being in trusted group";
83698+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
83699+ msg = "being in untrusted group";
83700+#else
83701+ if (in_group_p(grsec_tpe_gid))
83702+ msg = "being in untrusted group";
83703+#endif
83704+ }
83705+ if (!msg && gr_acl_tpe_check())
83706+ msg = "being in untrusted role";
83707+
83708+ // not in any affected group/role
83709+ if (!msg)
83710+ goto next_check;
83711+
83712+ if (inode->i_uid)
83713+ msg2 = "file in non-root-owned directory";
83714+ else if (inode->i_mode & S_IWOTH)
83715+ msg2 = "file in world-writable directory";
83716+ else if (inode->i_mode & S_IWGRP)
83717+ msg2 = "file in group-writable directory";
83718+
83719+ if (msg && msg2) {
83720+ char fullmsg[70] = {0};
83721+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
83722+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
83723+ return 0;
83724+ }
83725+ msg = NULL;
83726+next_check:
83727+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83728+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
83729+ return 1;
83730+
83731+ if (inode->i_uid && (inode->i_uid != cred->uid))
83732+ msg = "directory not owned by user";
83733+ else if (inode->i_mode & S_IWOTH)
83734+ msg = "file in world-writable directory";
83735+ else if (inode->i_mode & S_IWGRP)
83736+ msg = "file in group-writable directory";
83737+
83738+ if (msg) {
83739+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
83740+ return 0;
83741+ }
83742+#endif
83743+#endif
83744+ return 1;
83745+}
83746diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
83747new file mode 100644
83748index 0000000..9f7b1ac
83749--- /dev/null
83750+++ b/grsecurity/grsum.c
83751@@ -0,0 +1,61 @@
83752+#include <linux/err.h>
83753+#include <linux/kernel.h>
83754+#include <linux/sched.h>
83755+#include <linux/mm.h>
83756+#include <linux/scatterlist.h>
83757+#include <linux/crypto.h>
83758+#include <linux/gracl.h>
83759+
83760+
83761+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
83762+#error "crypto and sha256 must be built into the kernel"
83763+#endif
83764+
83765+int
83766+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
83767+{
83768+ char *p;
83769+ struct crypto_hash *tfm;
83770+ struct hash_desc desc;
83771+ struct scatterlist sg;
83772+ unsigned char temp_sum[GR_SHA_LEN];
83773+ volatile int retval = 0;
83774+ volatile int dummy = 0;
83775+ unsigned int i;
83776+
83777+ sg_init_table(&sg, 1);
83778+
83779+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
83780+ if (IS_ERR(tfm)) {
83781+ /* should never happen, since sha256 should be built in */
83782+ return 1;
83783+ }
83784+
83785+ desc.tfm = tfm;
83786+ desc.flags = 0;
83787+
83788+ crypto_hash_init(&desc);
83789+
83790+ p = salt;
83791+ sg_set_buf(&sg, p, GR_SALT_LEN);
83792+ crypto_hash_update(&desc, &sg, sg.length);
83793+
83794+ p = entry->pw;
83795+ sg_set_buf(&sg, p, strlen(p));
83796+
83797+ crypto_hash_update(&desc, &sg, sg.length);
83798+
83799+ crypto_hash_final(&desc, temp_sum);
83800+
83801+ memset(entry->pw, 0, GR_PW_LEN);
83802+
83803+ for (i = 0; i < GR_SHA_LEN; i++)
83804+ if (sum[i] != temp_sum[i])
83805+ retval = 1;
83806+ else
83807+ dummy = 1; // waste a cycle
83808+
83809+ crypto_free_hash(tfm);
83810+
83811+ return retval;
83812+}
83813diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
83814index 3cd9ccd..fe16d47 100644
83815--- a/include/acpi/acpi_bus.h
83816+++ b/include/acpi/acpi_bus.h
83817@@ -107,7 +107,7 @@ struct acpi_device_ops {
83818 acpi_op_bind bind;
83819 acpi_op_unbind unbind;
83820 acpi_op_notify notify;
83821-};
83822+} __no_const;
83823
83824 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
83825
83826diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
83827index f4906f6..71feb73 100644
83828--- a/include/acpi/acpi_drivers.h
83829+++ b/include/acpi/acpi_drivers.h
83830@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
83831 Dock Station
83832 -------------------------------------------------------------------------- */
83833 struct acpi_dock_ops {
83834- acpi_notify_handler handler;
83835- acpi_notify_handler uevent;
83836+ const acpi_notify_handler handler;
83837+ const acpi_notify_handler uevent;
83838 };
83839
83840 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
83841@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
83842 extern int register_dock_notifier(struct notifier_block *nb);
83843 extern void unregister_dock_notifier(struct notifier_block *nb);
83844 extern int register_hotplug_dock_device(acpi_handle handle,
83845- struct acpi_dock_ops *ops,
83846+ const struct acpi_dock_ops *ops,
83847 void *context);
83848 extern void unregister_hotplug_dock_device(acpi_handle handle);
83849 #else
83850@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
83851 {
83852 }
83853 static inline int register_hotplug_dock_device(acpi_handle handle,
83854- struct acpi_dock_ops *ops,
83855+ const struct acpi_dock_ops *ops,
83856 void *context)
83857 {
83858 return -ENODEV;
83859diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
83860index b7babf0..a9ac9fc 100644
83861--- a/include/asm-generic/atomic-long.h
83862+++ b/include/asm-generic/atomic-long.h
83863@@ -22,6 +22,12 @@
83864
83865 typedef atomic64_t atomic_long_t;
83866
83867+#ifdef CONFIG_PAX_REFCOUNT
83868+typedef atomic64_unchecked_t atomic_long_unchecked_t;
83869+#else
83870+typedef atomic64_t atomic_long_unchecked_t;
83871+#endif
83872+
83873 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
83874
83875 static inline long atomic_long_read(atomic_long_t *l)
83876@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
83877 return (long)atomic64_read(v);
83878 }
83879
83880+#ifdef CONFIG_PAX_REFCOUNT
83881+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
83882+{
83883+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83884+
83885+ return (long)atomic64_read_unchecked(v);
83886+}
83887+#endif
83888+
83889 static inline void atomic_long_set(atomic_long_t *l, long i)
83890 {
83891 atomic64_t *v = (atomic64_t *)l;
83892@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
83893 atomic64_set(v, i);
83894 }
83895
83896+#ifdef CONFIG_PAX_REFCOUNT
83897+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
83898+{
83899+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83900+
83901+ atomic64_set_unchecked(v, i);
83902+}
83903+#endif
83904+
83905 static inline void atomic_long_inc(atomic_long_t *l)
83906 {
83907 atomic64_t *v = (atomic64_t *)l;
83908@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
83909 atomic64_inc(v);
83910 }
83911
83912+#ifdef CONFIG_PAX_REFCOUNT
83913+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
83914+{
83915+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83916+
83917+ atomic64_inc_unchecked(v);
83918+}
83919+#endif
83920+
83921 static inline void atomic_long_dec(atomic_long_t *l)
83922 {
83923 atomic64_t *v = (atomic64_t *)l;
83924@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
83925 atomic64_dec(v);
83926 }
83927
83928+#ifdef CONFIG_PAX_REFCOUNT
83929+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
83930+{
83931+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83932+
83933+ atomic64_dec_unchecked(v);
83934+}
83935+#endif
83936+
83937 static inline void atomic_long_add(long i, atomic_long_t *l)
83938 {
83939 atomic64_t *v = (atomic64_t *)l;
83940@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
83941 atomic64_add(i, v);
83942 }
83943
83944+#ifdef CONFIG_PAX_REFCOUNT
83945+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
83946+{
83947+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83948+
83949+ atomic64_add_unchecked(i, v);
83950+}
83951+#endif
83952+
83953 static inline void atomic_long_sub(long i, atomic_long_t *l)
83954 {
83955 atomic64_t *v = (atomic64_t *)l;
83956@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
83957 return (long)atomic64_inc_return(v);
83958 }
83959
83960+#ifdef CONFIG_PAX_REFCOUNT
83961+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
83962+{
83963+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83964+
83965+ return (long)atomic64_inc_return_unchecked(v);
83966+}
83967+#endif
83968+
83969 static inline long atomic_long_dec_return(atomic_long_t *l)
83970 {
83971 atomic64_t *v = (atomic64_t *)l;
83972@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
83973
83974 typedef atomic_t atomic_long_t;
83975
83976+#ifdef CONFIG_PAX_REFCOUNT
83977+typedef atomic_unchecked_t atomic_long_unchecked_t;
83978+#else
83979+typedef atomic_t atomic_long_unchecked_t;
83980+#endif
83981+
83982 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
83983 static inline long atomic_long_read(atomic_long_t *l)
83984 {
83985@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
83986 return (long)atomic_read(v);
83987 }
83988
83989+#ifdef CONFIG_PAX_REFCOUNT
83990+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
83991+{
83992+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83993+
83994+ return (long)atomic_read_unchecked(v);
83995+}
83996+#endif
83997+
83998 static inline void atomic_long_set(atomic_long_t *l, long i)
83999 {
84000 atomic_t *v = (atomic_t *)l;
84001@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
84002 atomic_set(v, i);
84003 }
84004
84005+#ifdef CONFIG_PAX_REFCOUNT
84006+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
84007+{
84008+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84009+
84010+ atomic_set_unchecked(v, i);
84011+}
84012+#endif
84013+
84014 static inline void atomic_long_inc(atomic_long_t *l)
84015 {
84016 atomic_t *v = (atomic_t *)l;
84017@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
84018 atomic_inc(v);
84019 }
84020
84021+#ifdef CONFIG_PAX_REFCOUNT
84022+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
84023+{
84024+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84025+
84026+ atomic_inc_unchecked(v);
84027+}
84028+#endif
84029+
84030 static inline void atomic_long_dec(atomic_long_t *l)
84031 {
84032 atomic_t *v = (atomic_t *)l;
84033@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
84034 atomic_dec(v);
84035 }
84036
84037+#ifdef CONFIG_PAX_REFCOUNT
84038+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
84039+{
84040+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84041+
84042+ atomic_dec_unchecked(v);
84043+}
84044+#endif
84045+
84046 static inline void atomic_long_add(long i, atomic_long_t *l)
84047 {
84048 atomic_t *v = (atomic_t *)l;
84049@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
84050 atomic_add(i, v);
84051 }
84052
84053+#ifdef CONFIG_PAX_REFCOUNT
84054+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
84055+{
84056+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84057+
84058+ atomic_add_unchecked(i, v);
84059+}
84060+#endif
84061+
84062 static inline void atomic_long_sub(long i, atomic_long_t *l)
84063 {
84064 atomic_t *v = (atomic_t *)l;
84065@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
84066 return (long)atomic_inc_return(v);
84067 }
84068
84069+#ifdef CONFIG_PAX_REFCOUNT
84070+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
84071+{
84072+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
84073+
84074+ return (long)atomic_inc_return_unchecked(v);
84075+}
84076+#endif
84077+
84078 static inline long atomic_long_dec_return(atomic_long_t *l)
84079 {
84080 atomic_t *v = (atomic_t *)l;
84081@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
84082
84083 #endif /* BITS_PER_LONG == 64 */
84084
84085+#ifdef CONFIG_PAX_REFCOUNT
84086+static inline void pax_refcount_needs_these_functions(void)
84087+{
84088+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
84089+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
84090+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
84091+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
84092+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
84093+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
84094+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
84095+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
84096+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
84097+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
84098+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
84099+
84100+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
84101+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
84102+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
84103+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
84104+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
84105+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
84106+}
84107+#else
84108+#define atomic_read_unchecked(v) atomic_read(v)
84109+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
84110+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
84111+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
84112+#define atomic_inc_unchecked(v) atomic_inc(v)
84113+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
84114+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
84115+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
84116+#define atomic_dec_unchecked(v) atomic_dec(v)
84117+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
84118+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
84119+
84120+#define atomic_long_read_unchecked(v) atomic_long_read(v)
84121+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
84122+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
84123+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
84124+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
84125+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
84126+#endif
84127+
84128 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
84129diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
84130index b18ce4f..2ee2843 100644
84131--- a/include/asm-generic/atomic64.h
84132+++ b/include/asm-generic/atomic64.h
84133@@ -16,6 +16,8 @@ typedef struct {
84134 long long counter;
84135 } atomic64_t;
84136
84137+typedef atomic64_t atomic64_unchecked_t;
84138+
84139 #define ATOMIC64_INIT(i) { (i) }
84140
84141 extern long long atomic64_read(const atomic64_t *v);
84142@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
84143 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
84144 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
84145
84146+#define atomic64_read_unchecked(v) atomic64_read(v)
84147+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
84148+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
84149+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
84150+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
84151+#define atomic64_inc_unchecked(v) atomic64_inc(v)
84152+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
84153+#define atomic64_dec_unchecked(v) atomic64_dec(v)
84154+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
84155+
84156 #endif /* _ASM_GENERIC_ATOMIC64_H */
84157diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
84158index d48ddf0..656a0ac 100644
84159--- a/include/asm-generic/bug.h
84160+++ b/include/asm-generic/bug.h
84161@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
84162
84163 #else /* !CONFIG_BUG */
84164 #ifndef HAVE_ARCH_BUG
84165-#define BUG() do {} while(0)
84166+#define BUG() do { for (;;) ; } while(0)
84167 #endif
84168
84169 #ifndef HAVE_ARCH_BUG_ON
84170-#define BUG_ON(condition) do { if (condition) ; } while(0)
84171+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
84172 #endif
84173
84174 #ifndef HAVE_ARCH_WARN_ON
84175diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
84176index 1bfcfe5..e04c5c9 100644
84177--- a/include/asm-generic/cache.h
84178+++ b/include/asm-generic/cache.h
84179@@ -6,7 +6,7 @@
84180 * cache lines need to provide their own cache.h.
84181 */
84182
84183-#define L1_CACHE_SHIFT 5
84184-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
84185+#define L1_CACHE_SHIFT 5UL
84186+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
84187
84188 #endif /* __ASM_GENERIC_CACHE_H */
84189diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
84190index 6920695..41038bc 100644
84191--- a/include/asm-generic/dma-mapping-common.h
84192+++ b/include/asm-generic/dma-mapping-common.h
84193@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
84194 enum dma_data_direction dir,
84195 struct dma_attrs *attrs)
84196 {
84197- struct dma_map_ops *ops = get_dma_ops(dev);
84198+ const struct dma_map_ops *ops = get_dma_ops(dev);
84199 dma_addr_t addr;
84200
84201 kmemcheck_mark_initialized(ptr, size);
84202@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
84203 enum dma_data_direction dir,
84204 struct dma_attrs *attrs)
84205 {
84206- struct dma_map_ops *ops = get_dma_ops(dev);
84207+ const struct dma_map_ops *ops = get_dma_ops(dev);
84208
84209 BUG_ON(!valid_dma_direction(dir));
84210 if (ops->unmap_page)
84211@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
84212 int nents, enum dma_data_direction dir,
84213 struct dma_attrs *attrs)
84214 {
84215- struct dma_map_ops *ops = get_dma_ops(dev);
84216+ const struct dma_map_ops *ops = get_dma_ops(dev);
84217 int i, ents;
84218 struct scatterlist *s;
84219
84220@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
84221 int nents, enum dma_data_direction dir,
84222 struct dma_attrs *attrs)
84223 {
84224- struct dma_map_ops *ops = get_dma_ops(dev);
84225+ const struct dma_map_ops *ops = get_dma_ops(dev);
84226
84227 BUG_ON(!valid_dma_direction(dir));
84228 debug_dma_unmap_sg(dev, sg, nents, dir);
84229@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84230 size_t offset, size_t size,
84231 enum dma_data_direction dir)
84232 {
84233- struct dma_map_ops *ops = get_dma_ops(dev);
84234+ const struct dma_map_ops *ops = get_dma_ops(dev);
84235 dma_addr_t addr;
84236
84237 kmemcheck_mark_initialized(page_address(page) + offset, size);
84238@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
84239 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
84240 size_t size, enum dma_data_direction dir)
84241 {
84242- struct dma_map_ops *ops = get_dma_ops(dev);
84243+ const struct dma_map_ops *ops = get_dma_ops(dev);
84244
84245 BUG_ON(!valid_dma_direction(dir));
84246 if (ops->unmap_page)
84247@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
84248 size_t size,
84249 enum dma_data_direction dir)
84250 {
84251- struct dma_map_ops *ops = get_dma_ops(dev);
84252+ const struct dma_map_ops *ops = get_dma_ops(dev);
84253
84254 BUG_ON(!valid_dma_direction(dir));
84255 if (ops->sync_single_for_cpu)
84256@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
84257 dma_addr_t addr, size_t size,
84258 enum dma_data_direction dir)
84259 {
84260- struct dma_map_ops *ops = get_dma_ops(dev);
84261+ const struct dma_map_ops *ops = get_dma_ops(dev);
84262
84263 BUG_ON(!valid_dma_direction(dir));
84264 if (ops->sync_single_for_device)
84265@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
84266 size_t size,
84267 enum dma_data_direction dir)
84268 {
84269- struct dma_map_ops *ops = get_dma_ops(dev);
84270+ const struct dma_map_ops *ops = get_dma_ops(dev);
84271
84272 BUG_ON(!valid_dma_direction(dir));
84273 if (ops->sync_single_range_for_cpu) {
84274@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
84275 size_t size,
84276 enum dma_data_direction dir)
84277 {
84278- struct dma_map_ops *ops = get_dma_ops(dev);
84279+ const struct dma_map_ops *ops = get_dma_ops(dev);
84280
84281 BUG_ON(!valid_dma_direction(dir));
84282 if (ops->sync_single_range_for_device) {
84283@@ -155,7 +155,7 @@ static inline void
84284 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
84285 int nelems, enum dma_data_direction dir)
84286 {
84287- struct dma_map_ops *ops = get_dma_ops(dev);
84288+ const struct dma_map_ops *ops = get_dma_ops(dev);
84289
84290 BUG_ON(!valid_dma_direction(dir));
84291 if (ops->sync_sg_for_cpu)
84292@@ -167,7 +167,7 @@ static inline void
84293 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
84294 int nelems, enum dma_data_direction dir)
84295 {
84296- struct dma_map_ops *ops = get_dma_ops(dev);
84297+ const struct dma_map_ops *ops = get_dma_ops(dev);
84298
84299 BUG_ON(!valid_dma_direction(dir));
84300 if (ops->sync_sg_for_device)
84301diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
84302index 0d68a1e..b74a761 100644
84303--- a/include/asm-generic/emergency-restart.h
84304+++ b/include/asm-generic/emergency-restart.h
84305@@ -1,7 +1,7 @@
84306 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
84307 #define _ASM_GENERIC_EMERGENCY_RESTART_H
84308
84309-static inline void machine_emergency_restart(void)
84310+static inline __noreturn void machine_emergency_restart(void)
84311 {
84312 machine_restart(NULL);
84313 }
84314diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
84315index 3c2344f..4590a7d 100644
84316--- a/include/asm-generic/futex.h
84317+++ b/include/asm-generic/futex.h
84318@@ -6,7 +6,7 @@
84319 #include <asm/errno.h>
84320
84321 static inline int
84322-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84323+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
84324 {
84325 int op = (encoded_op >> 28) & 7;
84326 int cmp = (encoded_op >> 24) & 15;
84327@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
84328 }
84329
84330 static inline int
84331-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
84332+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
84333 {
84334 return -ENOSYS;
84335 }
84336diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
84337index e5f234a..cdb16b3 100644
84338--- a/include/asm-generic/kmap_types.h
84339+++ b/include/asm-generic/kmap_types.h
84340@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
84341 KMAP_D(16) KM_IRQ_PTE,
84342 KMAP_D(17) KM_NMI,
84343 KMAP_D(18) KM_NMI_PTE,
84344-KMAP_D(19) KM_TYPE_NR
84345+KMAP_D(19) KM_CLEARPAGE,
84346+KMAP_D(20) KM_TYPE_NR
84347 };
84348
84349 #undef KMAP_D
84350diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
84351index fc21844..2ee9629 100644
84352--- a/include/asm-generic/local.h
84353+++ b/include/asm-generic/local.h
84354@@ -39,6 +39,7 @@ typedef struct
84355 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
84356 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
84357 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
84358+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
84359
84360 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
84361 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
84362diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
84363index 725612b..9cc513a 100644
84364--- a/include/asm-generic/pgtable-nopmd.h
84365+++ b/include/asm-generic/pgtable-nopmd.h
84366@@ -1,14 +1,19 @@
84367 #ifndef _PGTABLE_NOPMD_H
84368 #define _PGTABLE_NOPMD_H
84369
84370-#ifndef __ASSEMBLY__
84371-
84372 #include <asm-generic/pgtable-nopud.h>
84373
84374-struct mm_struct;
84375-
84376 #define __PAGETABLE_PMD_FOLDED
84377
84378+#define PMD_SHIFT PUD_SHIFT
84379+#define PTRS_PER_PMD 1
84380+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
84381+#define PMD_MASK (~(PMD_SIZE-1))
84382+
84383+#ifndef __ASSEMBLY__
84384+
84385+struct mm_struct;
84386+
84387 /*
84388 * Having the pmd type consist of a pud gets the size right, and allows
84389 * us to conceptually access the pud entry that this pmd is folded into
84390@@ -16,11 +21,6 @@ struct mm_struct;
84391 */
84392 typedef struct { pud_t pud; } pmd_t;
84393
84394-#define PMD_SHIFT PUD_SHIFT
84395-#define PTRS_PER_PMD 1
84396-#define PMD_SIZE (1UL << PMD_SHIFT)
84397-#define PMD_MASK (~(PMD_SIZE-1))
84398-
84399 /*
84400 * The "pud_xxx()" functions here are trivial for a folded two-level
84401 * setup: the pmd is never bad, and a pmd always exists (as it's folded
84402diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
84403index 810431d..ccc3638 100644
84404--- a/include/asm-generic/pgtable-nopud.h
84405+++ b/include/asm-generic/pgtable-nopud.h
84406@@ -1,10 +1,15 @@
84407 #ifndef _PGTABLE_NOPUD_H
84408 #define _PGTABLE_NOPUD_H
84409
84410-#ifndef __ASSEMBLY__
84411-
84412 #define __PAGETABLE_PUD_FOLDED
84413
84414+#define PUD_SHIFT PGDIR_SHIFT
84415+#define PTRS_PER_PUD 1
84416+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
84417+#define PUD_MASK (~(PUD_SIZE-1))
84418+
84419+#ifndef __ASSEMBLY__
84420+
84421 /*
84422 * Having the pud type consist of a pgd gets the size right, and allows
84423 * us to conceptually access the pgd entry that this pud is folded into
84424@@ -12,11 +17,6 @@
84425 */
84426 typedef struct { pgd_t pgd; } pud_t;
84427
84428-#define PUD_SHIFT PGDIR_SHIFT
84429-#define PTRS_PER_PUD 1
84430-#define PUD_SIZE (1UL << PUD_SHIFT)
84431-#define PUD_MASK (~(PUD_SIZE-1))
84432-
84433 /*
84434 * The "pgd_xxx()" functions here are trivial for a folded two-level
84435 * setup: the pud is never bad, and a pud always exists (as it's folded
84436diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
84437index e2bd73e..fea8ed3 100644
84438--- a/include/asm-generic/pgtable.h
84439+++ b/include/asm-generic/pgtable.h
84440@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
84441 unsigned long size);
84442 #endif
84443
84444+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
84445+static inline unsigned long pax_open_kernel(void) { return 0; }
84446+#endif
84447+
84448+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
84449+static inline unsigned long pax_close_kernel(void) { return 0; }
84450+#endif
84451+
84452 #endif /* !__ASSEMBLY__ */
84453
84454 #endif /* _ASM_GENERIC_PGTABLE_H */
84455diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
84456index b218b85..043ee5b 100644
84457--- a/include/asm-generic/uaccess.h
84458+++ b/include/asm-generic/uaccess.h
84459@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
84460 */
84461 #ifndef __copy_from_user
84462 static inline __must_check long __copy_from_user(void *to,
84463+ const void __user * from, unsigned long n) __size_overflow(3);
84464+static inline __must_check long __copy_from_user(void *to,
84465 const void __user * from, unsigned long n)
84466 {
84467 if (__builtin_constant_p(n)) {
84468@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
84469
84470 #ifndef __copy_to_user
84471 static inline __must_check long __copy_to_user(void __user *to,
84472+ const void *from, unsigned long n) __size_overflow(3);
84473+static inline __must_check long __copy_to_user(void __user *to,
84474 const void *from, unsigned long n)
84475 {
84476 if (__builtin_constant_p(n)) {
84477@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
84478 -EFAULT; \
84479 })
84480
84481+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
84482 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
84483 {
84484 size = __copy_from_user(x, ptr, size);
84485@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
84486 #define __copy_to_user_inatomic __copy_to_user
84487 #endif
84488
84489+static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
84490 static inline long copy_from_user(void *to,
84491 const void __user * from, unsigned long n)
84492 {
84493@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
84494 return n;
84495 }
84496
84497+static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
84498 static inline long copy_to_user(void __user *to,
84499 const void *from, unsigned long n)
84500 {
84501@@ -307,6 +314,8 @@ static inline long strlen_user(const char __user *src)
84502 */
84503 #ifndef __clear_user
84504 static inline __must_check unsigned long
84505+__clear_user(void __user *to, unsigned long n) __size_overflow(2);
84506+static inline __must_check unsigned long
84507 __clear_user(void __user *to, unsigned long n)
84508 {
84509 memset((void __force *)to, 0, n);
84510@@ -315,6 +324,8 @@ __clear_user(void __user *to, unsigned long n)
84511 #endif
84512
84513 static inline __must_check unsigned long
84514+clear_user(void __user *to, unsigned long n) __size_overflow(2);
84515+static inline __must_check unsigned long
84516 clear_user(void __user *to, unsigned long n)
84517 {
84518 might_sleep();
84519diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
84520index b6e818f..21aa58a 100644
84521--- a/include/asm-generic/vmlinux.lds.h
84522+++ b/include/asm-generic/vmlinux.lds.h
84523@@ -199,6 +199,7 @@
84524 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
84525 VMLINUX_SYMBOL(__start_rodata) = .; \
84526 *(.rodata) *(.rodata.*) \
84527+ *(.data.read_only) \
84528 *(__vermagic) /* Kernel version magic */ \
84529 *(__markers_strings) /* Markers: strings */ \
84530 *(__tracepoints_strings)/* Tracepoints: strings */ \
84531@@ -656,22 +657,24 @@
84532 * section in the linker script will go there too. @phdr should have
84533 * a leading colon.
84534 *
84535- * Note that this macros defines __per_cpu_load as an absolute symbol.
84536+ * Note that this macros defines per_cpu_load as an absolute symbol.
84537 * If there is no need to put the percpu section at a predetermined
84538 * address, use PERCPU().
84539 */
84540 #define PERCPU_VADDR(vaddr, phdr) \
84541- VMLINUX_SYMBOL(__per_cpu_load) = .; \
84542- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
84543+ per_cpu_load = .; \
84544+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
84545 - LOAD_OFFSET) { \
84546+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
84547 VMLINUX_SYMBOL(__per_cpu_start) = .; \
84548 *(.data.percpu.first) \
84549- *(.data.percpu.page_aligned) \
84550 *(.data.percpu) \
84551+ . = ALIGN(PAGE_SIZE); \
84552+ *(.data.percpu.page_aligned) \
84553 *(.data.percpu.shared_aligned) \
84554 VMLINUX_SYMBOL(__per_cpu_end) = .; \
84555 } phdr \
84556- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
84557+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
84558
84559 /**
84560 * PERCPU - define output section for percpu area, simple version
84561diff --git a/include/drm/drmP.h b/include/drm/drmP.h
84562index ebab6a6..351dba1 100644
84563--- a/include/drm/drmP.h
84564+++ b/include/drm/drmP.h
84565@@ -71,6 +71,7 @@
84566 #include <linux/workqueue.h>
84567 #include <linux/poll.h>
84568 #include <asm/pgalloc.h>
84569+#include <asm/local.h>
84570 #include "drm.h"
84571
84572 #include <linux/idr.h>
84573@@ -814,7 +815,7 @@ struct drm_driver {
84574 void (*vgaarb_irq)(struct drm_device *dev, bool state);
84575
84576 /* Driver private ops for this object */
84577- struct vm_operations_struct *gem_vm_ops;
84578+ const struct vm_operations_struct *gem_vm_ops;
84579
84580 int major;
84581 int minor;
84582@@ -917,7 +918,7 @@ struct drm_device {
84583
84584 /** \name Usage Counters */
84585 /*@{ */
84586- int open_count; /**< Outstanding files open */
84587+ local_t open_count; /**< Outstanding files open */
84588 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
84589 atomic_t vma_count; /**< Outstanding vma areas open */
84590 int buf_use; /**< Buffers in use -- cannot alloc */
84591@@ -928,7 +929,7 @@ struct drm_device {
84592 /*@{ */
84593 unsigned long counters;
84594 enum drm_stat_type types[15];
84595- atomic_t counts[15];
84596+ atomic_unchecked_t counts[15];
84597 /*@} */
84598
84599 struct list_head filelist;
84600@@ -1016,7 +1017,7 @@ struct drm_device {
84601 struct pci_controller *hose;
84602 #endif
84603 struct drm_sg_mem *sg; /**< Scatter gather memory */
84604- unsigned int num_crtcs; /**< Number of CRTCs on this device */
84605+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
84606 void *dev_private; /**< device private data */
84607 void *mm_private;
84608 struct address_space *dev_mapping;
84609@@ -1042,11 +1043,11 @@ struct drm_device {
84610 spinlock_t object_name_lock;
84611 struct idr object_name_idr;
84612 atomic_t object_count;
84613- atomic_t object_memory;
84614+ atomic_unchecked_t object_memory;
84615 atomic_t pin_count;
84616- atomic_t pin_memory;
84617+ atomic_unchecked_t pin_memory;
84618 atomic_t gtt_count;
84619- atomic_t gtt_memory;
84620+ atomic_unchecked_t gtt_memory;
84621 uint32_t gtt_total;
84622 uint32_t invalidate_domains; /* domains pending invalidation */
84623 uint32_t flush_domains; /* domains pending flush */
84624diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
84625index b29e201..3413cc9 100644
84626--- a/include/drm/drm_crtc_helper.h
84627+++ b/include/drm/drm_crtc_helper.h
84628@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
84629
84630 /* reload the current crtc LUT */
84631 void (*load_lut)(struct drm_crtc *crtc);
84632-};
84633+} __no_const;
84634
84635 struct drm_encoder_helper_funcs {
84636 void (*dpms)(struct drm_encoder *encoder, int mode);
84637@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
84638 struct drm_connector *connector);
84639 /* disable encoder when not in use - more explicit than dpms off */
84640 void (*disable)(struct drm_encoder *encoder);
84641-};
84642+} __no_const;
84643
84644 struct drm_connector_helper_funcs {
84645 int (*get_modes)(struct drm_connector *connector);
84646diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
84647index b199170..6f9e64c 100644
84648--- a/include/drm/ttm/ttm_memory.h
84649+++ b/include/drm/ttm/ttm_memory.h
84650@@ -47,7 +47,7 @@
84651
84652 struct ttm_mem_shrink {
84653 int (*do_shrink) (struct ttm_mem_shrink *);
84654-};
84655+} __no_const;
84656
84657 /**
84658 * struct ttm_mem_global - Global memory accounting structure.
84659diff --git a/include/linux/a.out.h b/include/linux/a.out.h
84660index e86dfca..40cc55f 100644
84661--- a/include/linux/a.out.h
84662+++ b/include/linux/a.out.h
84663@@ -39,6 +39,14 @@ enum machine_type {
84664 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
84665 };
84666
84667+/* Constants for the N_FLAGS field */
84668+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84669+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
84670+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
84671+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
84672+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84673+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84674+
84675 #if !defined (N_MAGIC)
84676 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
84677 #endif
84678diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
84679index 817b237..62c10bc 100644
84680--- a/include/linux/atmdev.h
84681+++ b/include/linux/atmdev.h
84682@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
84683 #endif
84684
84685 struct k_atm_aal_stats {
84686-#define __HANDLE_ITEM(i) atomic_t i
84687+#define __HANDLE_ITEM(i) atomic_unchecked_t i
84688 __AAL_STAT_ITEMS
84689 #undef __HANDLE_ITEM
84690 };
84691diff --git a/include/linux/backlight.h b/include/linux/backlight.h
84692index 0f5f578..8c4f884 100644
84693--- a/include/linux/backlight.h
84694+++ b/include/linux/backlight.h
84695@@ -36,18 +36,18 @@ struct backlight_device;
84696 struct fb_info;
84697
84698 struct backlight_ops {
84699- unsigned int options;
84700+ const unsigned int options;
84701
84702 #define BL_CORE_SUSPENDRESUME (1 << 0)
84703
84704 /* Notify the backlight driver some property has changed */
84705- int (*update_status)(struct backlight_device *);
84706+ int (* const update_status)(struct backlight_device *);
84707 /* Return the current backlight brightness (accounting for power,
84708 fb_blank etc.) */
84709- int (*get_brightness)(struct backlight_device *);
84710+ int (* const get_brightness)(struct backlight_device *);
84711 /* Check if given framebuffer device is the one bound to this backlight;
84712 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
84713- int (*check_fb)(struct fb_info *);
84714+ int (* const check_fb)(struct fb_info *);
84715 };
84716
84717 /* This structure defines all the properties of a backlight */
84718@@ -86,7 +86,7 @@ struct backlight_device {
84719 registered this device has been unloaded, and if class_get_devdata()
84720 points to something in the body of that driver, it is also invalid. */
84721 struct mutex ops_lock;
84722- struct backlight_ops *ops;
84723+ const struct backlight_ops *ops;
84724
84725 /* The framebuffer notifier block */
84726 struct notifier_block fb_notif;
84727@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
84728 }
84729
84730 extern struct backlight_device *backlight_device_register(const char *name,
84731- struct device *dev, void *devdata, struct backlight_ops *ops);
84732+ struct device *dev, void *devdata, const struct backlight_ops *ops);
84733 extern void backlight_device_unregister(struct backlight_device *bd);
84734 extern void backlight_force_update(struct backlight_device *bd,
84735 enum backlight_update_reason reason);
84736diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
84737index a3d802e..93a2ef4 100644
84738--- a/include/linux/binfmts.h
84739+++ b/include/linux/binfmts.h
84740@@ -18,7 +18,7 @@ struct pt_regs;
84741 #define BINPRM_BUF_SIZE 128
84742
84743 #ifdef __KERNEL__
84744-#include <linux/list.h>
84745+#include <linux/sched.h>
84746
84747 #define CORENAME_MAX_SIZE 128
84748
84749@@ -58,6 +58,7 @@ struct linux_binprm{
84750 unsigned interp_flags;
84751 unsigned interp_data;
84752 unsigned long loader, exec;
84753+ char tcomm[TASK_COMM_LEN];
84754 };
84755
84756 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
84757@@ -83,6 +84,7 @@ struct linux_binfmt {
84758 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
84759 int (*load_shlib)(struct file *);
84760 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
84761+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
84762 unsigned long min_coredump; /* minimal dump size */
84763 int hasvdso;
84764 };
84765diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
84766index 5eb6cb0..a2906d2 100644
84767--- a/include/linux/blkdev.h
84768+++ b/include/linux/blkdev.h
84769@@ -1281,7 +1281,7 @@ struct block_device_operations {
84770 int (*revalidate_disk) (struct gendisk *);
84771 int (*getgeo)(struct block_device *, struct hd_geometry *);
84772 struct module *owner;
84773-};
84774+} __do_const;
84775
84776 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
84777 unsigned long);
84778diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
84779index 3b73b99..629d21b 100644
84780--- a/include/linux/blktrace_api.h
84781+++ b/include/linux/blktrace_api.h
84782@@ -160,7 +160,7 @@ struct blk_trace {
84783 struct dentry *dir;
84784 struct dentry *dropped_file;
84785 struct dentry *msg_file;
84786- atomic_t dropped;
84787+ atomic_unchecked_t dropped;
84788 };
84789
84790 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
84791diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
84792index 83195fb..0b0f77d 100644
84793--- a/include/linux/byteorder/little_endian.h
84794+++ b/include/linux/byteorder/little_endian.h
84795@@ -42,51 +42,51 @@
84796
84797 static inline __le64 __cpu_to_le64p(const __u64 *p)
84798 {
84799- return (__force __le64)*p;
84800+ return (__force const __le64)*p;
84801 }
84802 static inline __u64 __le64_to_cpup(const __le64 *p)
84803 {
84804- return (__force __u64)*p;
84805+ return (__force const __u64)*p;
84806 }
84807 static inline __le32 __cpu_to_le32p(const __u32 *p)
84808 {
84809- return (__force __le32)*p;
84810+ return (__force const __le32)*p;
84811 }
84812 static inline __u32 __le32_to_cpup(const __le32 *p)
84813 {
84814- return (__force __u32)*p;
84815+ return (__force const __u32)*p;
84816 }
84817 static inline __le16 __cpu_to_le16p(const __u16 *p)
84818 {
84819- return (__force __le16)*p;
84820+ return (__force const __le16)*p;
84821 }
84822 static inline __u16 __le16_to_cpup(const __le16 *p)
84823 {
84824- return (__force __u16)*p;
84825+ return (__force const __u16)*p;
84826 }
84827 static inline __be64 __cpu_to_be64p(const __u64 *p)
84828 {
84829- return (__force __be64)__swab64p(p);
84830+ return (__force const __be64)__swab64p(p);
84831 }
84832 static inline __u64 __be64_to_cpup(const __be64 *p)
84833 {
84834- return __swab64p((__u64 *)p);
84835+ return __swab64p((const __u64 *)p);
84836 }
84837 static inline __be32 __cpu_to_be32p(const __u32 *p)
84838 {
84839- return (__force __be32)__swab32p(p);
84840+ return (__force const __be32)__swab32p(p);
84841 }
84842 static inline __u32 __be32_to_cpup(const __be32 *p)
84843 {
84844- return __swab32p((__u32 *)p);
84845+ return __swab32p((const __u32 *)p);
84846 }
84847 static inline __be16 __cpu_to_be16p(const __u16 *p)
84848 {
84849- return (__force __be16)__swab16p(p);
84850+ return (__force const __be16)__swab16p(p);
84851 }
84852 static inline __u16 __be16_to_cpup(const __be16 *p)
84853 {
84854- return __swab16p((__u16 *)p);
84855+ return __swab16p((const __u16 *)p);
84856 }
84857 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
84858 #define __le64_to_cpus(x) do { (void)(x); } while (0)
84859diff --git a/include/linux/cache.h b/include/linux/cache.h
84860index 97e2488..e7576b9 100644
84861--- a/include/linux/cache.h
84862+++ b/include/linux/cache.h
84863@@ -16,6 +16,10 @@
84864 #define __read_mostly
84865 #endif
84866
84867+#ifndef __read_only
84868+#define __read_only __read_mostly
84869+#endif
84870+
84871 #ifndef ____cacheline_aligned
84872 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
84873 #endif
84874diff --git a/include/linux/capability.h b/include/linux/capability.h
84875index c8f2a5f7..1618a5c 100644
84876--- a/include/linux/capability.h
84877+++ b/include/linux/capability.h
84878@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
84879 (security_real_capable_noaudit((t), (cap)) == 0)
84880
84881 extern int capable(int cap);
84882+int capable_nolog(int cap);
84883
84884 /* audit system wants to get cap info from files as well */
84885 struct dentry;
84886diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
84887index 450fa59..246fa19 100644
84888--- a/include/linux/compiler-gcc4.h
84889+++ b/include/linux/compiler-gcc4.h
84890@@ -14,6 +14,9 @@
84891 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
84892 #define __always_inline inline __attribute__((always_inline))
84893
84894+#ifdef SIZE_OVERFLOW_PLUGIN
84895+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
84896+#endif
84897 /*
84898 * A trick to suppress uninitialized variable warning without generating any
84899 * code
84900@@ -36,4 +39,16 @@
84901 the kernel context */
84902 #define __cold __attribute__((__cold__))
84903
84904+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
84905+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
84906+#define __bos0(ptr) __bos((ptr), 0)
84907+#define __bos1(ptr) __bos((ptr), 1)
84908+
84909+#if __GNUC_MINOR__ >= 5
84910+#ifdef CONSTIFY_PLUGIN
84911+#define __no_const __attribute__((no_const))
84912+#define __do_const __attribute__((do_const))
84913+#endif
84914+#endif
84915+
84916 #endif
84917diff --git a/include/linux/compiler.h b/include/linux/compiler.h
84918index 04fb513..6189f3b 100644
84919--- a/include/linux/compiler.h
84920+++ b/include/linux/compiler.h
84921@@ -5,11 +5,14 @@
84922
84923 #ifdef __CHECKER__
84924 # define __user __attribute__((noderef, address_space(1)))
84925+# define __force_user __force __user
84926 # define __kernel /* default address space */
84927+# define __force_kernel __force __kernel
84928 # define __safe __attribute__((safe))
84929 # define __force __attribute__((force))
84930 # define __nocast __attribute__((nocast))
84931 # define __iomem __attribute__((noderef, address_space(2)))
84932+# define __force_iomem __force __iomem
84933 # define __acquires(x) __attribute__((context(x,0,1)))
84934 # define __releases(x) __attribute__((context(x,1,0)))
84935 # define __acquire(x) __context__(x,1)
84936@@ -17,13 +20,34 @@
84937 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
84938 extern void __chk_user_ptr(const volatile void __user *);
84939 extern void __chk_io_ptr(const volatile void __iomem *);
84940+#elif defined(CHECKER_PLUGIN)
84941+//# define __user
84942+//# define __force_user
84943+//# define __kernel
84944+//# define __force_kernel
84945+# define __safe
84946+# define __force
84947+# define __nocast
84948+# define __iomem
84949+# define __force_iomem
84950+# define __chk_user_ptr(x) (void)0
84951+# define __chk_io_ptr(x) (void)0
84952+# define __builtin_warning(x, y...) (1)
84953+# define __acquires(x)
84954+# define __releases(x)
84955+# define __acquire(x) (void)0
84956+# define __release(x) (void)0
84957+# define __cond_lock(x,c) (c)
84958 #else
84959 # define __user
84960+# define __force_user
84961 # define __kernel
84962+# define __force_kernel
84963 # define __safe
84964 # define __force
84965 # define __nocast
84966 # define __iomem
84967+# define __force_iomem
84968 # define __chk_user_ptr(x) (void)0
84969 # define __chk_io_ptr(x) (void)0
84970 # define __builtin_warning(x, y...) (1)
84971@@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84972 # define __attribute_const__ /* unimplemented */
84973 #endif
84974
84975+#ifndef __no_const
84976+# define __no_const
84977+#endif
84978+
84979+#ifndef __do_const
84980+# define __do_const
84981+#endif
84982+
84983+#ifndef __size_overflow
84984+# define __size_overflow(...)
84985+#endif
84986 /*
84987 * Tell gcc if a function is cold. The compiler will assume any path
84988 * directly leading to the call is unlikely.
84989@@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84990 #define __cold
84991 #endif
84992
84993+#ifndef __alloc_size
84994+#define __alloc_size(...)
84995+#endif
84996+
84997+#ifndef __bos
84998+#define __bos(ptr, arg)
84999+#endif
85000+
85001+#ifndef __bos0
85002+#define __bos0(ptr)
85003+#endif
85004+
85005+#ifndef __bos1
85006+#define __bos1(ptr)
85007+#endif
85008+
85009 /* Simple shorthand for a section definition */
85010 #ifndef __section
85011 # define __section(S) __attribute__ ((__section__(#S)))
85012@@ -278,6 +329,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
85013 * use is to mediate communication between process-level code and irq/NMI
85014 * handlers, all running on the same CPU.
85015 */
85016-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
85017+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
85018+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
85019
85020 #endif /* __LINUX_COMPILER_H */
85021diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
85022index 0026f26..6c237c5 100644
85023--- a/include/linux/crash_dump.h
85024+++ b/include/linux/crash_dump.h
85025@@ -12,7 +12,7 @@
85026 extern unsigned long long elfcorehdr_addr;
85027
85028 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
85029- unsigned long, int);
85030+ unsigned long, int) __size_overflow(3);
85031
85032 /* Architecture code defines this if there are other possible ELF
85033 * machine types, e.g. on bi-arch capable hardware. */
85034diff --git a/include/linux/crypto.h b/include/linux/crypto.h
85035index fd92988..a3164bd 100644
85036--- a/include/linux/crypto.h
85037+++ b/include/linux/crypto.h
85038@@ -394,7 +394,7 @@ struct cipher_tfm {
85039 const u8 *key, unsigned int keylen);
85040 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85041 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
85042-};
85043+} __no_const;
85044
85045 struct hash_tfm {
85046 int (*init)(struct hash_desc *desc);
85047@@ -415,13 +415,13 @@ struct compress_tfm {
85048 int (*cot_decompress)(struct crypto_tfm *tfm,
85049 const u8 *src, unsigned int slen,
85050 u8 *dst, unsigned int *dlen);
85051-};
85052+} __no_const;
85053
85054 struct rng_tfm {
85055 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
85056 unsigned int dlen);
85057 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
85058-};
85059+} __no_const;
85060
85061 #define crt_ablkcipher crt_u.ablkcipher
85062 #define crt_aead crt_u.aead
85063diff --git a/include/linux/dcache.h b/include/linux/dcache.h
85064index 30b93b2..cd7a8db 100644
85065--- a/include/linux/dcache.h
85066+++ b/include/linux/dcache.h
85067@@ -119,6 +119,8 @@ struct dentry {
85068 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
85069 };
85070
85071+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
85072+
85073 /*
85074 * dentry->d_lock spinlock nesting subclasses:
85075 *
85076diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
85077index 3e9bd6a..f4e1aa0 100644
85078--- a/include/linux/decompress/mm.h
85079+++ b/include/linux/decompress/mm.h
85080@@ -78,7 +78,7 @@ static void free(void *where)
85081 * warnings when not needed (indeed large_malloc / large_free are not
85082 * needed by inflate */
85083
85084-#define malloc(a) kmalloc(a, GFP_KERNEL)
85085+#define malloc(a) kmalloc((a), GFP_KERNEL)
85086 #define free(a) kfree(a)
85087
85088 #define large_malloc(a) vmalloc(a)
85089diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
85090index 91b7618..92a93d32 100644
85091--- a/include/linux/dma-mapping.h
85092+++ b/include/linux/dma-mapping.h
85093@@ -16,51 +16,51 @@ enum dma_data_direction {
85094 };
85095
85096 struct dma_map_ops {
85097- void* (*alloc_coherent)(struct device *dev, size_t size,
85098+ void* (* const alloc_coherent)(struct device *dev, size_t size,
85099 dma_addr_t *dma_handle, gfp_t gfp);
85100- void (*free_coherent)(struct device *dev, size_t size,
85101+ void (* const free_coherent)(struct device *dev, size_t size,
85102 void *vaddr, dma_addr_t dma_handle);
85103- dma_addr_t (*map_page)(struct device *dev, struct page *page,
85104+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
85105 unsigned long offset, size_t size,
85106 enum dma_data_direction dir,
85107 struct dma_attrs *attrs);
85108- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
85109+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
85110 size_t size, enum dma_data_direction dir,
85111 struct dma_attrs *attrs);
85112- int (*map_sg)(struct device *dev, struct scatterlist *sg,
85113+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
85114 int nents, enum dma_data_direction dir,
85115 struct dma_attrs *attrs);
85116- void (*unmap_sg)(struct device *dev,
85117+ void (* const unmap_sg)(struct device *dev,
85118 struct scatterlist *sg, int nents,
85119 enum dma_data_direction dir,
85120 struct dma_attrs *attrs);
85121- void (*sync_single_for_cpu)(struct device *dev,
85122+ void (* const sync_single_for_cpu)(struct device *dev,
85123 dma_addr_t dma_handle, size_t size,
85124 enum dma_data_direction dir);
85125- void (*sync_single_for_device)(struct device *dev,
85126+ void (* const sync_single_for_device)(struct device *dev,
85127 dma_addr_t dma_handle, size_t size,
85128 enum dma_data_direction dir);
85129- void (*sync_single_range_for_cpu)(struct device *dev,
85130+ void (* const sync_single_range_for_cpu)(struct device *dev,
85131 dma_addr_t dma_handle,
85132 unsigned long offset,
85133 size_t size,
85134 enum dma_data_direction dir);
85135- void (*sync_single_range_for_device)(struct device *dev,
85136+ void (* const sync_single_range_for_device)(struct device *dev,
85137 dma_addr_t dma_handle,
85138 unsigned long offset,
85139 size_t size,
85140 enum dma_data_direction dir);
85141- void (*sync_sg_for_cpu)(struct device *dev,
85142+ void (* const sync_sg_for_cpu)(struct device *dev,
85143 struct scatterlist *sg, int nents,
85144 enum dma_data_direction dir);
85145- void (*sync_sg_for_device)(struct device *dev,
85146+ void (* const sync_sg_for_device)(struct device *dev,
85147 struct scatterlist *sg, int nents,
85148 enum dma_data_direction dir);
85149- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
85150- int (*dma_supported)(struct device *dev, u64 mask);
85151+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
85152+ int (* const dma_supported)(struct device *dev, u64 mask);
85153 int (*set_dma_mask)(struct device *dev, u64 mask);
85154 int is_phys;
85155-};
85156+} __do_const;
85157
85158 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
85159
85160diff --git a/include/linux/dst.h b/include/linux/dst.h
85161index e26fed8..b976d9f 100644
85162--- a/include/linux/dst.h
85163+++ b/include/linux/dst.h
85164@@ -380,7 +380,7 @@ struct dst_node
85165 struct thread_pool *pool;
85166
85167 /* Transaction IDs live here */
85168- atomic_long_t gen;
85169+ atomic_long_unchecked_t gen;
85170
85171 /*
85172 * How frequently and how many times transaction
85173diff --git a/include/linux/elf.h b/include/linux/elf.h
85174index 90a4ed0..d652617 100644
85175--- a/include/linux/elf.h
85176+++ b/include/linux/elf.h
85177@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
85178 #define PT_GNU_EH_FRAME 0x6474e550
85179
85180 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
85181+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
85182+
85183+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
85184+
85185+/* Constants for the e_flags field */
85186+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
85187+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
85188+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
85189+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
85190+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
85191+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
85192
85193 /* These constants define the different elf file types */
85194 #define ET_NONE 0
85195@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
85196 #define DT_DEBUG 21
85197 #define DT_TEXTREL 22
85198 #define DT_JMPREL 23
85199+#define DT_FLAGS 30
85200+ #define DF_TEXTREL 0x00000004
85201 #define DT_ENCODING 32
85202 #define OLD_DT_LOOS 0x60000000
85203 #define DT_LOOS 0x6000000d
85204@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
85205 #define PF_W 0x2
85206 #define PF_X 0x1
85207
85208+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
85209+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
85210+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
85211+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
85212+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
85213+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
85214+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
85215+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
85216+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
85217+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
85218+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
85219+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
85220+
85221 typedef struct elf32_phdr{
85222 Elf32_Word p_type;
85223 Elf32_Off p_offset;
85224@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
85225 #define EI_OSABI 7
85226 #define EI_PAD 8
85227
85228+#define EI_PAX 14
85229+
85230 #define ELFMAG0 0x7f /* EI_MAG */
85231 #define ELFMAG1 'E'
85232 #define ELFMAG2 'L'
85233@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
85234 #define elf_phdr elf32_phdr
85235 #define elf_note elf32_note
85236 #define elf_addr_t Elf32_Off
85237+#define elf_dyn Elf32_Dyn
85238
85239 #else
85240
85241@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
85242 #define elf_phdr elf64_phdr
85243 #define elf_note elf64_note
85244 #define elf_addr_t Elf64_Off
85245+#define elf_dyn Elf64_Dyn
85246
85247 #endif
85248
85249diff --git a/include/linux/fs.h b/include/linux/fs.h
85250index 1b9a47a..6fe2934 100644
85251--- a/include/linux/fs.h
85252+++ b/include/linux/fs.h
85253@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
85254 unsigned long, unsigned long);
85255
85256 struct address_space_operations {
85257- int (*writepage)(struct page *page, struct writeback_control *wbc);
85258- int (*readpage)(struct file *, struct page *);
85259- void (*sync_page)(struct page *);
85260+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
85261+ int (* const readpage)(struct file *, struct page *);
85262+ void (* const sync_page)(struct page *);
85263
85264 /* Write back some dirty pages from this mapping. */
85265- int (*writepages)(struct address_space *, struct writeback_control *);
85266+ int (* const writepages)(struct address_space *, struct writeback_control *);
85267
85268 /* Set a page dirty. Return true if this dirtied it */
85269- int (*set_page_dirty)(struct page *page);
85270+ int (* const set_page_dirty)(struct page *page);
85271
85272- int (*readpages)(struct file *filp, struct address_space *mapping,
85273+ int (* const readpages)(struct file *filp, struct address_space *mapping,
85274 struct list_head *pages, unsigned nr_pages);
85275
85276- int (*write_begin)(struct file *, struct address_space *mapping,
85277+ int (* const write_begin)(struct file *, struct address_space *mapping,
85278 loff_t pos, unsigned len, unsigned flags,
85279 struct page **pagep, void **fsdata);
85280- int (*write_end)(struct file *, struct address_space *mapping,
85281+ int (* const write_end)(struct file *, struct address_space *mapping,
85282 loff_t pos, unsigned len, unsigned copied,
85283 struct page *page, void *fsdata);
85284
85285 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
85286- sector_t (*bmap)(struct address_space *, sector_t);
85287- void (*invalidatepage) (struct page *, unsigned long);
85288- int (*releasepage) (struct page *, gfp_t);
85289- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
85290+ sector_t (* const bmap)(struct address_space *, sector_t);
85291+ void (* const invalidatepage) (struct page *, unsigned long);
85292+ int (* const releasepage) (struct page *, gfp_t);
85293+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
85294 loff_t offset, unsigned long nr_segs);
85295- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
85296+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
85297 void **, unsigned long *);
85298 /* migrate the contents of a page to the specified target */
85299- int (*migratepage) (struct address_space *,
85300+ int (* const migratepage) (struct address_space *,
85301 struct page *, struct page *);
85302- int (*launder_page) (struct page *);
85303- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
85304+ int (* const launder_page) (struct page *);
85305+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
85306 unsigned long);
85307- int (*error_remove_page)(struct address_space *, struct page *);
85308+ int (* const error_remove_page)(struct address_space *, struct page *);
85309 };
85310
85311 /*
85312@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
85313 typedef struct files_struct *fl_owner_t;
85314
85315 struct file_lock_operations {
85316- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85317- void (*fl_release_private)(struct file_lock *);
85318+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85319+ void (* const fl_release_private)(struct file_lock *);
85320 };
85321
85322 struct lock_manager_operations {
85323- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
85324- void (*fl_notify)(struct file_lock *); /* unblock callback */
85325- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
85326- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
85327- void (*fl_release_private)(struct file_lock *);
85328- void (*fl_break)(struct file_lock *);
85329- int (*fl_mylease)(struct file_lock *, struct file_lock *);
85330- int (*fl_change)(struct file_lock **, int);
85331+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
85332+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
85333+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
85334+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
85335+ void (* const fl_release_private)(struct file_lock *);
85336+ void (* const fl_break)(struct file_lock *);
85337+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
85338+ int (* const fl_change)(struct file_lock **, int);
85339 };
85340
85341 struct lock_manager {
85342@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
85343 unsigned int fi_flags; /* Flags as passed from user */
85344 unsigned int fi_extents_mapped; /* Number of mapped extents */
85345 unsigned int fi_extents_max; /* Size of fiemap_extent array */
85346- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
85347+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
85348 * array */
85349 };
85350 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
85351@@ -1512,7 +1512,8 @@ struct file_operations {
85352 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
85353 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
85354 int (*setlease)(struct file *, long, struct file_lock **);
85355-};
85356+} __do_const;
85357+typedef struct file_operations __no_const file_operations_no_const;
85358
85359 struct inode_operations {
85360 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
85361@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
85362 unsigned long, loff_t *);
85363
85364 struct super_operations {
85365- struct inode *(*alloc_inode)(struct super_block *sb);
85366- void (*destroy_inode)(struct inode *);
85367+ struct inode *(* const alloc_inode)(struct super_block *sb);
85368+ void (* const destroy_inode)(struct inode *);
85369
85370- void (*dirty_inode) (struct inode *);
85371- int (*write_inode) (struct inode *, int);
85372- void (*drop_inode) (struct inode *);
85373- void (*delete_inode) (struct inode *);
85374- void (*put_super) (struct super_block *);
85375- void (*write_super) (struct super_block *);
85376- int (*sync_fs)(struct super_block *sb, int wait);
85377- int (*freeze_fs) (struct super_block *);
85378- int (*unfreeze_fs) (struct super_block *);
85379- int (*statfs) (struct dentry *, struct kstatfs *);
85380- int (*remount_fs) (struct super_block *, int *, char *);
85381- void (*clear_inode) (struct inode *);
85382- void (*umount_begin) (struct super_block *);
85383+ void (* const dirty_inode) (struct inode *);
85384+ int (* const write_inode) (struct inode *, int);
85385+ void (* const drop_inode) (struct inode *);
85386+ void (* const delete_inode) (struct inode *);
85387+ void (* const put_super) (struct super_block *);
85388+ void (* const write_super) (struct super_block *);
85389+ int (* const sync_fs)(struct super_block *sb, int wait);
85390+ int (* const freeze_fs) (struct super_block *);
85391+ int (* const unfreeze_fs) (struct super_block *);
85392+ int (* const statfs) (struct dentry *, struct kstatfs *);
85393+ int (* const remount_fs) (struct super_block *, int *, char *);
85394+ void (* const clear_inode) (struct inode *);
85395+ void (* const umount_begin) (struct super_block *);
85396
85397- int (*show_options)(struct seq_file *, struct vfsmount *);
85398- int (*show_stats)(struct seq_file *, struct vfsmount *);
85399+ int (* const show_options)(struct seq_file *, struct vfsmount *);
85400+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
85401 #ifdef CONFIG_QUOTA
85402- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
85403- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85404+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
85405+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
85406 #endif
85407- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85408+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
85409 };
85410
85411 /*
85412diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
85413index 78a05bf..2a7d3e1 100644
85414--- a/include/linux/fs_struct.h
85415+++ b/include/linux/fs_struct.h
85416@@ -4,7 +4,7 @@
85417 #include <linux/path.h>
85418
85419 struct fs_struct {
85420- int users;
85421+ atomic_t users;
85422 rwlock_t lock;
85423 int umask;
85424 int in_exec;
85425diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
85426index 7be0c6f..2f63a2b 100644
85427--- a/include/linux/fscache-cache.h
85428+++ b/include/linux/fscache-cache.h
85429@@ -116,7 +116,7 @@ struct fscache_operation {
85430 #endif
85431 };
85432
85433-extern atomic_t fscache_op_debug_id;
85434+extern atomic_unchecked_t fscache_op_debug_id;
85435 extern const struct slow_work_ops fscache_op_slow_work_ops;
85436
85437 extern void fscache_enqueue_operation(struct fscache_operation *);
85438@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
85439 fscache_operation_release_t release)
85440 {
85441 atomic_set(&op->usage, 1);
85442- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
85443+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
85444 op->release = release;
85445 INIT_LIST_HEAD(&op->pend_link);
85446 fscache_set_op_state(op, "Init");
85447diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
85448index 4d6f47b..00bcedb 100644
85449--- a/include/linux/fsnotify_backend.h
85450+++ b/include/linux/fsnotify_backend.h
85451@@ -86,6 +86,7 @@ struct fsnotify_ops {
85452 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
85453 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
85454 };
85455+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
85456
85457 /*
85458 * A group is a "thing" that wants to receive notification about filesystem
85459diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
85460index 4ec5e67..42f1eb9 100644
85461--- a/include/linux/ftrace_event.h
85462+++ b/include/linux/ftrace_event.h
85463@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
85464 int filter_type);
85465 extern int trace_define_common_fields(struct ftrace_event_call *call);
85466
85467-#define is_signed_type(type) (((type)(-1)) < 0)
85468+#define is_signed_type(type) (((type)(-1)) < (type)1)
85469
85470 int trace_set_clr_event(const char *system, const char *event, int set);
85471
85472diff --git a/include/linux/genhd.h b/include/linux/genhd.h
85473index 297df45..b6a74ff 100644
85474--- a/include/linux/genhd.h
85475+++ b/include/linux/genhd.h
85476@@ -161,7 +161,7 @@ struct gendisk {
85477
85478 struct timer_rand_state *random;
85479
85480- atomic_t sync_io; /* RAID */
85481+ atomic_unchecked_t sync_io; /* RAID */
85482 struct work_struct async_notify;
85483 #ifdef CONFIG_BLK_DEV_INTEGRITY
85484 struct blk_integrity *integrity;
85485diff --git a/include/linux/gracl.h b/include/linux/gracl.h
85486new file mode 100644
85487index 0000000..6c51079
85488--- /dev/null
85489+++ b/include/linux/gracl.h
85490@@ -0,0 +1,320 @@
85491+#ifndef GR_ACL_H
85492+#define GR_ACL_H
85493+
85494+#include <linux/grdefs.h>
85495+#include <linux/resource.h>
85496+#include <linux/capability.h>
85497+#include <linux/dcache.h>
85498+#include <asm/resource.h>
85499+
85500+/* Major status information */
85501+
85502+#define GR_VERSION "grsecurity 2.9"
85503+#define GRSECURITY_VERSION 0x2900
85504+
85505+enum {
85506+ GR_SHUTDOWN = 0,
85507+ GR_ENABLE = 1,
85508+ GR_SPROLE = 2,
85509+ GR_RELOAD = 3,
85510+ GR_SEGVMOD = 4,
85511+ GR_STATUS = 5,
85512+ GR_UNSPROLE = 6,
85513+ GR_PASSSET = 7,
85514+ GR_SPROLEPAM = 8,
85515+};
85516+
85517+/* Password setup definitions
85518+ * kernel/grhash.c */
85519+enum {
85520+ GR_PW_LEN = 128,
85521+ GR_SALT_LEN = 16,
85522+ GR_SHA_LEN = 32,
85523+};
85524+
85525+enum {
85526+ GR_SPROLE_LEN = 64,
85527+};
85528+
85529+enum {
85530+ GR_NO_GLOB = 0,
85531+ GR_REG_GLOB,
85532+ GR_CREATE_GLOB
85533+};
85534+
85535+#define GR_NLIMITS 32
85536+
85537+/* Begin Data Structures */
85538+
85539+struct sprole_pw {
85540+ unsigned char *rolename;
85541+ unsigned char salt[GR_SALT_LEN];
85542+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
85543+};
85544+
85545+struct name_entry {
85546+ __u32 key;
85547+ ino_t inode;
85548+ dev_t device;
85549+ char *name;
85550+ __u16 len;
85551+ __u8 deleted;
85552+ struct name_entry *prev;
85553+ struct name_entry *next;
85554+};
85555+
85556+struct inodev_entry {
85557+ struct name_entry *nentry;
85558+ struct inodev_entry *prev;
85559+ struct inodev_entry *next;
85560+};
85561+
85562+struct acl_role_db {
85563+ struct acl_role_label **r_hash;
85564+ __u32 r_size;
85565+};
85566+
85567+struct inodev_db {
85568+ struct inodev_entry **i_hash;
85569+ __u32 i_size;
85570+};
85571+
85572+struct name_db {
85573+ struct name_entry **n_hash;
85574+ __u32 n_size;
85575+};
85576+
85577+struct crash_uid {
85578+ uid_t uid;
85579+ unsigned long expires;
85580+};
85581+
85582+struct gr_hash_struct {
85583+ void **table;
85584+ void **nametable;
85585+ void *first;
85586+ __u32 table_size;
85587+ __u32 used_size;
85588+ int type;
85589+};
85590+
85591+/* Userspace Grsecurity ACL data structures */
85592+
85593+struct acl_subject_label {
85594+ char *filename;
85595+ ino_t inode;
85596+ dev_t device;
85597+ __u32 mode;
85598+ kernel_cap_t cap_mask;
85599+ kernel_cap_t cap_lower;
85600+ kernel_cap_t cap_invert_audit;
85601+
85602+ struct rlimit res[GR_NLIMITS];
85603+ __u32 resmask;
85604+
85605+ __u8 user_trans_type;
85606+ __u8 group_trans_type;
85607+ uid_t *user_transitions;
85608+ gid_t *group_transitions;
85609+ __u16 user_trans_num;
85610+ __u16 group_trans_num;
85611+
85612+ __u32 sock_families[2];
85613+ __u32 ip_proto[8];
85614+ __u32 ip_type;
85615+ struct acl_ip_label **ips;
85616+ __u32 ip_num;
85617+ __u32 inaddr_any_override;
85618+
85619+ __u32 crashes;
85620+ unsigned long expires;
85621+
85622+ struct acl_subject_label *parent_subject;
85623+ struct gr_hash_struct *hash;
85624+ struct acl_subject_label *prev;
85625+ struct acl_subject_label *next;
85626+
85627+ struct acl_object_label **obj_hash;
85628+ __u32 obj_hash_size;
85629+ __u16 pax_flags;
85630+};
85631+
85632+struct role_allowed_ip {
85633+ __u32 addr;
85634+ __u32 netmask;
85635+
85636+ struct role_allowed_ip *prev;
85637+ struct role_allowed_ip *next;
85638+};
85639+
85640+struct role_transition {
85641+ char *rolename;
85642+
85643+ struct role_transition *prev;
85644+ struct role_transition *next;
85645+};
85646+
85647+struct acl_role_label {
85648+ char *rolename;
85649+ uid_t uidgid;
85650+ __u16 roletype;
85651+
85652+ __u16 auth_attempts;
85653+ unsigned long expires;
85654+
85655+ struct acl_subject_label *root_label;
85656+ struct gr_hash_struct *hash;
85657+
85658+ struct acl_role_label *prev;
85659+ struct acl_role_label *next;
85660+
85661+ struct role_transition *transitions;
85662+ struct role_allowed_ip *allowed_ips;
85663+ uid_t *domain_children;
85664+ __u16 domain_child_num;
85665+
85666+ // __u16
85667+ umode_t umask;
85668+
85669+ struct acl_subject_label **subj_hash;
85670+ __u32 subj_hash_size;
85671+};
85672+
85673+struct user_acl_role_db {
85674+ struct acl_role_label **r_table;
85675+ __u32 num_pointers; /* Number of allocations to track */
85676+ __u32 num_roles; /* Number of roles */
85677+ __u32 num_domain_children; /* Number of domain children */
85678+ __u32 num_subjects; /* Number of subjects */
85679+ __u32 num_objects; /* Number of objects */
85680+};
85681+
85682+struct acl_object_label {
85683+ char *filename;
85684+ ino_t inode;
85685+ dev_t device;
85686+ __u32 mode;
85687+
85688+ struct acl_subject_label *nested;
85689+ struct acl_object_label *globbed;
85690+
85691+ /* next two structures not used */
85692+
85693+ struct acl_object_label *prev;
85694+ struct acl_object_label *next;
85695+};
85696+
85697+struct acl_ip_label {
85698+ char *iface;
85699+ __u32 addr;
85700+ __u32 netmask;
85701+ __u16 low, high;
85702+ __u8 mode;
85703+ __u32 type;
85704+ __u32 proto[8];
85705+
85706+ /* next two structures not used */
85707+
85708+ struct acl_ip_label *prev;
85709+ struct acl_ip_label *next;
85710+};
85711+
85712+struct gr_arg {
85713+ struct user_acl_role_db role_db;
85714+ unsigned char pw[GR_PW_LEN];
85715+ unsigned char salt[GR_SALT_LEN];
85716+ unsigned char sum[GR_SHA_LEN];
85717+ unsigned char sp_role[GR_SPROLE_LEN];
85718+ struct sprole_pw *sprole_pws;
85719+ dev_t segv_device;
85720+ ino_t segv_inode;
85721+ uid_t segv_uid;
85722+ __u16 num_sprole_pws;
85723+ __u16 mode;
85724+};
85725+
85726+struct gr_arg_wrapper {
85727+ struct gr_arg *arg;
85728+ __u32 version;
85729+ __u32 size;
85730+};
85731+
85732+struct subject_map {
85733+ struct acl_subject_label *user;
85734+ struct acl_subject_label *kernel;
85735+ struct subject_map *prev;
85736+ struct subject_map *next;
85737+};
85738+
85739+struct acl_subj_map_db {
85740+ struct subject_map **s_hash;
85741+ __u32 s_size;
85742+};
85743+
85744+/* End Data Structures Section */
85745+
85746+/* Hash functions generated by empirical testing by Brad Spengler
85747+ Makes good use of the low bits of the inode. Generally 0-1 times
85748+ in loop for successful match. 0-3 for unsuccessful match.
85749+ Shift/add algorithm with modulus of table size and an XOR*/
85750+
85751+static __inline__ unsigned int
85752+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
85753+{
85754+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
85755+}
85756+
85757+ static __inline__ unsigned int
85758+shash(const struct acl_subject_label *userp, const unsigned int sz)
85759+{
85760+ return ((const unsigned long)userp % sz);
85761+}
85762+
85763+static __inline__ unsigned int
85764+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
85765+{
85766+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
85767+}
85768+
85769+static __inline__ unsigned int
85770+nhash(const char *name, const __u16 len, const unsigned int sz)
85771+{
85772+ return full_name_hash((const unsigned char *)name, len) % sz;
85773+}
85774+
85775+#define FOR_EACH_ROLE_START(role) \
85776+ role = role_list; \
85777+ while (role) {
85778+
85779+#define FOR_EACH_ROLE_END(role) \
85780+ role = role->prev; \
85781+ }
85782+
85783+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
85784+ subj = NULL; \
85785+ iter = 0; \
85786+ while (iter < role->subj_hash_size) { \
85787+ if (subj == NULL) \
85788+ subj = role->subj_hash[iter]; \
85789+ if (subj == NULL) { \
85790+ iter++; \
85791+ continue; \
85792+ }
85793+
85794+#define FOR_EACH_SUBJECT_END(subj,iter) \
85795+ subj = subj->next; \
85796+ if (subj == NULL) \
85797+ iter++; \
85798+ }
85799+
85800+
85801+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
85802+ subj = role->hash->first; \
85803+ while (subj != NULL) {
85804+
85805+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
85806+ subj = subj->next; \
85807+ }
85808+
85809+#endif
85810+
85811diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
85812new file mode 100644
85813index 0000000..323ecf2
85814--- /dev/null
85815+++ b/include/linux/gralloc.h
85816@@ -0,0 +1,9 @@
85817+#ifndef __GRALLOC_H
85818+#define __GRALLOC_H
85819+
85820+void acl_free_all(void);
85821+int acl_alloc_stack_init(unsigned long size);
85822+void *acl_alloc(unsigned long len);
85823+void *acl_alloc_num(unsigned long num, unsigned long len);
85824+
85825+#endif
85826diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
85827new file mode 100644
85828index 0000000..70d6cd5
85829--- /dev/null
85830+++ b/include/linux/grdefs.h
85831@@ -0,0 +1,140 @@
85832+#ifndef GRDEFS_H
85833+#define GRDEFS_H
85834+
85835+/* Begin grsecurity status declarations */
85836+
85837+enum {
85838+ GR_READY = 0x01,
85839+ GR_STATUS_INIT = 0x00 // disabled state
85840+};
85841+
85842+/* Begin ACL declarations */
85843+
85844+/* Role flags */
85845+
85846+enum {
85847+ GR_ROLE_USER = 0x0001,
85848+ GR_ROLE_GROUP = 0x0002,
85849+ GR_ROLE_DEFAULT = 0x0004,
85850+ GR_ROLE_SPECIAL = 0x0008,
85851+ GR_ROLE_AUTH = 0x0010,
85852+ GR_ROLE_NOPW = 0x0020,
85853+ GR_ROLE_GOD = 0x0040,
85854+ GR_ROLE_LEARN = 0x0080,
85855+ GR_ROLE_TPE = 0x0100,
85856+ GR_ROLE_DOMAIN = 0x0200,
85857+ GR_ROLE_PAM = 0x0400,
85858+ GR_ROLE_PERSIST = 0x800
85859+};
85860+
85861+/* ACL Subject and Object mode flags */
85862+enum {
85863+ GR_DELETED = 0x80000000
85864+};
85865+
85866+/* ACL Object-only mode flags */
85867+enum {
85868+ GR_READ = 0x00000001,
85869+ GR_APPEND = 0x00000002,
85870+ GR_WRITE = 0x00000004,
85871+ GR_EXEC = 0x00000008,
85872+ GR_FIND = 0x00000010,
85873+ GR_INHERIT = 0x00000020,
85874+ GR_SETID = 0x00000040,
85875+ GR_CREATE = 0x00000080,
85876+ GR_DELETE = 0x00000100,
85877+ GR_LINK = 0x00000200,
85878+ GR_AUDIT_READ = 0x00000400,
85879+ GR_AUDIT_APPEND = 0x00000800,
85880+ GR_AUDIT_WRITE = 0x00001000,
85881+ GR_AUDIT_EXEC = 0x00002000,
85882+ GR_AUDIT_FIND = 0x00004000,
85883+ GR_AUDIT_INHERIT= 0x00008000,
85884+ GR_AUDIT_SETID = 0x00010000,
85885+ GR_AUDIT_CREATE = 0x00020000,
85886+ GR_AUDIT_DELETE = 0x00040000,
85887+ GR_AUDIT_LINK = 0x00080000,
85888+ GR_PTRACERD = 0x00100000,
85889+ GR_NOPTRACE = 0x00200000,
85890+ GR_SUPPRESS = 0x00400000,
85891+ GR_NOLEARN = 0x00800000,
85892+ GR_INIT_TRANSFER= 0x01000000
85893+};
85894+
85895+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
85896+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
85897+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
85898+
85899+/* ACL subject-only mode flags */
85900+enum {
85901+ GR_KILL = 0x00000001,
85902+ GR_VIEW = 0x00000002,
85903+ GR_PROTECTED = 0x00000004,
85904+ GR_LEARN = 0x00000008,
85905+ GR_OVERRIDE = 0x00000010,
85906+ /* just a placeholder, this mode is only used in userspace */
85907+ GR_DUMMY = 0x00000020,
85908+ GR_PROTSHM = 0x00000040,
85909+ GR_KILLPROC = 0x00000080,
85910+ GR_KILLIPPROC = 0x00000100,
85911+ /* just a placeholder, this mode is only used in userspace */
85912+ GR_NOTROJAN = 0x00000200,
85913+ GR_PROTPROCFD = 0x00000400,
85914+ GR_PROCACCT = 0x00000800,
85915+ GR_RELAXPTRACE = 0x00001000,
85916+ GR_NESTED = 0x00002000,
85917+ GR_INHERITLEARN = 0x00004000,
85918+ GR_PROCFIND = 0x00008000,
85919+ GR_POVERRIDE = 0x00010000,
85920+ GR_KERNELAUTH = 0x00020000,
85921+ GR_ATSECURE = 0x00040000,
85922+ GR_SHMEXEC = 0x00080000
85923+};
85924+
85925+enum {
85926+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
85927+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
85928+ GR_PAX_ENABLE_MPROTECT = 0x0004,
85929+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
85930+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
85931+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
85932+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
85933+ GR_PAX_DISABLE_MPROTECT = 0x0400,
85934+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
85935+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
85936+};
85937+
85938+enum {
85939+ GR_ID_USER = 0x01,
85940+ GR_ID_GROUP = 0x02,
85941+};
85942+
85943+enum {
85944+ GR_ID_ALLOW = 0x01,
85945+ GR_ID_DENY = 0x02,
85946+};
85947+
85948+#define GR_CRASH_RES 31
85949+#define GR_UIDTABLE_MAX 500
85950+
85951+/* begin resource learning section */
85952+enum {
85953+ GR_RLIM_CPU_BUMP = 60,
85954+ GR_RLIM_FSIZE_BUMP = 50000,
85955+ GR_RLIM_DATA_BUMP = 10000,
85956+ GR_RLIM_STACK_BUMP = 1000,
85957+ GR_RLIM_CORE_BUMP = 10000,
85958+ GR_RLIM_RSS_BUMP = 500000,
85959+ GR_RLIM_NPROC_BUMP = 1,
85960+ GR_RLIM_NOFILE_BUMP = 5,
85961+ GR_RLIM_MEMLOCK_BUMP = 50000,
85962+ GR_RLIM_AS_BUMP = 500000,
85963+ GR_RLIM_LOCKS_BUMP = 2,
85964+ GR_RLIM_SIGPENDING_BUMP = 5,
85965+ GR_RLIM_MSGQUEUE_BUMP = 10000,
85966+ GR_RLIM_NICE_BUMP = 1,
85967+ GR_RLIM_RTPRIO_BUMP = 1,
85968+ GR_RLIM_RTTIME_BUMP = 1000000
85969+};
85970+
85971+#endif
85972diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
85973new file mode 100644
85974index 0000000..3826b91
85975--- /dev/null
85976+++ b/include/linux/grinternal.h
85977@@ -0,0 +1,219 @@
85978+#ifndef __GRINTERNAL_H
85979+#define __GRINTERNAL_H
85980+
85981+#ifdef CONFIG_GRKERNSEC
85982+
85983+#include <linux/fs.h>
85984+#include <linux/mnt_namespace.h>
85985+#include <linux/nsproxy.h>
85986+#include <linux/gracl.h>
85987+#include <linux/grdefs.h>
85988+#include <linux/grmsg.h>
85989+
85990+void gr_add_learn_entry(const char *fmt, ...)
85991+ __attribute__ ((format (printf, 1, 2)));
85992+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
85993+ const struct vfsmount *mnt);
85994+__u32 gr_check_create(const struct dentry *new_dentry,
85995+ const struct dentry *parent,
85996+ const struct vfsmount *mnt, const __u32 mode);
85997+int gr_check_protected_task(const struct task_struct *task);
85998+__u32 to_gr_audit(const __u32 reqmode);
85999+int gr_set_acls(const int type);
86000+int gr_apply_subject_to_task(struct task_struct *task);
86001+int gr_acl_is_enabled(void);
86002+char gr_roletype_to_char(void);
86003+
86004+void gr_handle_alertkill(struct task_struct *task);
86005+char *gr_to_filename(const struct dentry *dentry,
86006+ const struct vfsmount *mnt);
86007+char *gr_to_filename1(const struct dentry *dentry,
86008+ const struct vfsmount *mnt);
86009+char *gr_to_filename2(const struct dentry *dentry,
86010+ const struct vfsmount *mnt);
86011+char *gr_to_filename3(const struct dentry *dentry,
86012+ const struct vfsmount *mnt);
86013+
86014+extern int grsec_enable_ptrace_readexec;
86015+extern int grsec_enable_harden_ptrace;
86016+extern int grsec_enable_link;
86017+extern int grsec_enable_fifo;
86018+extern int grsec_enable_shm;
86019+extern int grsec_enable_execlog;
86020+extern int grsec_enable_signal;
86021+extern int grsec_enable_audit_ptrace;
86022+extern int grsec_enable_forkfail;
86023+extern int grsec_enable_time;
86024+extern int grsec_enable_rofs;
86025+extern int grsec_enable_chroot_shmat;
86026+extern int grsec_enable_chroot_mount;
86027+extern int grsec_enable_chroot_double;
86028+extern int grsec_enable_chroot_pivot;
86029+extern int grsec_enable_chroot_chdir;
86030+extern int grsec_enable_chroot_chmod;
86031+extern int grsec_enable_chroot_mknod;
86032+extern int grsec_enable_chroot_fchdir;
86033+extern int grsec_enable_chroot_nice;
86034+extern int grsec_enable_chroot_execlog;
86035+extern int grsec_enable_chroot_caps;
86036+extern int grsec_enable_chroot_sysctl;
86037+extern int grsec_enable_chroot_unix;
86038+extern int grsec_enable_tpe;
86039+extern int grsec_tpe_gid;
86040+extern int grsec_enable_tpe_all;
86041+extern int grsec_enable_tpe_invert;
86042+extern int grsec_enable_socket_all;
86043+extern int grsec_socket_all_gid;
86044+extern int grsec_enable_socket_client;
86045+extern int grsec_socket_client_gid;
86046+extern int grsec_enable_socket_server;
86047+extern int grsec_socket_server_gid;
86048+extern int grsec_audit_gid;
86049+extern int grsec_enable_group;
86050+extern int grsec_enable_audit_textrel;
86051+extern int grsec_enable_log_rwxmaps;
86052+extern int grsec_enable_mount;
86053+extern int grsec_enable_chdir;
86054+extern int grsec_resource_logging;
86055+extern int grsec_enable_blackhole;
86056+extern int grsec_lastack_retries;
86057+extern int grsec_enable_brute;
86058+extern int grsec_lock;
86059+
86060+extern spinlock_t grsec_alert_lock;
86061+extern unsigned long grsec_alert_wtime;
86062+extern unsigned long grsec_alert_fyet;
86063+
86064+extern spinlock_t grsec_audit_lock;
86065+
86066+extern rwlock_t grsec_exec_file_lock;
86067+
86068+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
86069+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
86070+ (tsk)->exec_file->f_vfsmnt) : "/")
86071+
86072+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
86073+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
86074+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86075+
86076+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
86077+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
86078+ (tsk)->exec_file->f_vfsmnt) : "/")
86079+
86080+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
86081+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
86082+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
86083+
86084+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
86085+
86086+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
86087+
86088+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
86089+ (task)->pid, (cred)->uid, \
86090+ (cred)->euid, (cred)->gid, (cred)->egid, \
86091+ gr_parent_task_fullpath(task), \
86092+ (task)->real_parent->comm, (task)->real_parent->pid, \
86093+ (pcred)->uid, (pcred)->euid, \
86094+ (pcred)->gid, (pcred)->egid
86095+
86096+#define GR_CHROOT_CAPS {{ \
86097+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
86098+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
86099+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
86100+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
86101+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
86102+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
86103+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
86104+
86105+#define security_learn(normal_msg,args...) \
86106+({ \
86107+ read_lock(&grsec_exec_file_lock); \
86108+ gr_add_learn_entry(normal_msg "\n", ## args); \
86109+ read_unlock(&grsec_exec_file_lock); \
86110+})
86111+
86112+enum {
86113+ GR_DO_AUDIT,
86114+ GR_DONT_AUDIT,
86115+ GR_DONT_AUDIT_GOOD
86116+};
86117+
86118+enum {
86119+ GR_TTYSNIFF,
86120+ GR_RBAC,
86121+ GR_RBAC_STR,
86122+ GR_STR_RBAC,
86123+ GR_RBAC_MODE2,
86124+ GR_RBAC_MODE3,
86125+ GR_FILENAME,
86126+ GR_SYSCTL_HIDDEN,
86127+ GR_NOARGS,
86128+ GR_ONE_INT,
86129+ GR_ONE_INT_TWO_STR,
86130+ GR_ONE_STR,
86131+ GR_STR_INT,
86132+ GR_TWO_STR_INT,
86133+ GR_TWO_INT,
86134+ GR_TWO_U64,
86135+ GR_THREE_INT,
86136+ GR_FIVE_INT_TWO_STR,
86137+ GR_TWO_STR,
86138+ GR_THREE_STR,
86139+ GR_FOUR_STR,
86140+ GR_STR_FILENAME,
86141+ GR_FILENAME_STR,
86142+ GR_FILENAME_TWO_INT,
86143+ GR_FILENAME_TWO_INT_STR,
86144+ GR_TEXTREL,
86145+ GR_PTRACE,
86146+ GR_RESOURCE,
86147+ GR_CAP,
86148+ GR_SIG,
86149+ GR_SIG2,
86150+ GR_CRASH1,
86151+ GR_CRASH2,
86152+ GR_PSACCT,
86153+ GR_RWXMAP
86154+};
86155+
86156+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
86157+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
86158+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
86159+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
86160+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
86161+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
86162+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
86163+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
86164+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
86165+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
86166+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
86167+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
86168+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
86169+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
86170+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
86171+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
86172+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
86173+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
86174+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
86175+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
86176+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
86177+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
86178+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
86179+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
86180+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
86181+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
86182+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
86183+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
86184+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
86185+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
86186+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
86187+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
86188+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
86189+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
86190+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
86191+
86192+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
86193+
86194+#endif
86195+
86196+#endif
86197diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
86198new file mode 100644
86199index 0000000..f885406
86200--- /dev/null
86201+++ b/include/linux/grmsg.h
86202@@ -0,0 +1,109 @@
86203+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
86204+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
86205+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
86206+#define GR_STOPMOD_MSG "denied modification of module state by "
86207+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
86208+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
86209+#define GR_IOPERM_MSG "denied use of ioperm() by "
86210+#define GR_IOPL_MSG "denied use of iopl() by "
86211+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
86212+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
86213+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
86214+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
86215+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
86216+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
86217+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
86218+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
86219+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
86220+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
86221+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
86222+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
86223+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
86224+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
86225+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
86226+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
86227+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
86228+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
86229+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
86230+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
86231+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
86232+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
86233+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
86234+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
86235+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
86236+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
86237+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
86238+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
86239+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
86240+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
86241+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
86242+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
86243+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
86244+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
86245+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
86246+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
86247+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
86248+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
86249+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
86250+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
86251+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
86252+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
86253+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
86254+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
86255+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
86256+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
86257+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
86258+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
86259+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
86260+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
86261+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
86262+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
86263+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
86264+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
86265+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
86266+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
86267+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
86268+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
86269+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
86270+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
86271+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
86272+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
86273+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
86274+#define GR_FAILFORK_MSG "failed fork with errno %s by "
86275+#define GR_NICE_CHROOT_MSG "denied priority change by "
86276+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
86277+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
86278+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
86279+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
86280+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
86281+#define GR_TIME_MSG "time set by "
86282+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
86283+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
86284+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
86285+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
86286+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
86287+#define GR_BIND_MSG "denied bind() by "
86288+#define GR_CONNECT_MSG "denied connect() by "
86289+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
86290+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
86291+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
86292+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
86293+#define GR_CAP_ACL_MSG "use of %s denied for "
86294+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
86295+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
86296+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
86297+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
86298+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
86299+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
86300+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
86301+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
86302+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
86303+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
86304+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
86305+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
86306+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
86307+#define GR_VM86_MSG "denied use of vm86 by "
86308+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
86309+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
86310+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
86311+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
86312diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
86313new file mode 100644
86314index 0000000..c1793ae
86315--- /dev/null
86316+++ b/include/linux/grsecurity.h
86317@@ -0,0 +1,219 @@
86318+#ifndef GR_SECURITY_H
86319+#define GR_SECURITY_H
86320+#include <linux/fs.h>
86321+#include <linux/fs_struct.h>
86322+#include <linux/binfmts.h>
86323+#include <linux/gracl.h>
86324+#include <linux/compat.h>
86325+
86326+/* notify of brain-dead configs */
86327+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86328+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
86329+#endif
86330+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
86331+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
86332+#endif
86333+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
86334+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
86335+#endif
86336+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
86337+#error "CONFIG_PAX enabled, but no PaX options are enabled."
86338+#endif
86339+
86340+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
86341+void gr_handle_brute_check(void);
86342+void gr_handle_kernel_exploit(void);
86343+int gr_process_user_ban(void);
86344+
86345+char gr_roletype_to_char(void);
86346+
86347+int gr_acl_enable_at_secure(void);
86348+
86349+int gr_check_user_change(int real, int effective, int fs);
86350+int gr_check_group_change(int real, int effective, int fs);
86351+
86352+void gr_del_task_from_ip_table(struct task_struct *p);
86353+
86354+int gr_pid_is_chrooted(struct task_struct *p);
86355+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
86356+int gr_handle_chroot_nice(void);
86357+int gr_handle_chroot_sysctl(const int op);
86358+int gr_handle_chroot_setpriority(struct task_struct *p,
86359+ const int niceval);
86360+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
86361+int gr_handle_chroot_chroot(const struct dentry *dentry,
86362+ const struct vfsmount *mnt);
86363+void gr_handle_chroot_chdir(struct path *path);
86364+int gr_handle_chroot_chmod(const struct dentry *dentry,
86365+ const struct vfsmount *mnt, const int mode);
86366+int gr_handle_chroot_mknod(const struct dentry *dentry,
86367+ const struct vfsmount *mnt, const int mode);
86368+int gr_handle_chroot_mount(const struct dentry *dentry,
86369+ const struct vfsmount *mnt,
86370+ const char *dev_name);
86371+int gr_handle_chroot_pivot(void);
86372+int gr_handle_chroot_unix(const pid_t pid);
86373+
86374+int gr_handle_rawio(const struct inode *inode);
86375+
86376+void gr_handle_ioperm(void);
86377+void gr_handle_iopl(void);
86378+
86379+umode_t gr_acl_umask(void);
86380+
86381+int gr_tpe_allow(const struct file *file);
86382+
86383+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
86384+void gr_clear_chroot_entries(struct task_struct *task);
86385+
86386+void gr_log_forkfail(const int retval);
86387+void gr_log_timechange(void);
86388+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
86389+void gr_log_chdir(const struct dentry *dentry,
86390+ const struct vfsmount *mnt);
86391+void gr_log_chroot_exec(const struct dentry *dentry,
86392+ const struct vfsmount *mnt);
86393+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
86394+#ifdef CONFIG_COMPAT
86395+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
86396+#endif
86397+void gr_log_remount(const char *devname, const int retval);
86398+void gr_log_unmount(const char *devname, const int retval);
86399+void gr_log_mount(const char *from, const char *to, const int retval);
86400+void gr_log_textrel(struct vm_area_struct *vma);
86401+void gr_log_rwxmmap(struct file *file);
86402+void gr_log_rwxmprotect(struct file *file);
86403+
86404+int gr_handle_follow_link(const struct inode *parent,
86405+ const struct inode *inode,
86406+ const struct dentry *dentry,
86407+ const struct vfsmount *mnt);
86408+int gr_handle_fifo(const struct dentry *dentry,
86409+ const struct vfsmount *mnt,
86410+ const struct dentry *dir, const int flag,
86411+ const int acc_mode);
86412+int gr_handle_hardlink(const struct dentry *dentry,
86413+ const struct vfsmount *mnt,
86414+ struct inode *inode,
86415+ const int mode, const char *to);
86416+
86417+int gr_is_capable(const int cap);
86418+int gr_is_capable_nolog(const int cap);
86419+void gr_learn_resource(const struct task_struct *task, const int limit,
86420+ const unsigned long wanted, const int gt);
86421+void gr_copy_label(struct task_struct *tsk);
86422+void gr_handle_crash(struct task_struct *task, const int sig);
86423+int gr_handle_signal(const struct task_struct *p, const int sig);
86424+int gr_check_crash_uid(const uid_t uid);
86425+int gr_check_protected_task(const struct task_struct *task);
86426+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
86427+int gr_acl_handle_mmap(const struct file *file,
86428+ const unsigned long prot);
86429+int gr_acl_handle_mprotect(const struct file *file,
86430+ const unsigned long prot);
86431+int gr_check_hidden_task(const struct task_struct *tsk);
86432+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
86433+ const struct vfsmount *mnt);
86434+__u32 gr_acl_handle_utime(const struct dentry *dentry,
86435+ const struct vfsmount *mnt);
86436+__u32 gr_acl_handle_access(const struct dentry *dentry,
86437+ const struct vfsmount *mnt, const int fmode);
86438+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
86439+ const struct vfsmount *mnt, umode_t *mode);
86440+__u32 gr_acl_handle_chown(const struct dentry *dentry,
86441+ const struct vfsmount *mnt);
86442+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
86443+ const struct vfsmount *mnt);
86444+int gr_handle_ptrace(struct task_struct *task, const long request);
86445+int gr_handle_proc_ptrace(struct task_struct *task);
86446+__u32 gr_acl_handle_execve(const struct dentry *dentry,
86447+ const struct vfsmount *mnt);
86448+int gr_check_crash_exec(const struct file *filp);
86449+int gr_acl_is_enabled(void);
86450+void gr_set_kernel_label(struct task_struct *task);
86451+void gr_set_role_label(struct task_struct *task, const uid_t uid,
86452+ const gid_t gid);
86453+int gr_set_proc_label(const struct dentry *dentry,
86454+ const struct vfsmount *mnt,
86455+ const int unsafe_flags);
86456+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
86457+ const struct vfsmount *mnt);
86458+__u32 gr_acl_handle_open(const struct dentry *dentry,
86459+ const struct vfsmount *mnt, int acc_mode);
86460+__u32 gr_acl_handle_creat(const struct dentry *dentry,
86461+ const struct dentry *p_dentry,
86462+ const struct vfsmount *p_mnt,
86463+ int open_flags, int acc_mode, const int imode);
86464+void gr_handle_create(const struct dentry *dentry,
86465+ const struct vfsmount *mnt);
86466+void gr_handle_proc_create(const struct dentry *dentry,
86467+ const struct inode *inode);
86468+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
86469+ const struct dentry *parent_dentry,
86470+ const struct vfsmount *parent_mnt,
86471+ const int mode);
86472+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
86473+ const struct dentry *parent_dentry,
86474+ const struct vfsmount *parent_mnt);
86475+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
86476+ const struct vfsmount *mnt);
86477+void gr_handle_delete(const ino_t ino, const dev_t dev);
86478+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
86479+ const struct vfsmount *mnt);
86480+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
86481+ const struct dentry *parent_dentry,
86482+ const struct vfsmount *parent_mnt,
86483+ const char *from);
86484+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
86485+ const struct dentry *parent_dentry,
86486+ const struct vfsmount *parent_mnt,
86487+ const struct dentry *old_dentry,
86488+ const struct vfsmount *old_mnt, const char *to);
86489+int gr_acl_handle_rename(struct dentry *new_dentry,
86490+ struct dentry *parent_dentry,
86491+ const struct vfsmount *parent_mnt,
86492+ struct dentry *old_dentry,
86493+ struct inode *old_parent_inode,
86494+ struct vfsmount *old_mnt, const char *newname);
86495+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
86496+ struct dentry *old_dentry,
86497+ struct dentry *new_dentry,
86498+ struct vfsmount *mnt, const __u8 replace);
86499+__u32 gr_check_link(const struct dentry *new_dentry,
86500+ const struct dentry *parent_dentry,
86501+ const struct vfsmount *parent_mnt,
86502+ const struct dentry *old_dentry,
86503+ const struct vfsmount *old_mnt);
86504+int gr_acl_handle_filldir(const struct file *file, const char *name,
86505+ const unsigned int namelen, const ino_t ino);
86506+
86507+__u32 gr_acl_handle_unix(const struct dentry *dentry,
86508+ const struct vfsmount *mnt);
86509+void gr_acl_handle_exit(void);
86510+void gr_acl_handle_psacct(struct task_struct *task, const long code);
86511+int gr_acl_handle_procpidmem(const struct task_struct *task);
86512+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
86513+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
86514+void gr_audit_ptrace(struct task_struct *task);
86515+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
86516+
86517+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
86518+
86519+#ifdef CONFIG_GRKERNSEC
86520+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
86521+void gr_handle_vm86(void);
86522+void gr_handle_mem_readwrite(u64 from, u64 to);
86523+
86524+void gr_log_badprocpid(const char *entry);
86525+
86526+extern int grsec_enable_dmesg;
86527+extern int grsec_disable_privio;
86528+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
86529+extern int grsec_enable_chroot_findtask;
86530+#endif
86531+#ifdef CONFIG_GRKERNSEC_SETXID
86532+extern int grsec_enable_setxid;
86533+#endif
86534+#endif
86535+
86536+#endif
86537diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
86538index 6a87154..a3ce57b 100644
86539--- a/include/linux/hdpu_features.h
86540+++ b/include/linux/hdpu_features.h
86541@@ -3,7 +3,7 @@
86542 struct cpustate_t {
86543 spinlock_t lock;
86544 int excl;
86545- int open_count;
86546+ atomic_t open_count;
86547 unsigned char cached_val;
86548 int inited;
86549 unsigned long *set_addr;
86550diff --git a/include/linux/highmem.h b/include/linux/highmem.h
86551index 211ff44..00ab6d7 100644
86552--- a/include/linux/highmem.h
86553+++ b/include/linux/highmem.h
86554@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
86555 kunmap_atomic(kaddr, KM_USER0);
86556 }
86557
86558+static inline void sanitize_highpage(struct page *page)
86559+{
86560+ void *kaddr;
86561+ unsigned long flags;
86562+
86563+ local_irq_save(flags);
86564+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
86565+ clear_page(kaddr);
86566+ kunmap_atomic(kaddr, KM_CLEARPAGE);
86567+ local_irq_restore(flags);
86568+}
86569+
86570 static inline void zero_user_segments(struct page *page,
86571 unsigned start1, unsigned end1,
86572 unsigned start2, unsigned end2)
86573diff --git a/include/linux/i2c.h b/include/linux/i2c.h
86574index 7b40cda..24eb44e 100644
86575--- a/include/linux/i2c.h
86576+++ b/include/linux/i2c.h
86577@@ -325,6 +325,7 @@ struct i2c_algorithm {
86578 /* To determine what the adapter supports */
86579 u32 (*functionality) (struct i2c_adapter *);
86580 };
86581+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
86582
86583 /*
86584 * i2c_adapter is the structure used to identify a physical i2c bus along
86585diff --git a/include/linux/i2o.h b/include/linux/i2o.h
86586index 4c4e57d..f3c5303 100644
86587--- a/include/linux/i2o.h
86588+++ b/include/linux/i2o.h
86589@@ -564,7 +564,7 @@ struct i2o_controller {
86590 struct i2o_device *exec; /* Executive */
86591 #if BITS_PER_LONG == 64
86592 spinlock_t context_list_lock; /* lock for context_list */
86593- atomic_t context_list_counter; /* needed for unique contexts */
86594+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
86595 struct list_head context_list; /* list of context id's
86596 and pointers */
86597 #endif
86598diff --git a/include/linux/init_task.h b/include/linux/init_task.h
86599index 21a6f5d..dc42eab 100644
86600--- a/include/linux/init_task.h
86601+++ b/include/linux/init_task.h
86602@@ -83,6 +83,12 @@ extern struct group_info init_groups;
86603 #define INIT_IDS
86604 #endif
86605
86606+#ifdef CONFIG_X86
86607+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
86608+#else
86609+#define INIT_TASK_THREAD_INFO
86610+#endif
86611+
86612 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
86613 /*
86614 * Because of the reduced scope of CAP_SETPCAP when filesystem
86615@@ -156,6 +162,7 @@ extern struct cred init_cred;
86616 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
86617 .comm = "swapper", \
86618 .thread = INIT_THREAD, \
86619+ INIT_TASK_THREAD_INFO \
86620 .fs = &init_fs, \
86621 .files = &init_files, \
86622 .signal = &init_signals, \
86623diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
86624index 4f0a72a..a849599 100644
86625--- a/include/linux/intel-iommu.h
86626+++ b/include/linux/intel-iommu.h
86627@@ -296,7 +296,7 @@ struct iommu_flush {
86628 u8 fm, u64 type);
86629 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
86630 unsigned int size_order, u64 type);
86631-};
86632+} __no_const;
86633
86634 enum {
86635 SR_DMAR_FECTL_REG,
86636diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
86637index c739150..be577b5 100644
86638--- a/include/linux/interrupt.h
86639+++ b/include/linux/interrupt.h
86640@@ -369,7 +369,7 @@ enum
86641 /* map softirq index to softirq name. update 'softirq_to_name' in
86642 * kernel/softirq.c when adding a new softirq.
86643 */
86644-extern char *softirq_to_name[NR_SOFTIRQS];
86645+extern const char * const softirq_to_name[NR_SOFTIRQS];
86646
86647 /* softirq mask and active fields moved to irq_cpustat_t in
86648 * asm/hardirq.h to get better cache usage. KAO
86649@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
86650
86651 struct softirq_action
86652 {
86653- void (*action)(struct softirq_action *);
86654+ void (*action)(void);
86655 };
86656
86657 asmlinkage void do_softirq(void);
86658 asmlinkage void __do_softirq(void);
86659-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
86660+extern void open_softirq(int nr, void (*action)(void));
86661 extern void softirq_init(void);
86662 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
86663 extern void raise_softirq_irqoff(unsigned int nr);
86664diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
86665index eb73632..19abfc1 100644
86666--- a/include/linux/iocontext.h
86667+++ b/include/linux/iocontext.h
86668@@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
86669 return NULL;
86670 }
86671
86672+struct task_struct;
86673 #ifdef CONFIG_BLOCK
86674 int put_io_context(struct io_context *ioc);
86675-void exit_io_context(void);
86676+void exit_io_context(struct task_struct *task);
86677 struct io_context *get_io_context(gfp_t gfp_flags, int node);
86678 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
86679 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
86680 #else
86681-static inline void exit_io_context(void)
86682+static inline void exit_io_context(struct task_struct *task)
86683 {
86684 }
86685
86686diff --git a/include/linux/irq.h b/include/linux/irq.h
86687index 9e5f45a..025865b 100644
86688--- a/include/linux/irq.h
86689+++ b/include/linux/irq.h
86690@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
86691 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
86692 bool boot)
86693 {
86694+#ifdef CONFIG_CPUMASK_OFFSTACK
86695 gfp_t gfp = GFP_ATOMIC;
86696
86697 if (boot)
86698 gfp = GFP_NOWAIT;
86699
86700-#ifdef CONFIG_CPUMASK_OFFSTACK
86701 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
86702 return false;
86703
86704diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
86705index 7922742..27306a2 100644
86706--- a/include/linux/kallsyms.h
86707+++ b/include/linux/kallsyms.h
86708@@ -15,7 +15,8 @@
86709
86710 struct module;
86711
86712-#ifdef CONFIG_KALLSYMS
86713+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
86714+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86715 /* Lookup the address for a symbol. Returns 0 if not found. */
86716 unsigned long kallsyms_lookup_name(const char *name);
86717
86718@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
86719 /* Stupid that this does nothing, but I didn't create this mess. */
86720 #define __print_symbol(fmt, addr)
86721 #endif /*CONFIG_KALLSYMS*/
86722+#else /* when included by kallsyms.c, vsnprintf.c, or
86723+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
86724+extern void __print_symbol(const char *fmt, unsigned long address);
86725+extern int sprint_symbol(char *buffer, unsigned long address);
86726+const char *kallsyms_lookup(unsigned long addr,
86727+ unsigned long *symbolsize,
86728+ unsigned long *offset,
86729+ char **modname, char *namebuf);
86730+#endif
86731
86732 /* This macro allows us to keep printk typechecking */
86733 static void __check_printsym_format(const char *fmt, ...)
86734diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
86735index 6adcc29..13369e8 100644
86736--- a/include/linux/kgdb.h
86737+++ b/include/linux/kgdb.h
86738@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
86739
86740 extern int kgdb_connected;
86741
86742-extern atomic_t kgdb_setting_breakpoint;
86743-extern atomic_t kgdb_cpu_doing_single_step;
86744+extern atomic_unchecked_t kgdb_setting_breakpoint;
86745+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
86746
86747 extern struct task_struct *kgdb_usethread;
86748 extern struct task_struct *kgdb_contthread;
86749@@ -235,7 +235,7 @@ struct kgdb_arch {
86750 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
86751 void (*remove_all_hw_break)(void);
86752 void (*correct_hw_break)(void);
86753-};
86754+} __do_const;
86755
86756 /**
86757 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
86758@@ -257,14 +257,14 @@ struct kgdb_io {
86759 int (*init) (void);
86760 void (*pre_exception) (void);
86761 void (*post_exception) (void);
86762-};
86763+} __do_const;
86764
86765-extern struct kgdb_arch arch_kgdb_ops;
86766+extern const struct kgdb_arch arch_kgdb_ops;
86767
86768 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
86769
86770-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
86771-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
86772+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
86773+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
86774
86775 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
86776 extern int kgdb_mem2hex(char *mem, char *buf, int count);
86777diff --git a/include/linux/kmod.h b/include/linux/kmod.h
86778index 0546fe7..2a22bc1 100644
86779--- a/include/linux/kmod.h
86780+++ b/include/linux/kmod.h
86781@@ -31,6 +31,8 @@
86782 * usually useless though. */
86783 extern int __request_module(bool wait, const char *name, ...) \
86784 __attribute__((format(printf, 2, 3)));
86785+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
86786+ __attribute__((format(printf, 3, 4)));
86787 #define request_module(mod...) __request_module(true, mod)
86788 #define request_module_nowait(mod...) __request_module(false, mod)
86789 #define try_then_request_module(x, mod...) \
86790diff --git a/include/linux/kobject.h b/include/linux/kobject.h
86791index 58ae8e0..3950d3c 100644
86792--- a/include/linux/kobject.h
86793+++ b/include/linux/kobject.h
86794@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
86795
86796 struct kobj_type {
86797 void (*release)(struct kobject *kobj);
86798- struct sysfs_ops *sysfs_ops;
86799+ const struct sysfs_ops *sysfs_ops;
86800 struct attribute **default_attrs;
86801 };
86802
86803@@ -118,9 +118,9 @@ struct kobj_uevent_env {
86804 };
86805
86806 struct kset_uevent_ops {
86807- int (*filter)(struct kset *kset, struct kobject *kobj);
86808- const char *(*name)(struct kset *kset, struct kobject *kobj);
86809- int (*uevent)(struct kset *kset, struct kobject *kobj,
86810+ int (* const filter)(struct kset *kset, struct kobject *kobj);
86811+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
86812+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
86813 struct kobj_uevent_env *env);
86814 };
86815
86816@@ -132,7 +132,7 @@ struct kobj_attribute {
86817 const char *buf, size_t count);
86818 };
86819
86820-extern struct sysfs_ops kobj_sysfs_ops;
86821+extern const struct sysfs_ops kobj_sysfs_ops;
86822
86823 /**
86824 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
86825@@ -155,14 +155,14 @@ struct kset {
86826 struct list_head list;
86827 spinlock_t list_lock;
86828 struct kobject kobj;
86829- struct kset_uevent_ops *uevent_ops;
86830+ const struct kset_uevent_ops *uevent_ops;
86831 };
86832
86833 extern void kset_init(struct kset *kset);
86834 extern int __must_check kset_register(struct kset *kset);
86835 extern void kset_unregister(struct kset *kset);
86836 extern struct kset * __must_check kset_create_and_add(const char *name,
86837- struct kset_uevent_ops *u,
86838+ const struct kset_uevent_ops *u,
86839 struct kobject *parent_kobj);
86840
86841 static inline struct kset *to_kset(struct kobject *kobj)
86842diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
86843index c728a50..762821f 100644
86844--- a/include/linux/kvm_host.h
86845+++ b/include/linux/kvm_host.h
86846@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
86847 void vcpu_load(struct kvm_vcpu *vcpu);
86848 void vcpu_put(struct kvm_vcpu *vcpu);
86849
86850-int kvm_init(void *opaque, unsigned int vcpu_size,
86851+int kvm_init(const void *opaque, unsigned int vcpu_size,
86852 struct module *module);
86853 void kvm_exit(void);
86854
86855@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
86856 struct kvm_guest_debug *dbg);
86857 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
86858
86859-int kvm_arch_init(void *opaque);
86860+int kvm_arch_init(const void *opaque);
86861 void kvm_arch_exit(void);
86862
86863 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
86864@@ -519,7 +519,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
86865 int kvm_set_irq_routing(struct kvm *kvm,
86866 const struct kvm_irq_routing_entry *entries,
86867 unsigned nr,
86868- unsigned flags);
86869+ unsigned flags) __size_overflow(3);
86870 void kvm_free_irq_routing(struct kvm *kvm);
86871
86872 #else
86873diff --git a/include/linux/libata.h b/include/linux/libata.h
86874index a069916..223edde 100644
86875--- a/include/linux/libata.h
86876+++ b/include/linux/libata.h
86877@@ -525,11 +525,11 @@ struct ata_ioports {
86878
86879 struct ata_host {
86880 spinlock_t lock;
86881- struct device *dev;
86882+ struct device *dev;
86883 void __iomem * const *iomap;
86884 unsigned int n_ports;
86885 void *private_data;
86886- struct ata_port_operations *ops;
86887+ const struct ata_port_operations *ops;
86888 unsigned long flags;
86889 #ifdef CONFIG_ATA_ACPI
86890 acpi_handle acpi_handle;
86891@@ -710,7 +710,7 @@ struct ata_link {
86892
86893 struct ata_port {
86894 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
86895- struct ata_port_operations *ops;
86896+ const struct ata_port_operations *ops;
86897 spinlock_t *lock;
86898 /* Flags owned by the EH context. Only EH should touch these once the
86899 port is active */
86900@@ -884,7 +884,7 @@ struct ata_port_operations {
86901 * fields must be pointers.
86902 */
86903 const struct ata_port_operations *inherits;
86904-};
86905+} __do_const;
86906
86907 struct ata_port_info {
86908 unsigned long flags;
86909@@ -892,7 +892,7 @@ struct ata_port_info {
86910 unsigned long pio_mask;
86911 unsigned long mwdma_mask;
86912 unsigned long udma_mask;
86913- struct ata_port_operations *port_ops;
86914+ const struct ata_port_operations *port_ops;
86915 void *private_data;
86916 };
86917
86918@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
86919 extern const unsigned long sata_deb_timing_hotplug[];
86920 extern const unsigned long sata_deb_timing_long[];
86921
86922-extern struct ata_port_operations ata_dummy_port_ops;
86923+extern const struct ata_port_operations ata_dummy_port_ops;
86924 extern const struct ata_port_info ata_dummy_port_info;
86925
86926 static inline const unsigned long *
86927@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
86928 struct scsi_host_template *sht);
86929 extern void ata_host_detach(struct ata_host *host);
86930 extern void ata_host_init(struct ata_host *, struct device *,
86931- unsigned long, struct ata_port_operations *);
86932+ unsigned long, const struct ata_port_operations *);
86933 extern int ata_scsi_detect(struct scsi_host_template *sht);
86934 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
86935 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
86936diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
86937index fbc48f8..0886e57 100644
86938--- a/include/linux/lockd/bind.h
86939+++ b/include/linux/lockd/bind.h
86940@@ -23,13 +23,13 @@ struct svc_rqst;
86941 * This is the set of functions for lockd->nfsd communication
86942 */
86943 struct nlmsvc_binding {
86944- __be32 (*fopen)(struct svc_rqst *,
86945+ __be32 (* const fopen)(struct svc_rqst *,
86946 struct nfs_fh *,
86947 struct file **);
86948- void (*fclose)(struct file *);
86949+ void (* const fclose)(struct file *);
86950 };
86951
86952-extern struct nlmsvc_binding * nlmsvc_ops;
86953+extern const struct nlmsvc_binding * nlmsvc_ops;
86954
86955 /*
86956 * Similar to nfs_client_initdata, but without the NFS-specific
86957diff --git a/include/linux/mca.h b/include/linux/mca.h
86958index 3797270..7765ede 100644
86959--- a/include/linux/mca.h
86960+++ b/include/linux/mca.h
86961@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
86962 int region);
86963 void * (*mca_transform_memory)(struct mca_device *,
86964 void *memory);
86965-};
86966+} __no_const;
86967
86968 struct mca_bus {
86969 u64 default_dma_mask;
86970diff --git a/include/linux/memory.h b/include/linux/memory.h
86971index 37fa19b..b597c85 100644
86972--- a/include/linux/memory.h
86973+++ b/include/linux/memory.h
86974@@ -108,7 +108,7 @@ struct memory_accessor {
86975 size_t count);
86976 ssize_t (*write)(struct memory_accessor *, const char *buf,
86977 off_t offset, size_t count);
86978-};
86979+} __no_const;
86980
86981 /*
86982 * Kernel text modification mutex, used for code patching. Users of this lock
86983diff --git a/include/linux/mm.h b/include/linux/mm.h
86984index 11e5be6..1ff2423 100644
86985--- a/include/linux/mm.h
86986+++ b/include/linux/mm.h
86987@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
86988
86989 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
86990 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
86991+
86992+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
86993+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
86994+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
86995+#else
86996 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
86997+#endif
86998+
86999 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
87000 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
87001
87002@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
87003 int set_page_dirty_lock(struct page *page);
87004 int clear_page_dirty_for_io(struct page *page);
87005
87006-/* Is the vma a continuation of the stack vma above it? */
87007-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
87008-{
87009- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
87010-}
87011-
87012 extern unsigned long move_page_tables(struct vm_area_struct *vma,
87013 unsigned long old_addr, struct vm_area_struct *new_vma,
87014 unsigned long new_addr, unsigned long len);
87015@@ -890,6 +891,8 @@ struct shrinker {
87016 extern void register_shrinker(struct shrinker *);
87017 extern void unregister_shrinker(struct shrinker *);
87018
87019+pgprot_t vm_get_page_prot(unsigned long vm_flags);
87020+
87021 int vma_wants_writenotify(struct vm_area_struct *vma);
87022
87023 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
87024@@ -1162,6 +1165,7 @@ out:
87025 }
87026
87027 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
87028+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
87029
87030 extern unsigned long do_brk(unsigned long, unsigned long);
87031
87032@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
87033 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
87034 struct vm_area_struct **pprev);
87035
87036+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
87037+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
87038+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
87039+
87040 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
87041 NULL if none. Assume start_addr < end_addr. */
87042 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
87043@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
87044 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
87045 }
87046
87047-pgprot_t vm_get_page_prot(unsigned long vm_flags);
87048 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
87049 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
87050 unsigned long pfn, unsigned long size, pgprot_t);
87051@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
87052 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
87053 extern int sysctl_memory_failure_early_kill;
87054 extern int sysctl_memory_failure_recovery;
87055-extern atomic_long_t mce_bad_pages;
87056+extern atomic_long_unchecked_t mce_bad_pages;
87057+
87058+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
87059+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
87060+#else
87061+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
87062+#endif
87063
87064 #endif /* __KERNEL__ */
87065 #endif /* _LINUX_MM_H */
87066diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
87067index 9d12ed5..6d9707a 100644
87068--- a/include/linux/mm_types.h
87069+++ b/include/linux/mm_types.h
87070@@ -186,6 +186,8 @@ struct vm_area_struct {
87071 #ifdef CONFIG_NUMA
87072 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
87073 #endif
87074+
87075+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
87076 };
87077
87078 struct core_thread {
87079@@ -287,6 +289,24 @@ struct mm_struct {
87080 #ifdef CONFIG_MMU_NOTIFIER
87081 struct mmu_notifier_mm *mmu_notifier_mm;
87082 #endif
87083+
87084+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87085+ unsigned long pax_flags;
87086+#endif
87087+
87088+#ifdef CONFIG_PAX_DLRESOLVE
87089+ unsigned long call_dl_resolve;
87090+#endif
87091+
87092+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
87093+ unsigned long call_syscall;
87094+#endif
87095+
87096+#ifdef CONFIG_PAX_ASLR
87097+ unsigned long delta_mmap; /* randomized offset */
87098+ unsigned long delta_stack; /* randomized offset */
87099+#endif
87100+
87101 };
87102
87103 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
87104diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
87105index 4e02ee2..afb159e 100644
87106--- a/include/linux/mmu_notifier.h
87107+++ b/include/linux/mmu_notifier.h
87108@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
87109 */
87110 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
87111 ({ \
87112- pte_t __pte; \
87113+ pte_t ___pte; \
87114 struct vm_area_struct *___vma = __vma; \
87115 unsigned long ___address = __address; \
87116- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
87117+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
87118 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
87119- __pte; \
87120+ ___pte; \
87121 })
87122
87123 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
87124diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
87125index 6c31a2a..4b0e930 100644
87126--- a/include/linux/mmzone.h
87127+++ b/include/linux/mmzone.h
87128@@ -350,7 +350,7 @@ struct zone {
87129 unsigned long flags; /* zone flags, see below */
87130
87131 /* Zone statistics */
87132- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87133+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
87134
87135 /*
87136 * prev_priority holds the scanning priority for this zone. It is
87137diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
87138index f58e9d8..3503935 100644
87139--- a/include/linux/mod_devicetable.h
87140+++ b/include/linux/mod_devicetable.h
87141@@ -12,7 +12,7 @@
87142 typedef unsigned long kernel_ulong_t;
87143 #endif
87144
87145-#define PCI_ANY_ID (~0)
87146+#define PCI_ANY_ID ((__u16)~0)
87147
87148 struct pci_device_id {
87149 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
87150@@ -131,7 +131,7 @@ struct usb_device_id {
87151 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
87152 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
87153
87154-#define HID_ANY_ID (~0)
87155+#define HID_ANY_ID (~0U)
87156
87157 struct hid_device_id {
87158 __u16 bus;
87159diff --git a/include/linux/module.h b/include/linux/module.h
87160index 482efc8..642032b 100644
87161--- a/include/linux/module.h
87162+++ b/include/linux/module.h
87163@@ -16,6 +16,7 @@
87164 #include <linux/kobject.h>
87165 #include <linux/moduleparam.h>
87166 #include <linux/tracepoint.h>
87167+#include <linux/fs.h>
87168
87169 #include <asm/local.h>
87170 #include <asm/module.h>
87171@@ -287,16 +288,16 @@ struct module
87172 int (*init)(void);
87173
87174 /* If this is non-NULL, vfree after init() returns */
87175- void *module_init;
87176+ void *module_init_rx, *module_init_rw;
87177
87178 /* Here is the actual code + data, vfree'd on unload. */
87179- void *module_core;
87180+ void *module_core_rx, *module_core_rw;
87181
87182 /* Here are the sizes of the init and core sections */
87183- unsigned int init_size, core_size;
87184+ unsigned int init_size_rw, core_size_rw;
87185
87186 /* The size of the executable code in each section. */
87187- unsigned int init_text_size, core_text_size;
87188+ unsigned int init_size_rx, core_size_rx;
87189
87190 /* Arch-specific module values */
87191 struct mod_arch_specific arch;
87192@@ -345,6 +346,10 @@ struct module
87193 #ifdef CONFIG_EVENT_TRACING
87194 struct ftrace_event_call *trace_events;
87195 unsigned int num_trace_events;
87196+ struct file_operations trace_id;
87197+ struct file_operations trace_enable;
87198+ struct file_operations trace_format;
87199+ struct file_operations trace_filter;
87200 #endif
87201 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
87202 unsigned long *ftrace_callsites;
87203@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
87204 bool is_module_address(unsigned long addr);
87205 bool is_module_text_address(unsigned long addr);
87206
87207+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
87208+{
87209+
87210+#ifdef CONFIG_PAX_KERNEXEC
87211+ if (ktla_ktva(addr) >= (unsigned long)start &&
87212+ ktla_ktva(addr) < (unsigned long)start + size)
87213+ return 1;
87214+#endif
87215+
87216+ return ((void *)addr >= start && (void *)addr < start + size);
87217+}
87218+
87219+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
87220+{
87221+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
87222+}
87223+
87224+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
87225+{
87226+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
87227+}
87228+
87229+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
87230+{
87231+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
87232+}
87233+
87234+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
87235+{
87236+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
87237+}
87238+
87239 static inline int within_module_core(unsigned long addr, struct module *mod)
87240 {
87241- return (unsigned long)mod->module_core <= addr &&
87242- addr < (unsigned long)mod->module_core + mod->core_size;
87243+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
87244 }
87245
87246 static inline int within_module_init(unsigned long addr, struct module *mod)
87247 {
87248- return (unsigned long)mod->module_init <= addr &&
87249- addr < (unsigned long)mod->module_init + mod->init_size;
87250+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
87251 }
87252
87253 /* Search for module by name: must hold module_mutex. */
87254diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
87255index c1f40c2..e875ff4 100644
87256--- a/include/linux/moduleloader.h
87257+++ b/include/linux/moduleloader.h
87258@@ -18,11 +18,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
87259
87260 /* Allocator used for allocating struct module, core sections and init
87261 sections. Returns NULL on failure. */
87262-void *module_alloc(unsigned long size);
87263+void *module_alloc(unsigned long size) __size_overflow(1);
87264+
87265+#ifdef CONFIG_PAX_KERNEXEC
87266+void *module_alloc_exec(unsigned long size);
87267+#else
87268+#define module_alloc_exec(x) module_alloc(x)
87269+#endif
87270
87271 /* Free memory returned from module_alloc. */
87272 void module_free(struct module *mod, void *module_region);
87273
87274+#ifdef CONFIG_PAX_KERNEXEC
87275+void module_free_exec(struct module *mod, void *module_region);
87276+#else
87277+#define module_free_exec(x, y) module_free((x), (y))
87278+#endif
87279+
87280 /* Apply the given relocation to the (simplified) ELF. Return -error
87281 or 0. */
87282 int apply_relocate(Elf_Shdr *sechdrs,
87283diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
87284index 82a9124..8a5f622 100644
87285--- a/include/linux/moduleparam.h
87286+++ b/include/linux/moduleparam.h
87287@@ -132,7 +132,7 @@ struct kparam_array
87288
87289 /* Actually copy string: maxlen param is usually sizeof(string). */
87290 #define module_param_string(name, string, len, perm) \
87291- static const struct kparam_string __param_string_##name \
87292+ static const struct kparam_string __param_string_##name __used \
87293 = { len, string }; \
87294 __module_param_call(MODULE_PARAM_PREFIX, name, \
87295 param_set_copystring, param_get_string, \
87296@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
87297
87298 /* Comma-separated array: *nump is set to number they actually specified. */
87299 #define module_param_array_named(name, array, type, nump, perm) \
87300- static const struct kparam_array __param_arr_##name \
87301+ static const struct kparam_array __param_arr_##name __used \
87302 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
87303 sizeof(array[0]), array }; \
87304 __module_param_call(MODULE_PARAM_PREFIX, name, \
87305diff --git a/include/linux/mutex.h b/include/linux/mutex.h
87306index 878cab4..c92cb3e 100644
87307--- a/include/linux/mutex.h
87308+++ b/include/linux/mutex.h
87309@@ -51,7 +51,7 @@ struct mutex {
87310 spinlock_t wait_lock;
87311 struct list_head wait_list;
87312 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
87313- struct thread_info *owner;
87314+ struct task_struct *owner;
87315 #endif
87316 #ifdef CONFIG_DEBUG_MUTEXES
87317 const char *name;
87318diff --git a/include/linux/namei.h b/include/linux/namei.h
87319index ec0f607..d19e675 100644
87320--- a/include/linux/namei.h
87321+++ b/include/linux/namei.h
87322@@ -22,7 +22,7 @@ struct nameidata {
87323 unsigned int flags;
87324 int last_type;
87325 unsigned depth;
87326- char *saved_names[MAX_NESTED_LINKS + 1];
87327+ const char *saved_names[MAX_NESTED_LINKS + 1];
87328
87329 /* Intent data */
87330 union {
87331@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
87332 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
87333 extern void unlock_rename(struct dentry *, struct dentry *);
87334
87335-static inline void nd_set_link(struct nameidata *nd, char *path)
87336+static inline void nd_set_link(struct nameidata *nd, const char *path)
87337 {
87338 nd->saved_names[nd->depth] = path;
87339 }
87340
87341-static inline char *nd_get_link(struct nameidata *nd)
87342+static inline const char *nd_get_link(const struct nameidata *nd)
87343 {
87344 return nd->saved_names[nd->depth];
87345 }
87346diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
87347index 9d7e8f7..04428c5 100644
87348--- a/include/linux/netdevice.h
87349+++ b/include/linux/netdevice.h
87350@@ -637,6 +637,7 @@ struct net_device_ops {
87351 u16 xid);
87352 #endif
87353 };
87354+typedef struct net_device_ops __no_const net_device_ops_no_const;
87355
87356 /*
87357 * The DEVICE structure.
87358diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
87359new file mode 100644
87360index 0000000..33f4af8
87361--- /dev/null
87362+++ b/include/linux/netfilter/xt_gradm.h
87363@@ -0,0 +1,9 @@
87364+#ifndef _LINUX_NETFILTER_XT_GRADM_H
87365+#define _LINUX_NETFILTER_XT_GRADM_H 1
87366+
87367+struct xt_gradm_mtinfo {
87368+ __u16 flags;
87369+ __u16 invflags;
87370+};
87371+
87372+#endif
87373diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
87374index b359c4a..c08b334 100644
87375--- a/include/linux/nodemask.h
87376+++ b/include/linux/nodemask.h
87377@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
87378
87379 #define any_online_node(mask) \
87380 ({ \
87381- int node; \
87382- for_each_node_mask(node, (mask)) \
87383- if (node_online(node)) \
87384+ int __node; \
87385+ for_each_node_mask(__node, (mask)) \
87386+ if (node_online(__node)) \
87387 break; \
87388- node; \
87389+ __node; \
87390 })
87391
87392 #define num_online_nodes() num_node_state(N_ONLINE)
87393diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
87394index 5171639..81f30d3 100644
87395--- a/include/linux/oprofile.h
87396+++ b/include/linux/oprofile.h
87397@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
87398 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
87399 char const * name, ulong * val);
87400
87401-/** Create a file for read-only access to an atomic_t. */
87402+/** Create a file for read-only access to an atomic_unchecked_t. */
87403 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
87404- char const * name, atomic_t * val);
87405+ char const * name, atomic_unchecked_t * val);
87406
87407 /** create a directory */
87408 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
87409@@ -153,7 +153,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
87410 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
87411 * Returns 0 on success, < 0 on error.
87412 */
87413-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
87414+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
87415
87416 /** lock for read/write safety */
87417 extern spinlock_t oprofilefs_lock;
87418diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
87419index 3c62ed4..8924c7c 100644
87420--- a/include/linux/pagemap.h
87421+++ b/include/linux/pagemap.h
87422@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
87423 if (((unsigned long)uaddr & PAGE_MASK) !=
87424 ((unsigned long)end & PAGE_MASK))
87425 ret = __get_user(c, end);
87426+ (void)c;
87427 }
87428+ (void)c;
87429 return ret;
87430 }
87431
87432diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
87433index 81c9689..a567a55 100644
87434--- a/include/linux/perf_event.h
87435+++ b/include/linux/perf_event.h
87436@@ -476,7 +476,7 @@ struct hw_perf_event {
87437 struct hrtimer hrtimer;
87438 };
87439 };
87440- atomic64_t prev_count;
87441+ atomic64_unchecked_t prev_count;
87442 u64 sample_period;
87443 u64 last_period;
87444 atomic64_t period_left;
87445@@ -557,7 +557,7 @@ struct perf_event {
87446 const struct pmu *pmu;
87447
87448 enum perf_event_active_state state;
87449- atomic64_t count;
87450+ atomic64_unchecked_t count;
87451
87452 /*
87453 * These are the total time in nanoseconds that the event
87454@@ -595,8 +595,8 @@ struct perf_event {
87455 * These accumulate total time (in nanoseconds) that children
87456 * events have been enabled and running, respectively.
87457 */
87458- atomic64_t child_total_time_enabled;
87459- atomic64_t child_total_time_running;
87460+ atomic64_unchecked_t child_total_time_enabled;
87461+ atomic64_unchecked_t child_total_time_running;
87462
87463 /*
87464 * Protect attach/detach and child_list:
87465diff --git a/include/linux/personality.h b/include/linux/personality.h
87466index 1261208..ddef96f 100644
87467--- a/include/linux/personality.h
87468+++ b/include/linux/personality.h
87469@@ -43,6 +43,7 @@ enum {
87470 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87471 ADDR_NO_RANDOMIZE | \
87472 ADDR_COMPAT_LAYOUT | \
87473+ ADDR_LIMIT_3GB | \
87474 MMAP_PAGE_ZERO)
87475
87476 /*
87477diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
87478index b43a9e0..b77d869 100644
87479--- a/include/linux/pipe_fs_i.h
87480+++ b/include/linux/pipe_fs_i.h
87481@@ -46,9 +46,9 @@ struct pipe_inode_info {
87482 wait_queue_head_t wait;
87483 unsigned int nrbufs, curbuf;
87484 struct page *tmp_page;
87485- unsigned int readers;
87486- unsigned int writers;
87487- unsigned int waiting_writers;
87488+ atomic_t readers;
87489+ atomic_t writers;
87490+ atomic_t waiting_writers;
87491 unsigned int r_counter;
87492 unsigned int w_counter;
87493 struct fasync_struct *fasync_readers;
87494diff --git a/include/linux/poison.h b/include/linux/poison.h
87495index 34066ff..e95d744 100644
87496--- a/include/linux/poison.h
87497+++ b/include/linux/poison.h
87498@@ -19,8 +19,8 @@
87499 * under normal circumstances, used to verify that nobody uses
87500 * non-initialized list entries.
87501 */
87502-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
87503-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
87504+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
87505+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
87506
87507 /********** include/linux/timer.h **********/
87508 /*
87509diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
87510index 4f71bf4..cd2f68e 100644
87511--- a/include/linux/posix-timers.h
87512+++ b/include/linux/posix-timers.h
87513@@ -82,7 +82,8 @@ struct k_clock {
87514 #define TIMER_RETRY 1
87515 void (*timer_get) (struct k_itimer * timr,
87516 struct itimerspec * cur_setting);
87517-};
87518+} __do_const;
87519+typedef struct k_clock __no_const k_clock_no_const;
87520
87521 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
87522
87523diff --git a/include/linux/preempt.h b/include/linux/preempt.h
87524index 72b1a10..13303a9 100644
87525--- a/include/linux/preempt.h
87526+++ b/include/linux/preempt.h
87527@@ -110,7 +110,7 @@ struct preempt_ops {
87528 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
87529 void (*sched_out)(struct preempt_notifier *notifier,
87530 struct task_struct *next);
87531-};
87532+} __no_const;
87533
87534 /**
87535 * preempt_notifier - key for installing preemption notifiers
87536diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
87537index af7c36a..a93005c 100644
87538--- a/include/linux/prefetch.h
87539+++ b/include/linux/prefetch.h
87540@@ -11,6 +11,7 @@
87541 #define _LINUX_PREFETCH_H
87542
87543 #include <linux/types.h>
87544+#include <linux/const.h>
87545 #include <asm/processor.h>
87546 #include <asm/cache.h>
87547
87548diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
87549index 379eaed..1bf73e3 100644
87550--- a/include/linux/proc_fs.h
87551+++ b/include/linux/proc_fs.h
87552@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
87553 return proc_create_data(name, mode, parent, proc_fops, NULL);
87554 }
87555
87556+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
87557+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
87558+{
87559+#ifdef CONFIG_GRKERNSEC_PROC_USER
87560+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
87561+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87562+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
87563+#else
87564+ return proc_create_data(name, mode, parent, proc_fops, NULL);
87565+#endif
87566+}
87567+
87568+
87569 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
87570 mode_t mode, struct proc_dir_entry *base,
87571 read_proc_t *read_proc, void * data)
87572@@ -256,7 +269,7 @@ union proc_op {
87573 int (*proc_show)(struct seq_file *m,
87574 struct pid_namespace *ns, struct pid *pid,
87575 struct task_struct *task);
87576-};
87577+} __no_const;
87578
87579 struct ctl_table_header;
87580 struct ctl_table;
87581diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
87582index 7456d7d..6c1cfc9 100644
87583--- a/include/linux/ptrace.h
87584+++ b/include/linux/ptrace.h
87585@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
87586 extern void exit_ptrace(struct task_struct *tracer);
87587 #define PTRACE_MODE_READ 1
87588 #define PTRACE_MODE_ATTACH 2
87589-/* Returns 0 on success, -errno on denial. */
87590-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
87591 /* Returns true on success, false on denial. */
87592 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
87593+/* Returns true on success, false on denial. */
87594+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
87595
87596 static inline int ptrace_reparented(struct task_struct *child)
87597 {
87598diff --git a/include/linux/random.h b/include/linux/random.h
87599index 2948046..3262567 100644
87600--- a/include/linux/random.h
87601+++ b/include/linux/random.h
87602@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
87603 u32 random32(void);
87604 void srandom32(u32 seed);
87605
87606+static inline unsigned long pax_get_random_long(void)
87607+{
87608+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
87609+}
87610+
87611 #endif /* __KERNEL___ */
87612
87613 #endif /* _LINUX_RANDOM_H */
87614diff --git a/include/linux/reboot.h b/include/linux/reboot.h
87615index 988e55f..17cb4ef 100644
87616--- a/include/linux/reboot.h
87617+++ b/include/linux/reboot.h
87618@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
87619 * Architecture-specific implementations of sys_reboot commands.
87620 */
87621
87622-extern void machine_restart(char *cmd);
87623-extern void machine_halt(void);
87624-extern void machine_power_off(void);
87625+extern void machine_restart(char *cmd) __noreturn;
87626+extern void machine_halt(void) __noreturn;
87627+extern void machine_power_off(void) __noreturn;
87628
87629 extern void machine_shutdown(void);
87630 struct pt_regs;
87631@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
87632 */
87633
87634 extern void kernel_restart_prepare(char *cmd);
87635-extern void kernel_restart(char *cmd);
87636-extern void kernel_halt(void);
87637-extern void kernel_power_off(void);
87638+extern void kernel_restart(char *cmd) __noreturn;
87639+extern void kernel_halt(void) __noreturn;
87640+extern void kernel_power_off(void) __noreturn;
87641
87642 void ctrl_alt_del(void);
87643
87644@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
87645 * Emergency restart, callable from an interrupt handler.
87646 */
87647
87648-extern void emergency_restart(void);
87649+extern void emergency_restart(void) __noreturn;
87650 #include <asm/emergency-restart.h>
87651
87652 #endif
87653diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
87654index dd31e7b..5b03c5c 100644
87655--- a/include/linux/reiserfs_fs.h
87656+++ b/include/linux/reiserfs_fs.h
87657@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
87658 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
87659
87660 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
87661-#define get_generation(s) atomic_read (&fs_generation(s))
87662+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
87663 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
87664 #define __fs_changed(gen,s) (gen != get_generation (s))
87665 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
87666@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
87667 */
87668
87669 struct item_operations {
87670- int (*bytes_number) (struct item_head * ih, int block_size);
87671- void (*decrement_key) (struct cpu_key *);
87672- int (*is_left_mergeable) (struct reiserfs_key * ih,
87673+ int (* const bytes_number) (struct item_head * ih, int block_size);
87674+ void (* const decrement_key) (struct cpu_key *);
87675+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
87676 unsigned long bsize);
87677- void (*print_item) (struct item_head *, char *item);
87678- void (*check_item) (struct item_head *, char *item);
87679+ void (* const print_item) (struct item_head *, char *item);
87680+ void (* const check_item) (struct item_head *, char *item);
87681
87682- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87683+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87684 int is_affected, int insert_size);
87685- int (*check_left) (struct virtual_item * vi, int free,
87686+ int (* const check_left) (struct virtual_item * vi, int free,
87687 int start_skip, int end_skip);
87688- int (*check_right) (struct virtual_item * vi, int free);
87689- int (*part_size) (struct virtual_item * vi, int from, int to);
87690- int (*unit_num) (struct virtual_item * vi);
87691- void (*print_vi) (struct virtual_item * vi);
87692+ int (* const check_right) (struct virtual_item * vi, int free);
87693+ int (* const part_size) (struct virtual_item * vi, int from, int to);
87694+ int (* const unit_num) (struct virtual_item * vi);
87695+ void (* const print_vi) (struct virtual_item * vi);
87696 };
87697
87698-extern struct item_operations *item_ops[TYPE_ANY + 1];
87699+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
87700
87701 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
87702 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
87703diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
87704index dab68bb..0688727 100644
87705--- a/include/linux/reiserfs_fs_sb.h
87706+++ b/include/linux/reiserfs_fs_sb.h
87707@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
87708 /* Comment? -Hans */
87709 wait_queue_head_t s_wait;
87710 /* To be obsoleted soon by per buffer seals.. -Hans */
87711- atomic_t s_generation_counter; // increased by one every time the
87712+ atomic_unchecked_t s_generation_counter; // increased by one every time the
87713 // tree gets re-balanced
87714 unsigned long s_properties; /* File system properties. Currently holds
87715 on-disk FS format */
87716diff --git a/include/linux/relay.h b/include/linux/relay.h
87717index 14a86bc..17d0700 100644
87718--- a/include/linux/relay.h
87719+++ b/include/linux/relay.h
87720@@ -159,7 +159,7 @@ struct rchan_callbacks
87721 * The callback should return 0 if successful, negative if not.
87722 */
87723 int (*remove_buf_file)(struct dentry *dentry);
87724-};
87725+} __no_const;
87726
87727 /*
87728 * CONFIG_RELAY kernel API, kernel/relay.c
87729diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
87730index 3392c59..a746428 100644
87731--- a/include/linux/rfkill.h
87732+++ b/include/linux/rfkill.h
87733@@ -144,6 +144,7 @@ struct rfkill_ops {
87734 void (*query)(struct rfkill *rfkill, void *data);
87735 int (*set_block)(void *data, bool blocked);
87736 };
87737+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
87738
87739 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
87740 /**
87741diff --git a/include/linux/sched.h b/include/linux/sched.h
87742index 71849bf..8cf9dd2 100644
87743--- a/include/linux/sched.h
87744+++ b/include/linux/sched.h
87745@@ -101,6 +101,7 @@ struct bio;
87746 struct fs_struct;
87747 struct bts_context;
87748 struct perf_event_context;
87749+struct linux_binprm;
87750
87751 /*
87752 * List of flags we want to share for kernel threads,
87753@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
87754 extern signed long schedule_timeout_uninterruptible(signed long timeout);
87755 asmlinkage void __schedule(void);
87756 asmlinkage void schedule(void);
87757-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
87758+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
87759
87760 struct nsproxy;
87761 struct user_namespace;
87762@@ -371,9 +372,12 @@ struct user_namespace;
87763 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
87764
87765 extern int sysctl_max_map_count;
87766+extern unsigned long sysctl_heap_stack_gap;
87767
87768 #include <linux/aio.h>
87769
87770+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
87771+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
87772 extern unsigned long
87773 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
87774 unsigned long, unsigned long);
87775@@ -666,6 +670,16 @@ struct signal_struct {
87776 struct tty_audit_buf *tty_audit_buf;
87777 #endif
87778
87779+#ifdef CONFIG_GRKERNSEC
87780+ u32 curr_ip;
87781+ u32 saved_ip;
87782+ u32 gr_saddr;
87783+ u32 gr_daddr;
87784+ u16 gr_sport;
87785+ u16 gr_dport;
87786+ u8 used_accept:1;
87787+#endif
87788+
87789 int oom_adj; /* OOM kill score adjustment (bit shift) */
87790 };
87791
87792@@ -723,6 +737,11 @@ struct user_struct {
87793 struct key *session_keyring; /* UID's default session keyring */
87794 #endif
87795
87796+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
87797+ unsigned int banned;
87798+ unsigned long ban_expires;
87799+#endif
87800+
87801 /* Hash table maintenance information */
87802 struct hlist_node uidhash_node;
87803 uid_t uid;
87804@@ -1328,8 +1347,8 @@ struct task_struct {
87805 struct list_head thread_group;
87806
87807 struct completion *vfork_done; /* for vfork() */
87808- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
87809- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87810+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
87811+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87812
87813 cputime_t utime, stime, utimescaled, stimescaled;
87814 cputime_t gtime;
87815@@ -1343,16 +1362,6 @@ struct task_struct {
87816 struct task_cputime cputime_expires;
87817 struct list_head cpu_timers[3];
87818
87819-/* process credentials */
87820- const struct cred *real_cred; /* objective and real subjective task
87821- * credentials (COW) */
87822- const struct cred *cred; /* effective (overridable) subjective task
87823- * credentials (COW) */
87824- struct mutex cred_guard_mutex; /* guard against foreign influences on
87825- * credential calculations
87826- * (notably. ptrace) */
87827- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87828-
87829 char comm[TASK_COMM_LEN]; /* executable name excluding path
87830 - access with [gs]et_task_comm (which lock
87831 it with task_lock())
87832@@ -1369,6 +1378,10 @@ struct task_struct {
87833 #endif
87834 /* CPU-specific state of this task */
87835 struct thread_struct thread;
87836+/* thread_info moved to task_struct */
87837+#ifdef CONFIG_X86
87838+ struct thread_info tinfo;
87839+#endif
87840 /* filesystem information */
87841 struct fs_struct *fs;
87842 /* open file information */
87843@@ -1436,6 +1449,15 @@ struct task_struct {
87844 int hardirq_context;
87845 int softirq_context;
87846 #endif
87847+
87848+/* process credentials */
87849+ const struct cred *real_cred; /* objective and real subjective task
87850+ * credentials (COW) */
87851+ struct mutex cred_guard_mutex; /* guard against foreign influences on
87852+ * credential calculations
87853+ * (notably. ptrace) */
87854+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87855+
87856 #ifdef CONFIG_LOCKDEP
87857 # define MAX_LOCK_DEPTH 48UL
87858 u64 curr_chain_key;
87859@@ -1456,6 +1478,9 @@ struct task_struct {
87860
87861 struct backing_dev_info *backing_dev_info;
87862
87863+ const struct cred *cred; /* effective (overridable) subjective task
87864+ * credentials (COW) */
87865+
87866 struct io_context *io_context;
87867
87868 unsigned long ptrace_message;
87869@@ -1519,6 +1544,27 @@ struct task_struct {
87870 unsigned long default_timer_slack_ns;
87871
87872 struct list_head *scm_work_list;
87873+
87874+#ifdef CONFIG_GRKERNSEC
87875+ /* grsecurity */
87876+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87877+ u64 exec_id;
87878+#endif
87879+#ifdef CONFIG_GRKERNSEC_SETXID
87880+ const struct cred *delayed_cred;
87881+#endif
87882+ struct dentry *gr_chroot_dentry;
87883+ struct acl_subject_label *acl;
87884+ struct acl_role_label *role;
87885+ struct file *exec_file;
87886+ u16 acl_role_id;
87887+ /* is this the task that authenticated to the special role */
87888+ u8 acl_sp_role;
87889+ u8 is_writable;
87890+ u8 brute;
87891+ u8 gr_is_chrooted;
87892+#endif
87893+
87894 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
87895 /* Index of current stored adress in ret_stack */
87896 int curr_ret_stack;
87897@@ -1542,6 +1588,57 @@ struct task_struct {
87898 #endif /* CONFIG_TRACING */
87899 };
87900
87901+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
87902+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
87903+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
87904+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
87905+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
87906+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
87907+
87908+#ifdef CONFIG_PAX_SOFTMODE
87909+extern int pax_softmode;
87910+#endif
87911+
87912+extern int pax_check_flags(unsigned long *);
87913+
87914+/* if tsk != current then task_lock must be held on it */
87915+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87916+static inline unsigned long pax_get_flags(struct task_struct *tsk)
87917+{
87918+ if (likely(tsk->mm))
87919+ return tsk->mm->pax_flags;
87920+ else
87921+ return 0UL;
87922+}
87923+
87924+/* if tsk != current then task_lock must be held on it */
87925+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
87926+{
87927+ if (likely(tsk->mm)) {
87928+ tsk->mm->pax_flags = flags;
87929+ return 0;
87930+ }
87931+ return -EINVAL;
87932+}
87933+#endif
87934+
87935+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
87936+extern void pax_set_initial_flags(struct linux_binprm *bprm);
87937+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
87938+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
87939+#endif
87940+
87941+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
87942+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
87943+extern void pax_report_refcount_overflow(struct pt_regs *regs);
87944+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
87945+
87946+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
87947+extern void pax_track_stack(void);
87948+#else
87949+static inline void pax_track_stack(void) {}
87950+#endif
87951+
87952 /* Future-safe accessor for struct task_struct's cpus_allowed. */
87953 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
87954
87955@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
87956 #define PF_DUMPCORE 0x00000200 /* dumped core */
87957 #define PF_SIGNALED 0x00000400 /* killed by a signal */
87958 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
87959-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
87960+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
87961 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
87962 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
87963 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
87964@@ -1978,7 +2075,9 @@ void yield(void);
87965 extern struct exec_domain default_exec_domain;
87966
87967 union thread_union {
87968+#ifndef CONFIG_X86
87969 struct thread_info thread_info;
87970+#endif
87971 unsigned long stack[THREAD_SIZE/sizeof(long)];
87972 };
87973
87974@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
87975 */
87976
87977 extern struct task_struct *find_task_by_vpid(pid_t nr);
87978+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
87979 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
87980 struct pid_namespace *ns);
87981
87982@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
87983 extern void exit_itimers(struct signal_struct *);
87984 extern void flush_itimer_signals(void);
87985
87986-extern NORET_TYPE void do_group_exit(int);
87987+extern __noreturn void do_group_exit(int);
87988
87989 extern void daemonize(const char *, ...);
87990 extern int allow_signal(int);
87991@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
87992
87993 #endif
87994
87995-static inline int object_is_on_stack(void *obj)
87996+static inline int object_starts_on_stack(void *obj)
87997 {
87998- void *stack = task_stack_page(current);
87999+ const void *stack = task_stack_page(current);
88000
88001 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
88002 }
88003
88004+#ifdef CONFIG_PAX_USERCOPY
88005+extern int object_is_on_stack(const void *obj, unsigned long len);
88006+#endif
88007+
88008 extern void thread_info_cache_init(void);
88009
88010 #ifdef CONFIG_DEBUG_STACK_USAGE
88011@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
88012 return task_rlimit_max(current, limit);
88013 }
88014
88015+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88016+DECLARE_PER_CPU(u64, exec_counter);
88017+static inline void increment_exec_counter(void)
88018+{
88019+ unsigned int cpu;
88020+ u64 *exec_id_ptr;
88021+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
88022+ cpu = get_cpu();
88023+ exec_id_ptr = &per_cpu(exec_counter, cpu);
88024+ *exec_id_ptr += 1ULL << 16;
88025+ current->exec_id = *exec_id_ptr;
88026+ put_cpu();
88027+}
88028+#else
88029+static inline void increment_exec_counter(void) {}
88030+#endif
88031+
88032 #endif /* __KERNEL__ */
88033
88034 #endif
88035diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
88036index 1ee2c05..81b7ec4 100644
88037--- a/include/linux/screen_info.h
88038+++ b/include/linux/screen_info.h
88039@@ -42,7 +42,8 @@ struct screen_info {
88040 __u16 pages; /* 0x32 */
88041 __u16 vesa_attributes; /* 0x34 */
88042 __u32 capabilities; /* 0x36 */
88043- __u8 _reserved[6]; /* 0x3a */
88044+ __u16 vesapm_size; /* 0x3a */
88045+ __u8 _reserved[4]; /* 0x3c */
88046 } __attribute__((packed));
88047
88048 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88049diff --git a/include/linux/security.h b/include/linux/security.h
88050index d40d23f..d739b08 100644
88051--- a/include/linux/security.h
88052+++ b/include/linux/security.h
88053@@ -34,6 +34,7 @@
88054 #include <linux/key.h>
88055 #include <linux/xfrm.h>
88056 #include <linux/gfp.h>
88057+#include <linux/grsecurity.h>
88058 #include <net/flow.h>
88059
88060 /* Maximum number of letters for an LSM name string */
88061@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
88062 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
88063 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
88064 extern int cap_task_setnice(struct task_struct *p, int nice);
88065-extern int cap_syslog(int type);
88066+extern int cap_syslog(int type, bool from_file);
88067 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
88068
88069 struct msghdr;
88070@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
88071 * logging to the console.
88072 * See the syslog(2) manual page for an explanation of the @type values.
88073 * @type contains the type of action.
88074+ * @from_file indicates the context of action (if it came from /proc).
88075 * Return 0 if permission is granted.
88076 * @settime:
88077 * Check permission to change the system time.
88078@@ -1445,7 +1447,7 @@ struct security_operations {
88079 int (*sysctl) (struct ctl_table *table, int op);
88080 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
88081 int (*quota_on) (struct dentry *dentry);
88082- int (*syslog) (int type);
88083+ int (*syslog) (int type, bool from_file);
88084 int (*settime) (struct timespec *ts, struct timezone *tz);
88085 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
88086
88087@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
88088 int security_sysctl(struct ctl_table *table, int op);
88089 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
88090 int security_quota_on(struct dentry *dentry);
88091-int security_syslog(int type);
88092+int security_syslog(int type, bool from_file);
88093 int security_settime(struct timespec *ts, struct timezone *tz);
88094 int security_vm_enough_memory(long pages);
88095 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
88096@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
88097 return 0;
88098 }
88099
88100-static inline int security_syslog(int type)
88101+static inline int security_syslog(int type, bool from_file)
88102 {
88103- return cap_syslog(type);
88104+ return cap_syslog(type, from_file);
88105 }
88106
88107 static inline int security_settime(struct timespec *ts, struct timezone *tz)
88108diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
88109index 8366d8f..cc5f9d6 100644
88110--- a/include/linux/seq_file.h
88111+++ b/include/linux/seq_file.h
88112@@ -23,6 +23,9 @@ struct seq_file {
88113 u64 version;
88114 struct mutex lock;
88115 const struct seq_operations *op;
88116+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
88117+ u64 exec_id;
88118+#endif
88119 void *private;
88120 };
88121
88122@@ -32,6 +35,7 @@ struct seq_operations {
88123 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
88124 int (*show) (struct seq_file *m, void *v);
88125 };
88126+typedef struct seq_operations __no_const seq_operations_no_const;
88127
88128 #define SEQ_SKIP 1
88129
88130diff --git a/include/linux/shm.h b/include/linux/shm.h
88131index eca6235..c7417ed 100644
88132--- a/include/linux/shm.h
88133+++ b/include/linux/shm.h
88134@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
88135 pid_t shm_cprid;
88136 pid_t shm_lprid;
88137 struct user_struct *mlock_user;
88138+#ifdef CONFIG_GRKERNSEC
88139+ time_t shm_createtime;
88140+ pid_t shm_lapid;
88141+#endif
88142 };
88143
88144 /* shm_mode upper byte flags */
88145diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
88146index bcdd660..fd2e332 100644
88147--- a/include/linux/skbuff.h
88148+++ b/include/linux/skbuff.h
88149@@ -14,6 +14,7 @@
88150 #ifndef _LINUX_SKBUFF_H
88151 #define _LINUX_SKBUFF_H
88152
88153+#include <linux/const.h>
88154 #include <linux/kernel.h>
88155 #include <linux/kmemcheck.h>
88156 #include <linux/compiler.h>
88157@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
88158 */
88159 static inline int skb_queue_empty(const struct sk_buff_head *list)
88160 {
88161- return list->next == (struct sk_buff *)list;
88162+ return list->next == (const struct sk_buff *)list;
88163 }
88164
88165 /**
88166@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
88167 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88168 const struct sk_buff *skb)
88169 {
88170- return (skb->next == (struct sk_buff *) list);
88171+ return (skb->next == (const struct sk_buff *) list);
88172 }
88173
88174 /**
88175@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
88176 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
88177 const struct sk_buff *skb)
88178 {
88179- return (skb->prev == (struct sk_buff *) list);
88180+ return (skb->prev == (const struct sk_buff *) list);
88181 }
88182
88183 /**
88184@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
88185 * headroom, you should not reduce this.
88186 */
88187 #ifndef NET_SKB_PAD
88188-#define NET_SKB_PAD 32
88189+#define NET_SKB_PAD (_AC(32,UL))
88190 #endif
88191
88192 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
88193@@ -1489,6 +1490,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
88194 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
88195 }
88196
88197+static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
88198+ unsigned int length, gfp_t gfp)
88199+{
88200+ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
88201+
88202+ if (NET_IP_ALIGN && skb)
88203+ skb_reserve(skb, NET_IP_ALIGN);
88204+ return skb;
88205+}
88206+
88207+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
88208+ unsigned int length)
88209+{
88210+ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
88211+}
88212+
88213 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
88214
88215 /**
88216diff --git a/include/linux/slab.h b/include/linux/slab.h
88217index 2da8372..9e01add 100644
88218--- a/include/linux/slab.h
88219+++ b/include/linux/slab.h
88220@@ -11,12 +11,20 @@
88221
88222 #include <linux/gfp.h>
88223 #include <linux/types.h>
88224+#include <linux/err.h>
88225
88226 /*
88227 * Flags to pass to kmem_cache_create().
88228 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
88229 */
88230 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
88231+
88232+#ifdef CONFIG_PAX_USERCOPY
88233+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
88234+#else
88235+#define SLAB_USERCOPY 0x00000000UL
88236+#endif
88237+
88238 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
88239 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
88240 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
88241@@ -82,10 +90,13 @@
88242 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
88243 * Both make kfree a no-op.
88244 */
88245-#define ZERO_SIZE_PTR ((void *)16)
88246+#define ZERO_SIZE_PTR \
88247+({ \
88248+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
88249+ (void *)(-MAX_ERRNO-1L); \
88250+})
88251
88252-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
88253- (unsigned long)ZERO_SIZE_PTR)
88254+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
88255
88256 /*
88257 * struct kmem_cache related prototypes
88258@@ -133,11 +144,12 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
88259 /*
88260 * Common kmalloc functions provided by all allocators
88261 */
88262-void * __must_check __krealloc(const void *, size_t, gfp_t);
88263-void * __must_check krealloc(const void *, size_t, gfp_t);
88264+void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88265+void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
88266 void kfree(const void *);
88267 void kzfree(const void *);
88268 size_t ksize(const void *);
88269+void check_object_size(const void *ptr, unsigned long n, bool to);
88270
88271 /*
88272 * Allocator specific definitions. These are mainly used to establish optimized
88273@@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
88274 * request comes from.
88275 */
88276 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88277-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88278+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
88279 #define kmalloc_track_caller(size, flags) \
88280 __kmalloc_track_caller(size, flags, _RET_IP_)
88281 #else
88282@@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
88283 * allocation request comes from.
88284 */
88285 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
88286-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
88287+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
88288 #define kmalloc_node_track_caller(size, flags, node) \
88289 __kmalloc_node_track_caller(size, flags, node, \
88290 _RET_IP_)
88291diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
88292index 850d057..33bad48 100644
88293--- a/include/linux/slab_def.h
88294+++ b/include/linux/slab_def.h
88295@@ -69,10 +69,10 @@ struct kmem_cache {
88296 unsigned long node_allocs;
88297 unsigned long node_frees;
88298 unsigned long node_overflow;
88299- atomic_t allochit;
88300- atomic_t allocmiss;
88301- atomic_t freehit;
88302- atomic_t freemiss;
88303+ atomic_unchecked_t allochit;
88304+ atomic_unchecked_t allocmiss;
88305+ atomic_unchecked_t freehit;
88306+ atomic_unchecked_t freemiss;
88307
88308 /*
88309 * If debugging is enabled, then the allocator can add additional
88310@@ -108,7 +108,7 @@ struct cache_sizes {
88311 extern struct cache_sizes malloc_sizes[];
88312
88313 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88314-void *__kmalloc(size_t size, gfp_t flags);
88315+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88316
88317 #ifdef CONFIG_KMEMTRACE
88318 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
88319@@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
88320 }
88321 #endif
88322
88323+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88324 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88325 {
88326 struct kmem_cache *cachep;
88327@@ -163,7 +164,7 @@ found:
88328 }
88329
88330 #ifdef CONFIG_NUMA
88331-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
88332+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88333 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88334
88335 #ifdef CONFIG_KMEMTRACE
88336@@ -180,6 +181,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
88337 }
88338 #endif
88339
88340+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88341 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88342 {
88343 struct kmem_cache *cachep;
88344diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
88345index 0ec00b3..65e7e0e 100644
88346--- a/include/linux/slob_def.h
88347+++ b/include/linux/slob_def.h
88348@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
88349 return kmem_cache_alloc_node(cachep, flags, -1);
88350 }
88351
88352-void *__kmalloc_node(size_t size, gfp_t flags, int node);
88353+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88354
88355+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88356 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88357 {
88358 return __kmalloc_node(size, flags, node);
88359@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88360 * kmalloc is the normal method of allocating memory
88361 * in the kernel.
88362 */
88363+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88364 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88365 {
88366 return __kmalloc_node(size, flags, -1);
88367 }
88368
88369+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88370 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
88371 {
88372 return kmalloc(size, flags);
88373diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
88374index 5ad70a6..8f0e2c8 100644
88375--- a/include/linux/slub_def.h
88376+++ b/include/linux/slub_def.h
88377@@ -86,7 +86,7 @@ struct kmem_cache {
88378 struct kmem_cache_order_objects max;
88379 struct kmem_cache_order_objects min;
88380 gfp_t allocflags; /* gfp flags to use on each alloc */
88381- int refcount; /* Refcount for slab cache destroy */
88382+ atomic_t refcount; /* Refcount for slab cache destroy */
88383 void (*ctor)(void *);
88384 int inuse; /* Offset to metadata */
88385 int align; /* Alignment */
88386@@ -197,6 +197,7 @@ static __always_inline int kmalloc_index(size_t size)
88387 * This ought to end up with a global pointer to the right cache
88388 * in kmalloc_caches.
88389 */
88390+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
88391 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88392 {
88393 int index = kmalloc_index(size);
88394@@ -215,7 +216,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
88395 #endif
88396
88397 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
88398-void *__kmalloc(size_t size, gfp_t flags);
88399+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
88400
88401 #ifdef CONFIG_KMEMTRACE
88402 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
88403@@ -227,6 +228,7 @@ kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
88404 }
88405 #endif
88406
88407+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
88408 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88409 {
88410 unsigned int order = get_order(size);
88411@@ -238,6 +240,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
88412 return ret;
88413 }
88414
88415+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
88416 static __always_inline void *kmalloc(size_t size, gfp_t flags)
88417 {
88418 void *ret;
88419@@ -263,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
88420 }
88421
88422 #ifdef CONFIG_NUMA
88423-void *__kmalloc_node(size_t size, gfp_t flags, int node);
88424+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88425 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
88426
88427 #ifdef CONFIG_KMEMTRACE
88428@@ -280,6 +283,7 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
88429 }
88430 #endif
88431
88432+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
88433 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88434 {
88435 void *ret;
88436diff --git a/include/linux/sonet.h b/include/linux/sonet.h
88437index 67ad11f..0bbd8af 100644
88438--- a/include/linux/sonet.h
88439+++ b/include/linux/sonet.h
88440@@ -61,7 +61,7 @@ struct sonet_stats {
88441 #include <asm/atomic.h>
88442
88443 struct k_sonet_stats {
88444-#define __HANDLE_ITEM(i) atomic_t i
88445+#define __HANDLE_ITEM(i) atomic_unchecked_t i
88446 __SONET_ITEMS
88447 #undef __HANDLE_ITEM
88448 };
88449diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
88450index 6f52b4d..5500323 100644
88451--- a/include/linux/sunrpc/cache.h
88452+++ b/include/linux/sunrpc/cache.h
88453@@ -125,7 +125,7 @@ struct cache_detail {
88454 */
88455 struct cache_req {
88456 struct cache_deferred_req *(*defer)(struct cache_req *req);
88457-};
88458+} __no_const;
88459 /* this must be embedded in a deferred_request that is being
88460 * delayed awaiting cache-fill
88461 */
88462diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
88463index 8ed9642..101ceab 100644
88464--- a/include/linux/sunrpc/clnt.h
88465+++ b/include/linux/sunrpc/clnt.h
88466@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
88467 {
88468 switch (sap->sa_family) {
88469 case AF_INET:
88470- return ntohs(((struct sockaddr_in *)sap)->sin_port);
88471+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
88472 case AF_INET6:
88473- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
88474+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
88475 }
88476 return 0;
88477 }
88478@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
88479 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
88480 const struct sockaddr *src)
88481 {
88482- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
88483+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
88484 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
88485
88486 dsin->sin_family = ssin->sin_family;
88487@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
88488 if (sa->sa_family != AF_INET6)
88489 return 0;
88490
88491- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
88492+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
88493 }
88494
88495 #endif /* __KERNEL__ */
88496diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
88497index c14fe86..393245e 100644
88498--- a/include/linux/sunrpc/svc_rdma.h
88499+++ b/include/linux/sunrpc/svc_rdma.h
88500@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
88501 extern unsigned int svcrdma_max_requests;
88502 extern unsigned int svcrdma_max_req_size;
88503
88504-extern atomic_t rdma_stat_recv;
88505-extern atomic_t rdma_stat_read;
88506-extern atomic_t rdma_stat_write;
88507-extern atomic_t rdma_stat_sq_starve;
88508-extern atomic_t rdma_stat_rq_starve;
88509-extern atomic_t rdma_stat_rq_poll;
88510-extern atomic_t rdma_stat_rq_prod;
88511-extern atomic_t rdma_stat_sq_poll;
88512-extern atomic_t rdma_stat_sq_prod;
88513+extern atomic_unchecked_t rdma_stat_recv;
88514+extern atomic_unchecked_t rdma_stat_read;
88515+extern atomic_unchecked_t rdma_stat_write;
88516+extern atomic_unchecked_t rdma_stat_sq_starve;
88517+extern atomic_unchecked_t rdma_stat_rq_starve;
88518+extern atomic_unchecked_t rdma_stat_rq_poll;
88519+extern atomic_unchecked_t rdma_stat_rq_prod;
88520+extern atomic_unchecked_t rdma_stat_sq_poll;
88521+extern atomic_unchecked_t rdma_stat_sq_prod;
88522
88523 #define RPCRDMA_VERSION 1
88524
88525diff --git a/include/linux/suspend.h b/include/linux/suspend.h
88526index 5e781d8..1e62818 100644
88527--- a/include/linux/suspend.h
88528+++ b/include/linux/suspend.h
88529@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
88530 * which require special recovery actions in that situation.
88531 */
88532 struct platform_suspend_ops {
88533- int (*valid)(suspend_state_t state);
88534- int (*begin)(suspend_state_t state);
88535- int (*prepare)(void);
88536- int (*prepare_late)(void);
88537- int (*enter)(suspend_state_t state);
88538- void (*wake)(void);
88539- void (*finish)(void);
88540- void (*end)(void);
88541- void (*recover)(void);
88542+ int (* const valid)(suspend_state_t state);
88543+ int (* const begin)(suspend_state_t state);
88544+ int (* const prepare)(void);
88545+ int (* const prepare_late)(void);
88546+ int (* const enter)(suspend_state_t state);
88547+ void (* const wake)(void);
88548+ void (* const finish)(void);
88549+ void (* const end)(void);
88550+ void (* const recover)(void);
88551 };
88552
88553 #ifdef CONFIG_SUSPEND
88554@@ -120,7 +120,7 @@ struct platform_suspend_ops {
88555 * suspend_set_ops - set platform dependent suspend operations
88556 * @ops: The new suspend operations to set.
88557 */
88558-extern void suspend_set_ops(struct platform_suspend_ops *ops);
88559+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
88560 extern int suspend_valid_only_mem(suspend_state_t state);
88561
88562 /**
88563@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
88564 #else /* !CONFIG_SUSPEND */
88565 #define suspend_valid_only_mem NULL
88566
88567-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
88568+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
88569 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
88570 #endif /* !CONFIG_SUSPEND */
88571
88572@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
88573 * platforms which require special recovery actions in that situation.
88574 */
88575 struct platform_hibernation_ops {
88576- int (*begin)(void);
88577- void (*end)(void);
88578- int (*pre_snapshot)(void);
88579- void (*finish)(void);
88580- int (*prepare)(void);
88581- int (*enter)(void);
88582- void (*leave)(void);
88583- int (*pre_restore)(void);
88584- void (*restore_cleanup)(void);
88585- void (*recover)(void);
88586+ int (* const begin)(void);
88587+ void (* const end)(void);
88588+ int (* const pre_snapshot)(void);
88589+ void (* const finish)(void);
88590+ int (* const prepare)(void);
88591+ int (* const enter)(void);
88592+ void (* const leave)(void);
88593+ int (* const pre_restore)(void);
88594+ void (* const restore_cleanup)(void);
88595+ void (* const recover)(void);
88596 };
88597
88598 #ifdef CONFIG_HIBERNATION
88599@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
88600 extern void swsusp_unset_page_free(struct page *);
88601 extern unsigned long get_safe_page(gfp_t gfp_mask);
88602
88603-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
88604+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
88605 extern int hibernate(void);
88606 extern bool system_entering_hibernation(void);
88607 #else /* CONFIG_HIBERNATION */
88608@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
88609 static inline void swsusp_set_page_free(struct page *p) {}
88610 static inline void swsusp_unset_page_free(struct page *p) {}
88611
88612-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
88613+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
88614 static inline int hibernate(void) { return -ENOSYS; }
88615 static inline bool system_entering_hibernation(void) { return false; }
88616 #endif /* CONFIG_HIBERNATION */
88617diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
88618index 0eb6942..a805cb6 100644
88619--- a/include/linux/sysctl.h
88620+++ b/include/linux/sysctl.h
88621@@ -164,7 +164,11 @@ enum
88622 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88623 };
88624
88625-
88626+#ifdef CONFIG_PAX_SOFTMODE
88627+enum {
88628+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
88629+};
88630+#endif
88631
88632 /* CTL_VM names: */
88633 enum
88634@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
88635
88636 extern int proc_dostring(struct ctl_table *, int,
88637 void __user *, size_t *, loff_t *);
88638+extern int proc_dostring_modpriv(struct ctl_table *, int,
88639+ void __user *, size_t *, loff_t *);
88640 extern int proc_dointvec(struct ctl_table *, int,
88641 void __user *, size_t *, loff_t *);
88642 extern int proc_dointvec_minmax(struct ctl_table *, int,
88643@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
88644
88645 extern ctl_handler sysctl_data;
88646 extern ctl_handler sysctl_string;
88647+extern ctl_handler sysctl_string_modpriv;
88648 extern ctl_handler sysctl_intvec;
88649 extern ctl_handler sysctl_jiffies;
88650 extern ctl_handler sysctl_ms_jiffies;
88651diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
88652index 9d68fed..71f02cc 100644
88653--- a/include/linux/sysfs.h
88654+++ b/include/linux/sysfs.h
88655@@ -75,8 +75,8 @@ struct bin_attribute {
88656 };
88657
88658 struct sysfs_ops {
88659- ssize_t (*show)(struct kobject *, struct attribute *,char *);
88660- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
88661+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
88662+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
88663 };
88664
88665 struct sysfs_dirent;
88666diff --git a/include/linux/syslog.h b/include/linux/syslog.h
88667new file mode 100644
88668index 0000000..3891139
88669--- /dev/null
88670+++ b/include/linux/syslog.h
88671@@ -0,0 +1,52 @@
88672+/* Syslog internals
88673+ *
88674+ * Copyright 2010 Canonical, Ltd.
88675+ * Author: Kees Cook <kees.cook@canonical.com>
88676+ *
88677+ * This program is free software; you can redistribute it and/or modify
88678+ * it under the terms of the GNU General Public License as published by
88679+ * the Free Software Foundation; either version 2, or (at your option)
88680+ * any later version.
88681+ *
88682+ * This program is distributed in the hope that it will be useful,
88683+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
88684+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
88685+ * GNU General Public License for more details.
88686+ *
88687+ * You should have received a copy of the GNU General Public License
88688+ * along with this program; see the file COPYING. If not, write to
88689+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
88690+ */
88691+
88692+#ifndef _LINUX_SYSLOG_H
88693+#define _LINUX_SYSLOG_H
88694+
88695+/* Close the log. Currently a NOP. */
88696+#define SYSLOG_ACTION_CLOSE 0
88697+/* Open the log. Currently a NOP. */
88698+#define SYSLOG_ACTION_OPEN 1
88699+/* Read from the log. */
88700+#define SYSLOG_ACTION_READ 2
88701+/* Read all messages remaining in the ring buffer. */
88702+#define SYSLOG_ACTION_READ_ALL 3
88703+/* Read and clear all messages remaining in the ring buffer */
88704+#define SYSLOG_ACTION_READ_CLEAR 4
88705+/* Clear ring buffer. */
88706+#define SYSLOG_ACTION_CLEAR 5
88707+/* Disable printk's to console */
88708+#define SYSLOG_ACTION_CONSOLE_OFF 6
88709+/* Enable printk's to console */
88710+#define SYSLOG_ACTION_CONSOLE_ON 7
88711+/* Set level of messages printed to console */
88712+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
88713+/* Return number of unread characters in the log buffer */
88714+#define SYSLOG_ACTION_SIZE_UNREAD 9
88715+/* Return size of the log buffer */
88716+#define SYSLOG_ACTION_SIZE_BUFFER 10
88717+
88718+#define SYSLOG_FROM_CALL 0
88719+#define SYSLOG_FROM_FILE 1
88720+
88721+int do_syslog(int type, char __user *buf, int count, bool from_file);
88722+
88723+#endif /* _LINUX_SYSLOG_H */
88724diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
88725index a8cc4e1..98d3b85 100644
88726--- a/include/linux/thread_info.h
88727+++ b/include/linux/thread_info.h
88728@@ -23,7 +23,7 @@ struct restart_block {
88729 };
88730 /* For futex_wait and futex_wait_requeue_pi */
88731 struct {
88732- u32 *uaddr;
88733+ u32 __user *uaddr;
88734 u32 val;
88735 u32 flags;
88736 u32 bitset;
88737diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
88738index 1eb44a9..f582df3 100644
88739--- a/include/linux/tracehook.h
88740+++ b/include/linux/tracehook.h
88741@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
88742 /*
88743 * ptrace report for syscall entry and exit looks identical.
88744 */
88745-static inline void ptrace_report_syscall(struct pt_regs *regs)
88746+static inline int ptrace_report_syscall(struct pt_regs *regs)
88747 {
88748 int ptrace = task_ptrace(current);
88749
88750 if (!(ptrace & PT_PTRACED))
88751- return;
88752+ return 0;
88753
88754 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
88755
88756@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88757 send_sig(current->exit_code, current, 1);
88758 current->exit_code = 0;
88759 }
88760+
88761+ return fatal_signal_pending(current);
88762 }
88763
88764 /**
88765@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88766 static inline __must_check int tracehook_report_syscall_entry(
88767 struct pt_regs *regs)
88768 {
88769- ptrace_report_syscall(regs);
88770- return 0;
88771+ return ptrace_report_syscall(regs);
88772 }
88773
88774 /**
88775diff --git a/include/linux/tty.h b/include/linux/tty.h
88776index e9c57e9..ee6d489 100644
88777--- a/include/linux/tty.h
88778+++ b/include/linux/tty.h
88779@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
88780 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
88781 extern void tty_ldisc_enable(struct tty_struct *tty);
88782
88783-
88784 /* n_tty.c */
88785 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
88786
88787diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
88788index 0c4ee9b..9f7c426 100644
88789--- a/include/linux/tty_ldisc.h
88790+++ b/include/linux/tty_ldisc.h
88791@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
88792
88793 struct module *owner;
88794
88795- int refcount;
88796+ atomic_t refcount;
88797 };
88798
88799 struct tty_ldisc {
88800diff --git a/include/linux/types.h b/include/linux/types.h
88801index c42724f..d190eee 100644
88802--- a/include/linux/types.h
88803+++ b/include/linux/types.h
88804@@ -191,10 +191,26 @@ typedef struct {
88805 volatile int counter;
88806 } atomic_t;
88807
88808+#ifdef CONFIG_PAX_REFCOUNT
88809+typedef struct {
88810+ volatile int counter;
88811+} atomic_unchecked_t;
88812+#else
88813+typedef atomic_t atomic_unchecked_t;
88814+#endif
88815+
88816 #ifdef CONFIG_64BIT
88817 typedef struct {
88818 volatile long counter;
88819 } atomic64_t;
88820+
88821+#ifdef CONFIG_PAX_REFCOUNT
88822+typedef struct {
88823+ volatile long counter;
88824+} atomic64_unchecked_t;
88825+#else
88826+typedef atomic64_t atomic64_unchecked_t;
88827+#endif
88828 #endif
88829
88830 struct ustat {
88831diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
88832index 6b58367..57b150e 100644
88833--- a/include/linux/uaccess.h
88834+++ b/include/linux/uaccess.h
88835@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88836 long ret; \
88837 mm_segment_t old_fs = get_fs(); \
88838 \
88839- set_fs(KERNEL_DS); \
88840 pagefault_disable(); \
88841- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
88842- pagefault_enable(); \
88843+ set_fs(KERNEL_DS); \
88844+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
88845 set_fs(old_fs); \
88846+ pagefault_enable(); \
88847 ret; \
88848 })
88849
88850@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88851 * Safely read from address @src to the buffer at @dst. If a kernel fault
88852 * happens, handle that and return -EFAULT.
88853 */
88854-extern long probe_kernel_read(void *dst, void *src, size_t size);
88855+extern long probe_kernel_read(void *dst, const void *src, size_t size);
88856
88857 /*
88858 * probe_kernel_write(): safely attempt to write to a location
88859@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
88860 * Safely write to address @dst from the buffer at @src. If a kernel fault
88861 * happens, handle that and return -EFAULT.
88862 */
88863-extern long probe_kernel_write(void *dst, void *src, size_t size);
88864+extern long probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
88865
88866 #endif /* __LINUX_UACCESS_H__ */
88867diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
88868index 99c1b4d..bb94261 100644
88869--- a/include/linux/unaligned/access_ok.h
88870+++ b/include/linux/unaligned/access_ok.h
88871@@ -6,32 +6,32 @@
88872
88873 static inline u16 get_unaligned_le16(const void *p)
88874 {
88875- return le16_to_cpup((__le16 *)p);
88876+ return le16_to_cpup((const __le16 *)p);
88877 }
88878
88879 static inline u32 get_unaligned_le32(const void *p)
88880 {
88881- return le32_to_cpup((__le32 *)p);
88882+ return le32_to_cpup((const __le32 *)p);
88883 }
88884
88885 static inline u64 get_unaligned_le64(const void *p)
88886 {
88887- return le64_to_cpup((__le64 *)p);
88888+ return le64_to_cpup((const __le64 *)p);
88889 }
88890
88891 static inline u16 get_unaligned_be16(const void *p)
88892 {
88893- return be16_to_cpup((__be16 *)p);
88894+ return be16_to_cpup((const __be16 *)p);
88895 }
88896
88897 static inline u32 get_unaligned_be32(const void *p)
88898 {
88899- return be32_to_cpup((__be32 *)p);
88900+ return be32_to_cpup((const __be32 *)p);
88901 }
88902
88903 static inline u64 get_unaligned_be64(const void *p)
88904 {
88905- return be64_to_cpup((__be64 *)p);
88906+ return be64_to_cpup((const __be64 *)p);
88907 }
88908
88909 static inline void put_unaligned_le16(u16 val, void *p)
88910diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
88911index 79b9837..b5a56f9 100644
88912--- a/include/linux/vermagic.h
88913+++ b/include/linux/vermagic.h
88914@@ -26,9 +26,35 @@
88915 #define MODULE_ARCH_VERMAGIC ""
88916 #endif
88917
88918+#ifdef CONFIG_PAX_REFCOUNT
88919+#define MODULE_PAX_REFCOUNT "REFCOUNT "
88920+#else
88921+#define MODULE_PAX_REFCOUNT ""
88922+#endif
88923+
88924+#ifdef CONSTIFY_PLUGIN
88925+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
88926+#else
88927+#define MODULE_CONSTIFY_PLUGIN ""
88928+#endif
88929+
88930+#ifdef STACKLEAK_PLUGIN
88931+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
88932+#else
88933+#define MODULE_STACKLEAK_PLUGIN ""
88934+#endif
88935+
88936+#ifdef CONFIG_GRKERNSEC
88937+#define MODULE_GRSEC "GRSEC "
88938+#else
88939+#define MODULE_GRSEC ""
88940+#endif
88941+
88942 #define VERMAGIC_STRING \
88943 UTS_RELEASE " " \
88944 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
88945 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
88946- MODULE_ARCH_VERMAGIC
88947+ MODULE_ARCH_VERMAGIC \
88948+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
88949+ MODULE_GRSEC
88950
88951diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
88952index 819a634..b99e71b 100644
88953--- a/include/linux/vmalloc.h
88954+++ b/include/linux/vmalloc.h
88955@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
88956 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
88957 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
88958 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
88959+
88960+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
88961+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
88962+#endif
88963+
88964 /* bits [20..32] reserved for arch specific ioremap internals */
88965
88966 /*
88967@@ -51,13 +56,13 @@ static inline void vmalloc_init(void)
88968 }
88969 #endif
88970
88971-extern void *vmalloc(unsigned long size);
88972-extern void *vmalloc_user(unsigned long size);
88973-extern void *vmalloc_node(unsigned long size, int node);
88974-extern void *vmalloc_exec(unsigned long size);
88975-extern void *vmalloc_32(unsigned long size);
88976-extern void *vmalloc_32_user(unsigned long size);
88977-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
88978+extern void *vmalloc(unsigned long size) __size_overflow(1);
88979+extern void *vmalloc_user(unsigned long size) __size_overflow(1);
88980+extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
88981+extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
88982+extern void *vmalloc_32(unsigned long size) __size_overflow(1);
88983+extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
88984+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
88985 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
88986 pgprot_t prot);
88987 extern void vfree(const void *addr);
88988@@ -106,8 +111,8 @@ extern struct vm_struct *alloc_vm_area(size_t size);
88989 extern void free_vm_area(struct vm_struct *area);
88990
88991 /* for /dev/kmem */
88992-extern long vread(char *buf, char *addr, unsigned long count);
88993-extern long vwrite(char *buf, char *addr, unsigned long count);
88994+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
88995+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
88996
88997 /*
88998 * Internals. Dont't use..
88999diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
89000index 13070d6..aa4159a 100644
89001--- a/include/linux/vmstat.h
89002+++ b/include/linux/vmstat.h
89003@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
89004 /*
89005 * Zone based page accounting with per cpu differentials.
89006 */
89007-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89008+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
89009
89010 static inline void zone_page_state_add(long x, struct zone *zone,
89011 enum zone_stat_item item)
89012 {
89013- atomic_long_add(x, &zone->vm_stat[item]);
89014- atomic_long_add(x, &vm_stat[item]);
89015+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
89016+ atomic_long_add_unchecked(x, &vm_stat[item]);
89017 }
89018
89019 static inline unsigned long global_page_state(enum zone_stat_item item)
89020 {
89021- long x = atomic_long_read(&vm_stat[item]);
89022+ long x = atomic_long_read_unchecked(&vm_stat[item]);
89023 #ifdef CONFIG_SMP
89024 if (x < 0)
89025 x = 0;
89026@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
89027 static inline unsigned long zone_page_state(struct zone *zone,
89028 enum zone_stat_item item)
89029 {
89030- long x = atomic_long_read(&zone->vm_stat[item]);
89031+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89032 #ifdef CONFIG_SMP
89033 if (x < 0)
89034 x = 0;
89035@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
89036 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
89037 enum zone_stat_item item)
89038 {
89039- long x = atomic_long_read(&zone->vm_stat[item]);
89040+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
89041
89042 #ifdef CONFIG_SMP
89043 int cpu;
89044@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
89045
89046 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
89047 {
89048- atomic_long_inc(&zone->vm_stat[item]);
89049- atomic_long_inc(&vm_stat[item]);
89050+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
89051+ atomic_long_inc_unchecked(&vm_stat[item]);
89052 }
89053
89054 static inline void __inc_zone_page_state(struct page *page,
89055@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
89056
89057 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
89058 {
89059- atomic_long_dec(&zone->vm_stat[item]);
89060- atomic_long_dec(&vm_stat[item]);
89061+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
89062+ atomic_long_dec_unchecked(&vm_stat[item]);
89063 }
89064
89065 static inline void __dec_zone_page_state(struct page *page,
89066diff --git a/include/linux/xattr.h b/include/linux/xattr.h
89067index 5c84af8..1a3b6e2 100644
89068--- a/include/linux/xattr.h
89069+++ b/include/linux/xattr.h
89070@@ -33,6 +33,11 @@
89071 #define XATTR_USER_PREFIX "user."
89072 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
89073
89074+/* User namespace */
89075+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
89076+#define XATTR_PAX_FLAGS_SUFFIX "flags"
89077+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
89078+
89079 struct inode;
89080 struct dentry;
89081
89082diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
89083index eed5fcc..5080d24 100644
89084--- a/include/media/saa7146_vv.h
89085+++ b/include/media/saa7146_vv.h
89086@@ -167,7 +167,7 @@ struct saa7146_ext_vv
89087 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
89088
89089 /* the extension can override this */
89090- struct v4l2_ioctl_ops ops;
89091+ v4l2_ioctl_ops_no_const ops;
89092 /* pointer to the saa7146 core ops */
89093 const struct v4l2_ioctl_ops *core_ops;
89094
89095diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
89096index 73c9867..2da8837 100644
89097--- a/include/media/v4l2-dev.h
89098+++ b/include/media/v4l2-dev.h
89099@@ -34,7 +34,7 @@ struct v4l2_device;
89100 #define V4L2_FL_UNREGISTERED (0)
89101
89102 struct v4l2_file_operations {
89103- struct module *owner;
89104+ struct module * const owner;
89105 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
89106 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
89107 unsigned int (*poll) (struct file *, struct poll_table_struct *);
89108@@ -46,6 +46,7 @@ struct v4l2_file_operations {
89109 int (*open) (struct file *);
89110 int (*release) (struct file *);
89111 };
89112+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
89113
89114 /*
89115 * Newer version of video_device, handled by videodev2.c
89116diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
89117index 5d5d550..f559ef1 100644
89118--- a/include/media/v4l2-device.h
89119+++ b/include/media/v4l2-device.h
89120@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
89121 this function returns 0. If the name ends with a digit (e.g. cx18),
89122 then the name will be set to cx18-0 since cx180 looks really odd. */
89123 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
89124- atomic_t *instance);
89125+ atomic_unchecked_t *instance);
89126
89127 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
89128 Since the parent disappears this ensures that v4l2_dev doesn't have an
89129diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
89130index 7a4529d..7244290 100644
89131--- a/include/media/v4l2-ioctl.h
89132+++ b/include/media/v4l2-ioctl.h
89133@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
89134 long (*vidioc_default) (struct file *file, void *fh,
89135 int cmd, void *arg);
89136 };
89137+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
89138
89139
89140 /* v4l debugging and diagnostics */
89141diff --git a/include/net/flow.h b/include/net/flow.h
89142index 809970b..c3df4f3 100644
89143--- a/include/net/flow.h
89144+++ b/include/net/flow.h
89145@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
89146 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
89147 u8 dir, flow_resolve_t resolver);
89148 extern void flow_cache_flush(void);
89149-extern atomic_t flow_cache_genid;
89150+extern atomic_unchecked_t flow_cache_genid;
89151
89152 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
89153 {
89154diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
89155index 15e1f8fe..668837c 100644
89156--- a/include/net/inetpeer.h
89157+++ b/include/net/inetpeer.h
89158@@ -24,7 +24,7 @@ struct inet_peer
89159 __u32 dtime; /* the time of last use of not
89160 * referenced entries */
89161 atomic_t refcnt;
89162- atomic_t rid; /* Frag reception counter */
89163+ atomic_unchecked_t rid; /* Frag reception counter */
89164 __u32 tcp_ts;
89165 unsigned long tcp_ts_stamp;
89166 };
89167diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
89168index 98978e7..2243a3d 100644
89169--- a/include/net/ip_vs.h
89170+++ b/include/net/ip_vs.h
89171@@ -365,7 +365,7 @@ struct ip_vs_conn {
89172 struct ip_vs_conn *control; /* Master control connection */
89173 atomic_t n_control; /* Number of controlled ones */
89174 struct ip_vs_dest *dest; /* real server */
89175- atomic_t in_pkts; /* incoming packet counter */
89176+ atomic_unchecked_t in_pkts; /* incoming packet counter */
89177
89178 /* packet transmitter for different forwarding methods. If it
89179 mangles the packet, it must return NF_DROP or better NF_STOLEN,
89180@@ -466,7 +466,7 @@ struct ip_vs_dest {
89181 union nf_inet_addr addr; /* IP address of the server */
89182 __be16 port; /* port number of the server */
89183 volatile unsigned flags; /* dest status flags */
89184- atomic_t conn_flags; /* flags to copy to conn */
89185+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
89186 atomic_t weight; /* server weight */
89187
89188 atomic_t refcnt; /* reference counter */
89189diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
89190index 69b610a..fe3962c 100644
89191--- a/include/net/irda/ircomm_core.h
89192+++ b/include/net/irda/ircomm_core.h
89193@@ -51,7 +51,7 @@ typedef struct {
89194 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
89195 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
89196 struct ircomm_info *);
89197-} call_t;
89198+} __no_const call_t;
89199
89200 struct ircomm_cb {
89201 irda_queue_t queue;
89202diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
89203index eea2e61..08c692d 100644
89204--- a/include/net/irda/ircomm_tty.h
89205+++ b/include/net/irda/ircomm_tty.h
89206@@ -35,6 +35,7 @@
89207 #include <linux/termios.h>
89208 #include <linux/timer.h>
89209 #include <linux/tty.h> /* struct tty_struct */
89210+#include <asm/local.h>
89211
89212 #include <net/irda/irias_object.h>
89213 #include <net/irda/ircomm_core.h>
89214@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
89215 unsigned short close_delay;
89216 unsigned short closing_wait; /* time to wait before closing */
89217
89218- int open_count;
89219- int blocked_open; /* # of blocked opens */
89220+ local_t open_count;
89221+ local_t blocked_open; /* # of blocked opens */
89222
89223 /* Protect concurent access to :
89224 * o self->open_count
89225diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
89226index f82a1e8..82d81e8 100644
89227--- a/include/net/iucv/af_iucv.h
89228+++ b/include/net/iucv/af_iucv.h
89229@@ -87,7 +87,7 @@ struct iucv_sock {
89230 struct iucv_sock_list {
89231 struct hlist_head head;
89232 rwlock_t lock;
89233- atomic_t autobind_name;
89234+ atomic_unchecked_t autobind_name;
89235 };
89236
89237 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
89238diff --git a/include/net/lapb.h b/include/net/lapb.h
89239index 96cb5dd..25e8d4f 100644
89240--- a/include/net/lapb.h
89241+++ b/include/net/lapb.h
89242@@ -95,7 +95,7 @@ struct lapb_cb {
89243 struct sk_buff_head write_queue;
89244 struct sk_buff_head ack_queue;
89245 unsigned char window;
89246- struct lapb_register_struct callbacks;
89247+ struct lapb_register_struct *callbacks;
89248
89249 /* FRMR control information */
89250 struct lapb_frame frmr_data;
89251diff --git a/include/net/neighbour.h b/include/net/neighbour.h
89252index 3817fda..cdb2343 100644
89253--- a/include/net/neighbour.h
89254+++ b/include/net/neighbour.h
89255@@ -131,7 +131,7 @@ struct neigh_ops
89256 int (*connected_output)(struct sk_buff*);
89257 int (*hh_output)(struct sk_buff*);
89258 int (*queue_xmit)(struct sk_buff*);
89259-};
89260+} __do_const;
89261
89262 struct pneigh_entry
89263 {
89264diff --git a/include/net/netlink.h b/include/net/netlink.h
89265index c344646..4778c71 100644
89266--- a/include/net/netlink.h
89267+++ b/include/net/netlink.h
89268@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
89269 {
89270 return (remaining >= (int) sizeof(struct nlmsghdr) &&
89271 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
89272- nlh->nlmsg_len <= remaining);
89273+ nlh->nlmsg_len <= (unsigned int)remaining);
89274 }
89275
89276 /**
89277@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
89278 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
89279 {
89280 if (mark)
89281- skb_trim(skb, (unsigned char *) mark - skb->data);
89282+ skb_trim(skb, (const unsigned char *) mark - skb->data);
89283 }
89284
89285 /**
89286diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
89287index 9a4b8b7..e49e077 100644
89288--- a/include/net/netns/ipv4.h
89289+++ b/include/net/netns/ipv4.h
89290@@ -54,7 +54,7 @@ struct netns_ipv4 {
89291 int current_rt_cache_rebuild_count;
89292
89293 struct timer_list rt_secret_timer;
89294- atomic_t rt_genid;
89295+ atomic_unchecked_t rt_genid;
89296
89297 #ifdef CONFIG_IP_MROUTE
89298 struct sock *mroute_sk;
89299diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
89300index 8a6d529..171f401 100644
89301--- a/include/net/sctp/sctp.h
89302+++ b/include/net/sctp/sctp.h
89303@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
89304
89305 #else /* SCTP_DEBUG */
89306
89307-#define SCTP_DEBUG_PRINTK(whatever...)
89308-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
89309+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
89310+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
89311 #define SCTP_ENABLE_DEBUG
89312 #define SCTP_DISABLE_DEBUG
89313 #define SCTP_ASSERT(expr, str, func)
89314diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
89315index d97f689..f3b90ab 100644
89316--- a/include/net/secure_seq.h
89317+++ b/include/net/secure_seq.h
89318@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
89319 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
89320 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
89321 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
89322- __be16 dport);
89323+ __be16 dport);
89324 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
89325 __be16 sport, __be16 dport);
89326 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89327- __be16 sport, __be16 dport);
89328+ __be16 sport, __be16 dport);
89329 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
89330- __be16 sport, __be16 dport);
89331+ __be16 sport, __be16 dport);
89332 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
89333- __be16 sport, __be16 dport);
89334+ __be16 sport, __be16 dport);
89335
89336 #endif /* _NET_SECURE_SEQ */
89337diff --git a/include/net/sock.h b/include/net/sock.h
89338index 78adf52..99afd29 100644
89339--- a/include/net/sock.h
89340+++ b/include/net/sock.h
89341@@ -272,7 +272,7 @@ struct sock {
89342 rwlock_t sk_callback_lock;
89343 int sk_err,
89344 sk_err_soft;
89345- atomic_t sk_drops;
89346+ atomic_unchecked_t sk_drops;
89347 unsigned short sk_ack_backlog;
89348 unsigned short sk_max_ack_backlog;
89349 __u32 sk_priority;
89350@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
89351 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
89352 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
89353 #else
89354-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
89355+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
89356 int inc)
89357 {
89358 }
89359diff --git a/include/net/tcp.h b/include/net/tcp.h
89360index 6cfe18b..dd21acb 100644
89361--- a/include/net/tcp.h
89362+++ b/include/net/tcp.h
89363@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
89364 struct tcp_seq_afinfo {
89365 char *name;
89366 sa_family_t family;
89367- struct file_operations seq_fops;
89368- struct seq_operations seq_ops;
89369+ file_operations_no_const seq_fops;
89370+ seq_operations_no_const seq_ops;
89371 };
89372
89373 struct tcp_iter_state {
89374diff --git a/include/net/udp.h b/include/net/udp.h
89375index f98abd2..b4b042f 100644
89376--- a/include/net/udp.h
89377+++ b/include/net/udp.h
89378@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
89379 char *name;
89380 sa_family_t family;
89381 struct udp_table *udp_table;
89382- struct file_operations seq_fops;
89383- struct seq_operations seq_ops;
89384+ file_operations_no_const seq_fops;
89385+ seq_operations_no_const seq_ops;
89386 };
89387
89388 struct udp_iter_state {
89389diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
89390index cbb822e..e9c1cbe 100644
89391--- a/include/rdma/iw_cm.h
89392+++ b/include/rdma/iw_cm.h
89393@@ -129,7 +129,7 @@ struct iw_cm_verbs {
89394 int backlog);
89395
89396 int (*destroy_listen)(struct iw_cm_id *cm_id);
89397-};
89398+} __no_const;
89399
89400 /**
89401 * iw_create_cm_id - Create an IW CM identifier.
89402diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
89403index 09a124b..caa8ca8 100644
89404--- a/include/scsi/libfc.h
89405+++ b/include/scsi/libfc.h
89406@@ -675,6 +675,7 @@ struct libfc_function_template {
89407 */
89408 void (*disc_stop_final) (struct fc_lport *);
89409 };
89410+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
89411
89412 /* information used by the discovery layer */
89413 struct fc_disc {
89414@@ -707,7 +708,7 @@ struct fc_lport {
89415 struct fc_disc disc;
89416
89417 /* Operational Information */
89418- struct libfc_function_template tt;
89419+ libfc_function_template_no_const tt;
89420 u8 link_up;
89421 u8 qfull;
89422 enum fc_lport_state state;
89423diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
89424index de8e180..f15e0d7 100644
89425--- a/include/scsi/scsi_device.h
89426+++ b/include/scsi/scsi_device.h
89427@@ -156,9 +156,9 @@ struct scsi_device {
89428 unsigned int max_device_blocked; /* what device_blocked counts down from */
89429 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
89430
89431- atomic_t iorequest_cnt;
89432- atomic_t iodone_cnt;
89433- atomic_t ioerr_cnt;
89434+ atomic_unchecked_t iorequest_cnt;
89435+ atomic_unchecked_t iodone_cnt;
89436+ atomic_unchecked_t ioerr_cnt;
89437
89438 struct device sdev_gendev,
89439 sdev_dev;
89440diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
89441index 0b4baba..0106e9e 100644
89442--- a/include/scsi/scsi_host.h
89443+++ b/include/scsi/scsi_host.h
89444@@ -43,6 +43,12 @@ struct blk_queue_tags;
89445 #define DISABLE_CLUSTERING 0
89446 #define ENABLE_CLUSTERING 1
89447
89448+enum {
89449+ SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
89450+ SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
89451+ SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshhold event */
89452+};
89453+
89454 struct scsi_host_template {
89455 struct module *module;
89456 const char *name;
89457diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
89458index fc50bd6..81ba9cb 100644
89459--- a/include/scsi/scsi_transport_fc.h
89460+++ b/include/scsi/scsi_transport_fc.h
89461@@ -708,7 +708,7 @@ struct fc_function_template {
89462 unsigned long show_host_system_hostname:1;
89463
89464 unsigned long disable_target_scan:1;
89465-};
89466+} __do_const;
89467
89468
89469 /**
89470diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
89471index 3dae3f7..8440d6f 100644
89472--- a/include/sound/ac97_codec.h
89473+++ b/include/sound/ac97_codec.h
89474@@ -419,15 +419,15 @@
89475 struct snd_ac97;
89476
89477 struct snd_ac97_build_ops {
89478- int (*build_3d) (struct snd_ac97 *ac97);
89479- int (*build_specific) (struct snd_ac97 *ac97);
89480- int (*build_spdif) (struct snd_ac97 *ac97);
89481- int (*build_post_spdif) (struct snd_ac97 *ac97);
89482+ int (* const build_3d) (struct snd_ac97 *ac97);
89483+ int (* const build_specific) (struct snd_ac97 *ac97);
89484+ int (* const build_spdif) (struct snd_ac97 *ac97);
89485+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
89486 #ifdef CONFIG_PM
89487- void (*suspend) (struct snd_ac97 *ac97);
89488- void (*resume) (struct snd_ac97 *ac97);
89489+ void (* const suspend) (struct snd_ac97 *ac97);
89490+ void (* const resume) (struct snd_ac97 *ac97);
89491 #endif
89492- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89493+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
89494 };
89495
89496 struct snd_ac97_bus_ops {
89497@@ -477,7 +477,7 @@ struct snd_ac97_template {
89498
89499 struct snd_ac97 {
89500 /* -- lowlevel (hardware) driver specific -- */
89501- struct snd_ac97_build_ops * build_ops;
89502+ const struct snd_ac97_build_ops * build_ops;
89503 void *private_data;
89504 void (*private_free) (struct snd_ac97 *ac97);
89505 /* --- */
89506diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
89507index 891cf1a..a94ba2b 100644
89508--- a/include/sound/ak4xxx-adda.h
89509+++ b/include/sound/ak4xxx-adda.h
89510@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
89511 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
89512 unsigned char val);
89513 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
89514-};
89515+} __no_const;
89516
89517 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
89518
89519diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
89520index 8c05e47..2b5df97 100644
89521--- a/include/sound/hwdep.h
89522+++ b/include/sound/hwdep.h
89523@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
89524 struct snd_hwdep_dsp_status *status);
89525 int (*dsp_load)(struct snd_hwdep *hw,
89526 struct snd_hwdep_dsp_image *image);
89527-};
89528+} __no_const;
89529
89530 struct snd_hwdep {
89531 struct snd_card *card;
89532diff --git a/include/sound/info.h b/include/sound/info.h
89533index 112e894..6fda5b5 100644
89534--- a/include/sound/info.h
89535+++ b/include/sound/info.h
89536@@ -44,7 +44,7 @@ struct snd_info_entry_text {
89537 struct snd_info_buffer *buffer);
89538 void (*write)(struct snd_info_entry *entry,
89539 struct snd_info_buffer *buffer);
89540-};
89541+} __no_const;
89542
89543 struct snd_info_entry_ops {
89544 int (*open)(struct snd_info_entry *entry,
89545diff --git a/include/sound/pcm.h b/include/sound/pcm.h
89546index de6d981..590a550 100644
89547--- a/include/sound/pcm.h
89548+++ b/include/sound/pcm.h
89549@@ -80,6 +80,7 @@ struct snd_pcm_ops {
89550 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
89551 int (*ack)(struct snd_pcm_substream *substream);
89552 };
89553+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
89554
89555 /*
89556 *
89557diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
89558index 736eac7..fe8a80f 100644
89559--- a/include/sound/sb16_csp.h
89560+++ b/include/sound/sb16_csp.h
89561@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
89562 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
89563 int (*csp_stop) (struct snd_sb_csp * p);
89564 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
89565-};
89566+} __no_const;
89567
89568 /*
89569 * CSP private data
89570diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
89571index 444cd6b..3327cc5 100644
89572--- a/include/sound/ymfpci.h
89573+++ b/include/sound/ymfpci.h
89574@@ -358,7 +358,7 @@ struct snd_ymfpci {
89575 spinlock_t reg_lock;
89576 spinlock_t voice_lock;
89577 wait_queue_head_t interrupt_sleep;
89578- atomic_t interrupt_sleep_count;
89579+ atomic_unchecked_t interrupt_sleep_count;
89580 struct snd_info_entry *proc_entry;
89581 const struct firmware *dsp_microcode;
89582 const struct firmware *controller_microcode;
89583diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89584index b89f9db..f097b38 100644
89585--- a/include/trace/events/irq.h
89586+++ b/include/trace/events/irq.h
89587@@ -34,7 +34,7 @@
89588 */
89589 TRACE_EVENT(irq_handler_entry,
89590
89591- TP_PROTO(int irq, struct irqaction *action),
89592+ TP_PROTO(int irq, const struct irqaction *action),
89593
89594 TP_ARGS(irq, action),
89595
89596@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
89597 */
89598 TRACE_EVENT(irq_handler_exit,
89599
89600- TP_PROTO(int irq, struct irqaction *action, int ret),
89601+ TP_PROTO(int irq, const struct irqaction *action, int ret),
89602
89603 TP_ARGS(irq, action, ret),
89604
89605@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
89606 */
89607 TRACE_EVENT(softirq_entry,
89608
89609- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89610+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89611
89612 TP_ARGS(h, vec),
89613
89614@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
89615 */
89616 TRACE_EVENT(softirq_exit,
89617
89618- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89619+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89620
89621 TP_ARGS(h, vec),
89622
89623diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89624index 0993a22..32ba2fe 100644
89625--- a/include/video/uvesafb.h
89626+++ b/include/video/uvesafb.h
89627@@ -177,6 +177,7 @@ struct uvesafb_par {
89628 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89629 u8 pmi_setpal; /* PMI for palette changes */
89630 u16 *pmi_base; /* protected mode interface location */
89631+ u8 *pmi_code; /* protected mode code location */
89632 void *pmi_start;
89633 void *pmi_pal;
89634 u8 *vbe_state_orig; /*
89635diff --git a/init/Kconfig b/init/Kconfig
89636index d72691b..3996e54 100644
89637--- a/init/Kconfig
89638+++ b/init/Kconfig
89639@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
89640
89641 config COMPAT_BRK
89642 bool "Disable heap randomization"
89643- default y
89644+ default n
89645 help
89646 Randomizing heap placement makes heap exploits harder, but it
89647 also breaks ancient binaries (including anything libc5 based).
89648diff --git a/init/do_mounts.c b/init/do_mounts.c
89649index bb008d0..4fa3933 100644
89650--- a/init/do_mounts.c
89651+++ b/init/do_mounts.c
89652@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
89653
89654 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89655 {
89656- int err = sys_mount(name, "/root", fs, flags, data);
89657+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
89658 if (err)
89659 return err;
89660
89661- sys_chdir("/root");
89662+ sys_chdir((__force const char __user *)"/root");
89663 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
89664 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
89665 current->fs->pwd.mnt->mnt_sb->s_type->name,
89666@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
89667 va_start(args, fmt);
89668 vsprintf(buf, fmt, args);
89669 va_end(args);
89670- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89671+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89672 if (fd >= 0) {
89673 sys_ioctl(fd, FDEJECT, 0);
89674 sys_close(fd);
89675 }
89676 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89677- fd = sys_open("/dev/console", O_RDWR, 0);
89678+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
89679 if (fd >= 0) {
89680 sys_ioctl(fd, TCGETS, (long)&termios);
89681 termios.c_lflag &= ~ICANON;
89682 sys_ioctl(fd, TCSETSF, (long)&termios);
89683- sys_read(fd, &c, 1);
89684+ sys_read(fd, (char __user *)&c, 1);
89685 termios.c_lflag |= ICANON;
89686 sys_ioctl(fd, TCSETSF, (long)&termios);
89687 sys_close(fd);
89688@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
89689 mount_root();
89690 out:
89691 devtmpfs_mount("dev");
89692- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89693- sys_chroot(".");
89694+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
89695+ sys_chroot((__force char __user *)".");
89696 }
89697diff --git a/init/do_mounts.h b/init/do_mounts.h
89698index f5b978a..69dbfe8 100644
89699--- a/init/do_mounts.h
89700+++ b/init/do_mounts.h
89701@@ -15,15 +15,15 @@ extern int root_mountflags;
89702
89703 static inline int create_dev(char *name, dev_t dev)
89704 {
89705- sys_unlink(name);
89706- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89707+ sys_unlink((char __force_user *)name);
89708+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89709 }
89710
89711 #if BITS_PER_LONG == 32
89712 static inline u32 bstat(char *name)
89713 {
89714 struct stat64 stat;
89715- if (sys_stat64(name, &stat) != 0)
89716+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89717 return 0;
89718 if (!S_ISBLK(stat.st_mode))
89719 return 0;
89720@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89721 static inline u32 bstat(char *name)
89722 {
89723 struct stat stat;
89724- if (sys_newstat(name, &stat) != 0)
89725+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89726 return 0;
89727 if (!S_ISBLK(stat.st_mode))
89728 return 0;
89729diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89730index 614241b..4da046b 100644
89731--- a/init/do_mounts_initrd.c
89732+++ b/init/do_mounts_initrd.c
89733@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
89734 sys_close(old_fd);sys_close(root_fd);
89735 sys_close(0);sys_close(1);sys_close(2);
89736 sys_setsid();
89737- (void) sys_open("/dev/console",O_RDWR,0);
89738+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
89739 (void) sys_dup(0);
89740 (void) sys_dup(0);
89741 return kernel_execve(shell, argv, envp_init);
89742@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
89743 create_dev("/dev/root.old", Root_RAM0);
89744 /* mount initrd on rootfs' /root */
89745 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89746- sys_mkdir("/old", 0700);
89747- root_fd = sys_open("/", 0, 0);
89748- old_fd = sys_open("/old", 0, 0);
89749+ sys_mkdir((const char __force_user *)"/old", 0700);
89750+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
89751+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
89752 /* move initrd over / and chdir/chroot in initrd root */
89753- sys_chdir("/root");
89754- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89755- sys_chroot(".");
89756+ sys_chdir((const char __force_user *)"/root");
89757+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89758+ sys_chroot((const char __force_user *)".");
89759
89760 /*
89761 * In case that a resume from disk is carried out by linuxrc or one of
89762@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
89763
89764 /* move initrd to rootfs' /old */
89765 sys_fchdir(old_fd);
89766- sys_mount("/", ".", NULL, MS_MOVE, NULL);
89767+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
89768 /* switch root and cwd back to / of rootfs */
89769 sys_fchdir(root_fd);
89770- sys_chroot(".");
89771+ sys_chroot((const char __force_user *)".");
89772 sys_close(old_fd);
89773 sys_close(root_fd);
89774
89775 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89776- sys_chdir("/old");
89777+ sys_chdir((const char __force_user *)"/old");
89778 return;
89779 }
89780
89781@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
89782 mount_root();
89783
89784 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89785- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89786+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89787 if (!error)
89788 printk("okay\n");
89789 else {
89790- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89791+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89792 if (error == -ENOENT)
89793 printk("/initrd does not exist. Ignored.\n");
89794 else
89795 printk("failed\n");
89796 printk(KERN_NOTICE "Unmounting old root\n");
89797- sys_umount("/old", MNT_DETACH);
89798+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89799 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89800 if (fd < 0) {
89801 error = fd;
89802@@ -119,11 +119,11 @@ int __init initrd_load(void)
89803 * mounted in the normal path.
89804 */
89805 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89806- sys_unlink("/initrd.image");
89807+ sys_unlink((const char __force_user *)"/initrd.image");
89808 handle_initrd();
89809 return 1;
89810 }
89811 }
89812- sys_unlink("/initrd.image");
89813+ sys_unlink((const char __force_user *)"/initrd.image");
89814 return 0;
89815 }
89816diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89817index 69aebbf..c0bf6a7 100644
89818--- a/init/do_mounts_md.c
89819+++ b/init/do_mounts_md.c
89820@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
89821 partitioned ? "_d" : "", minor,
89822 md_setup_args[ent].device_names);
89823
89824- fd = sys_open(name, 0, 0);
89825+ fd = sys_open((char __force_user *)name, 0, 0);
89826 if (fd < 0) {
89827 printk(KERN_ERR "md: open failed - cannot start "
89828 "array %s\n", name);
89829@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
89830 * array without it
89831 */
89832 sys_close(fd);
89833- fd = sys_open(name, 0, 0);
89834+ fd = sys_open((char __force_user *)name, 0, 0);
89835 sys_ioctl(fd, BLKRRPART, 0);
89836 }
89837 sys_close(fd);
89838@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
89839
89840 wait_for_device_probe();
89841
89842- fd = sys_open("/dev/md0", 0, 0);
89843+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
89844 if (fd >= 0) {
89845 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89846 sys_close(fd);
89847diff --git a/init/initramfs.c b/init/initramfs.c
89848index 1fd59b8..a01b079 100644
89849--- a/init/initramfs.c
89850+++ b/init/initramfs.c
89851@@ -74,7 +74,7 @@ static void __init free_hash(void)
89852 }
89853 }
89854
89855-static long __init do_utime(char __user *filename, time_t mtime)
89856+static long __init do_utime(__force char __user *filename, time_t mtime)
89857 {
89858 struct timespec t[2];
89859
89860@@ -109,7 +109,7 @@ static void __init dir_utime(void)
89861 struct dir_entry *de, *tmp;
89862 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89863 list_del(&de->list);
89864- do_utime(de->name, de->mtime);
89865+ do_utime((char __force_user *)de->name, de->mtime);
89866 kfree(de->name);
89867 kfree(de);
89868 }
89869@@ -271,7 +271,7 @@ static int __init maybe_link(void)
89870 if (nlink >= 2) {
89871 char *old = find_link(major, minor, ino, mode, collected);
89872 if (old)
89873- return (sys_link(old, collected) < 0) ? -1 : 1;
89874+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89875 }
89876 return 0;
89877 }
89878@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
89879 {
89880 struct stat st;
89881
89882- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89883+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89884 if (S_ISDIR(st.st_mode))
89885- sys_rmdir(path);
89886+ sys_rmdir((char __force_user *)path);
89887 else
89888- sys_unlink(path);
89889+ sys_unlink((char __force_user *)path);
89890 }
89891 }
89892
89893@@ -305,7 +305,7 @@ static int __init do_name(void)
89894 int openflags = O_WRONLY|O_CREAT;
89895 if (ml != 1)
89896 openflags |= O_TRUNC;
89897- wfd = sys_open(collected, openflags, mode);
89898+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89899
89900 if (wfd >= 0) {
89901 sys_fchown(wfd, uid, gid);
89902@@ -317,17 +317,17 @@ static int __init do_name(void)
89903 }
89904 }
89905 } else if (S_ISDIR(mode)) {
89906- sys_mkdir(collected, mode);
89907- sys_chown(collected, uid, gid);
89908- sys_chmod(collected, mode);
89909+ sys_mkdir((char __force_user *)collected, mode);
89910+ sys_chown((char __force_user *)collected, uid, gid);
89911+ sys_chmod((char __force_user *)collected, mode);
89912 dir_add(collected, mtime);
89913 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89914 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89915 if (maybe_link() == 0) {
89916- sys_mknod(collected, mode, rdev);
89917- sys_chown(collected, uid, gid);
89918- sys_chmod(collected, mode);
89919- do_utime(collected, mtime);
89920+ sys_mknod((char __force_user *)collected, mode, rdev);
89921+ sys_chown((char __force_user *)collected, uid, gid);
89922+ sys_chmod((char __force_user *)collected, mode);
89923+ do_utime((char __force_user *)collected, mtime);
89924 }
89925 }
89926 return 0;
89927@@ -336,15 +336,15 @@ static int __init do_name(void)
89928 static int __init do_copy(void)
89929 {
89930 if (count >= body_len) {
89931- sys_write(wfd, victim, body_len);
89932+ sys_write(wfd, (char __force_user *)victim, body_len);
89933 sys_close(wfd);
89934- do_utime(vcollected, mtime);
89935+ do_utime((char __force_user *)vcollected, mtime);
89936 kfree(vcollected);
89937 eat(body_len);
89938 state = SkipIt;
89939 return 0;
89940 } else {
89941- sys_write(wfd, victim, count);
89942+ sys_write(wfd, (char __force_user *)victim, count);
89943 body_len -= count;
89944 eat(count);
89945 return 1;
89946@@ -355,9 +355,9 @@ static int __init do_symlink(void)
89947 {
89948 collected[N_ALIGN(name_len) + body_len] = '\0';
89949 clean_path(collected, 0);
89950- sys_symlink(collected + N_ALIGN(name_len), collected);
89951- sys_lchown(collected, uid, gid);
89952- do_utime(collected, mtime);
89953+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89954+ sys_lchown((char __force_user *)collected, uid, gid);
89955+ do_utime((char __force_user *)collected, mtime);
89956 state = SkipIt;
89957 next_state = Reset;
89958 return 0;
89959diff --git a/init/main.c b/init/main.c
89960index 1eb4bd5..fea5bbe 100644
89961--- a/init/main.c
89962+++ b/init/main.c
89963@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
89964 #ifdef CONFIG_TC
89965 extern void tc_init(void);
89966 #endif
89967+extern void grsecurity_init(void);
89968
89969 enum system_states system_state __read_mostly;
89970 EXPORT_SYMBOL(system_state);
89971@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
89972
89973 __setup("reset_devices", set_reset_devices);
89974
89975+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89976+extern char pax_enter_kernel_user[];
89977+extern char pax_exit_kernel_user[];
89978+extern pgdval_t clone_pgd_mask;
89979+#endif
89980+
89981+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89982+static int __init setup_pax_nouderef(char *str)
89983+{
89984+#ifdef CONFIG_X86_32
89985+ unsigned int cpu;
89986+ struct desc_struct *gdt;
89987+
89988+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89989+ gdt = get_cpu_gdt_table(cpu);
89990+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89991+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89992+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89993+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89994+ }
89995+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
89996+#else
89997+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89998+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89999+ clone_pgd_mask = ~(pgdval_t)0UL;
90000+#endif
90001+
90002+ return 0;
90003+}
90004+early_param("pax_nouderef", setup_pax_nouderef);
90005+#endif
90006+
90007+#ifdef CONFIG_PAX_SOFTMODE
90008+int pax_softmode;
90009+
90010+static int __init setup_pax_softmode(char *str)
90011+{
90012+ get_option(&str, &pax_softmode);
90013+ return 1;
90014+}
90015+__setup("pax_softmode=", setup_pax_softmode);
90016+#endif
90017+
90018 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
90019 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
90020 static const char *panic_later, *panic_param;
90021@@ -705,52 +749,53 @@ int initcall_debug;
90022 core_param(initcall_debug, initcall_debug, bool, 0644);
90023
90024 static char msgbuf[64];
90025-static struct boot_trace_call call;
90026-static struct boot_trace_ret ret;
90027+static struct boot_trace_call trace_call;
90028+static struct boot_trace_ret trace_ret;
90029
90030 int do_one_initcall(initcall_t fn)
90031 {
90032 int count = preempt_count();
90033 ktime_t calltime, delta, rettime;
90034+ const char *msg1 = "", *msg2 = "";
90035
90036 if (initcall_debug) {
90037- call.caller = task_pid_nr(current);
90038- printk("calling %pF @ %i\n", fn, call.caller);
90039+ trace_call.caller = task_pid_nr(current);
90040+ printk("calling %pF @ %i\n", fn, trace_call.caller);
90041 calltime = ktime_get();
90042- trace_boot_call(&call, fn);
90043+ trace_boot_call(&trace_call, fn);
90044 enable_boot_trace();
90045 }
90046
90047- ret.result = fn();
90048+ trace_ret.result = fn();
90049
90050 if (initcall_debug) {
90051 disable_boot_trace();
90052 rettime = ktime_get();
90053 delta = ktime_sub(rettime, calltime);
90054- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90055- trace_boot_ret(&ret, fn);
90056+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
90057+ trace_boot_ret(&trace_ret, fn);
90058 printk("initcall %pF returned %d after %Ld usecs\n", fn,
90059- ret.result, ret.duration);
90060+ trace_ret.result, trace_ret.duration);
90061 }
90062
90063 msgbuf[0] = 0;
90064
90065- if (ret.result && ret.result != -ENODEV && initcall_debug)
90066- sprintf(msgbuf, "error code %d ", ret.result);
90067+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
90068+ sprintf(msgbuf, "error code %d ", trace_ret.result);
90069
90070 if (preempt_count() != count) {
90071- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
90072+ msg1 = " preemption imbalance";
90073 preempt_count() = count;
90074 }
90075 if (irqs_disabled()) {
90076- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
90077+ msg2 = " disabled interrupts";
90078 local_irq_enable();
90079 }
90080- if (msgbuf[0]) {
90081- printk("initcall %pF returned with %s\n", fn, msgbuf);
90082+ if (msgbuf[0] || *msg1 || *msg2) {
90083+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
90084 }
90085
90086- return ret.result;
90087+ return trace_ret.result;
90088 }
90089
90090
90091@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
90092 if (!ramdisk_execute_command)
90093 ramdisk_execute_command = "/init";
90094
90095- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
90096+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
90097 ramdisk_execute_command = NULL;
90098 prepare_namespace();
90099 }
90100
90101+ grsecurity_init();
90102+
90103 /*
90104 * Ok, we have completed the initial bootup, and
90105 * we're essentially up and running. Get rid of the
90106diff --git a/init/noinitramfs.c b/init/noinitramfs.c
90107index f4c1a3a..96c19bd 100644
90108--- a/init/noinitramfs.c
90109+++ b/init/noinitramfs.c
90110@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
90111 {
90112 int err;
90113
90114- err = sys_mkdir("/dev", 0755);
90115+ err = sys_mkdir((const char __user *)"/dev", 0755);
90116 if (err < 0)
90117 goto out;
90118
90119@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
90120 if (err < 0)
90121 goto out;
90122
90123- err = sys_mkdir("/root", 0700);
90124+ err = sys_mkdir((const char __user *)"/root", 0700);
90125 if (err < 0)
90126 goto out;
90127
90128diff --git a/ipc/mqueue.c b/ipc/mqueue.c
90129index d01bc14..8df81db 100644
90130--- a/ipc/mqueue.c
90131+++ b/ipc/mqueue.c
90132@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
90133 mq_bytes = (mq_msg_tblsz +
90134 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
90135
90136+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
90137 spin_lock(&mq_lock);
90138 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
90139 u->mq_bytes + mq_bytes >
90140diff --git a/ipc/msg.c b/ipc/msg.c
90141index 779f762..4af9e36 100644
90142--- a/ipc/msg.c
90143+++ b/ipc/msg.c
90144@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
90145 return security_msg_queue_associate(msq, msgflg);
90146 }
90147
90148+static struct ipc_ops msg_ops = {
90149+ .getnew = newque,
90150+ .associate = msg_security,
90151+ .more_checks = NULL
90152+};
90153+
90154 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
90155 {
90156 struct ipc_namespace *ns;
90157- struct ipc_ops msg_ops;
90158 struct ipc_params msg_params;
90159
90160 ns = current->nsproxy->ipc_ns;
90161
90162- msg_ops.getnew = newque;
90163- msg_ops.associate = msg_security;
90164- msg_ops.more_checks = NULL;
90165-
90166 msg_params.key = key;
90167 msg_params.flg = msgflg;
90168
90169diff --git a/ipc/sem.c b/ipc/sem.c
90170index b781007..f738b04 100644
90171--- a/ipc/sem.c
90172+++ b/ipc/sem.c
90173@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
90174 return 0;
90175 }
90176
90177+static struct ipc_ops sem_ops = {
90178+ .getnew = newary,
90179+ .associate = sem_security,
90180+ .more_checks = sem_more_checks
90181+};
90182+
90183 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90184 {
90185 struct ipc_namespace *ns;
90186- struct ipc_ops sem_ops;
90187 struct ipc_params sem_params;
90188
90189 ns = current->nsproxy->ipc_ns;
90190@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
90191 if (nsems < 0 || nsems > ns->sc_semmsl)
90192 return -EINVAL;
90193
90194- sem_ops.getnew = newary;
90195- sem_ops.associate = sem_security;
90196- sem_ops.more_checks = sem_more_checks;
90197-
90198 sem_params.key = key;
90199 sem_params.flg = semflg;
90200 sem_params.u.nsems = nsems;
90201@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
90202 ushort* sem_io = fast_sem_io;
90203 int nsems;
90204
90205+ pax_track_stack();
90206+
90207 sma = sem_lock_check(ns, semid);
90208 if (IS_ERR(sma))
90209 return PTR_ERR(sma);
90210@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
90211 unsigned long jiffies_left = 0;
90212 struct ipc_namespace *ns;
90213
90214+ pax_track_stack();
90215+
90216 ns = current->nsproxy->ipc_ns;
90217
90218 if (nsops < 1 || semid < 0)
90219diff --git a/ipc/shm.c b/ipc/shm.c
90220index d30732c..e4992cd 100644
90221--- a/ipc/shm.c
90222+++ b/ipc/shm.c
90223@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
90224 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
90225 #endif
90226
90227+#ifdef CONFIG_GRKERNSEC
90228+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90229+ const time_t shm_createtime, const uid_t cuid,
90230+ const int shmid);
90231+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
90232+ const time_t shm_createtime);
90233+#endif
90234+
90235 void shm_init_ns(struct ipc_namespace *ns)
90236 {
90237 ns->shm_ctlmax = SHMMAX;
90238@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
90239 shp->shm_lprid = 0;
90240 shp->shm_atim = shp->shm_dtim = 0;
90241 shp->shm_ctim = get_seconds();
90242+#ifdef CONFIG_GRKERNSEC
90243+ {
90244+ struct timespec timeval;
90245+ do_posix_clock_monotonic_gettime(&timeval);
90246+
90247+ shp->shm_createtime = timeval.tv_sec;
90248+ }
90249+#endif
90250 shp->shm_segsz = size;
90251 shp->shm_nattch = 0;
90252 shp->shm_file = file;
90253@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
90254 return 0;
90255 }
90256
90257+static struct ipc_ops shm_ops = {
90258+ .getnew = newseg,
90259+ .associate = shm_security,
90260+ .more_checks = shm_more_checks
90261+};
90262+
90263 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
90264 {
90265 struct ipc_namespace *ns;
90266- struct ipc_ops shm_ops;
90267 struct ipc_params shm_params;
90268
90269 ns = current->nsproxy->ipc_ns;
90270
90271- shm_ops.getnew = newseg;
90272- shm_ops.associate = shm_security;
90273- shm_ops.more_checks = shm_more_checks;
90274-
90275 shm_params.key = key;
90276 shm_params.flg = shmflg;
90277 shm_params.u.size = size;
90278@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90279 f_mode = FMODE_READ | FMODE_WRITE;
90280 }
90281 if (shmflg & SHM_EXEC) {
90282+
90283+#ifdef CONFIG_PAX_MPROTECT
90284+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
90285+ goto out;
90286+#endif
90287+
90288 prot |= PROT_EXEC;
90289 acc_mode |= S_IXUGO;
90290 }
90291@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
90292 if (err)
90293 goto out_unlock;
90294
90295+#ifdef CONFIG_GRKERNSEC
90296+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
90297+ shp->shm_perm.cuid, shmid) ||
90298+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
90299+ err = -EACCES;
90300+ goto out_unlock;
90301+ }
90302+#endif
90303+
90304 path.dentry = dget(shp->shm_file->f_path.dentry);
90305 path.mnt = shp->shm_file->f_path.mnt;
90306 shp->shm_nattch++;
90307+#ifdef CONFIG_GRKERNSEC
90308+ shp->shm_lapid = current->pid;
90309+#endif
90310 size = i_size_read(path.dentry->d_inode);
90311 shm_unlock(shp);
90312
90313diff --git a/kernel/acct.c b/kernel/acct.c
90314index a6605ca..ca91111 100644
90315--- a/kernel/acct.c
90316+++ b/kernel/acct.c
90317@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
90318 */
90319 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
90320 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
90321- file->f_op->write(file, (char *)&ac,
90322+ file->f_op->write(file, (char __force_user *)&ac,
90323 sizeof(acct_t), &file->f_pos);
90324 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
90325 set_fs(fs);
90326diff --git a/kernel/audit.c b/kernel/audit.c
90327index 5feed23..48415fd 100644
90328--- a/kernel/audit.c
90329+++ b/kernel/audit.c
90330@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
90331 3) suppressed due to audit_rate_limit
90332 4) suppressed due to audit_backlog_limit
90333 */
90334-static atomic_t audit_lost = ATOMIC_INIT(0);
90335+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
90336
90337 /* The netlink socket. */
90338 static struct sock *audit_sock;
90339@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
90340 unsigned long now;
90341 int print;
90342
90343- atomic_inc(&audit_lost);
90344+ atomic_inc_unchecked(&audit_lost);
90345
90346 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
90347
90348@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
90349 printk(KERN_WARNING
90350 "audit: audit_lost=%d audit_rate_limit=%d "
90351 "audit_backlog_limit=%d\n",
90352- atomic_read(&audit_lost),
90353+ atomic_read_unchecked(&audit_lost),
90354 audit_rate_limit,
90355 audit_backlog_limit);
90356 audit_panic(message);
90357@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90358 status_set.pid = audit_pid;
90359 status_set.rate_limit = audit_rate_limit;
90360 status_set.backlog_limit = audit_backlog_limit;
90361- status_set.lost = atomic_read(&audit_lost);
90362+ status_set.lost = atomic_read_unchecked(&audit_lost);
90363 status_set.backlog = skb_queue_len(&audit_skb_queue);
90364 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
90365 &status_set, sizeof(status_set));
90366@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
90367 spin_unlock_irq(&tsk->sighand->siglock);
90368 }
90369 read_unlock(&tasklist_lock);
90370- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
90371- &s, sizeof(s));
90372+
90373+ if (!err)
90374+ audit_send_reply(NETLINK_CB(skb).pid, seq,
90375+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
90376 break;
90377 }
90378 case AUDIT_TTY_SET: {
90379@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
90380 avail = audit_expand(ab,
90381 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
90382 if (!avail)
90383- goto out;
90384+ goto out_va_end;
90385 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
90386 }
90387- va_end(args2);
90388 if (len > 0)
90389 skb_put(skb, len);
90390+out_va_end:
90391+ va_end(args2);
90392 out:
90393 return;
90394 }
90395diff --git a/kernel/auditsc.c b/kernel/auditsc.c
90396index 267e484..ac41bc3 100644
90397--- a/kernel/auditsc.c
90398+++ b/kernel/auditsc.c
90399@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
90400 struct audit_buffer **ab,
90401 struct audit_aux_data_execve *axi)
90402 {
90403- int i;
90404- size_t len, len_sent = 0;
90405+ int i, len;
90406+ size_t len_sent = 0;
90407 const char __user *p;
90408 char *buf;
90409
90410@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
90411 }
90412
90413 /* global counter which is incremented every time something logs in */
90414-static atomic_t session_id = ATOMIC_INIT(0);
90415+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
90416
90417 /**
90418 * audit_set_loginuid - set a task's audit_context loginuid
90419@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
90420 */
90421 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
90422 {
90423- unsigned int sessionid = atomic_inc_return(&session_id);
90424+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
90425 struct audit_context *context = task->audit_context;
90426
90427 if (context && context->in_syscall) {
90428diff --git a/kernel/capability.c b/kernel/capability.c
90429index 8a944f5..db5001e 100644
90430--- a/kernel/capability.c
90431+++ b/kernel/capability.c
90432@@ -305,10 +305,26 @@ int capable(int cap)
90433 BUG();
90434 }
90435
90436- if (security_capable(cap) == 0) {
90437+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
90438 current->flags |= PF_SUPERPRIV;
90439 return 1;
90440 }
90441 return 0;
90442 }
90443+
90444+int capable_nolog(int cap)
90445+{
90446+ if (unlikely(!cap_valid(cap))) {
90447+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
90448+ BUG();
90449+ }
90450+
90451+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
90452+ current->flags |= PF_SUPERPRIV;
90453+ return 1;
90454+ }
90455+ return 0;
90456+}
90457+
90458 EXPORT_SYMBOL(capable);
90459+EXPORT_SYMBOL(capable_nolog);
90460diff --git a/kernel/cgroup.c b/kernel/cgroup.c
90461index 1fbcc74..7000012 100644
90462--- a/kernel/cgroup.c
90463+++ b/kernel/cgroup.c
90464@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
90465 struct hlist_head *hhead;
90466 struct cg_cgroup_link *link;
90467
90468+ pax_track_stack();
90469+
90470 /* First see if we already have a cgroup group that matches
90471 * the desired set */
90472 read_lock(&css_set_lock);
90473diff --git a/kernel/compat.c b/kernel/compat.c
90474index 8bc5578..186e44a 100644
90475--- a/kernel/compat.c
90476+++ b/kernel/compat.c
90477@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
90478 mm_segment_t oldfs;
90479 long ret;
90480
90481- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
90482+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
90483 oldfs = get_fs();
90484 set_fs(KERNEL_DS);
90485 ret = hrtimer_nanosleep_restart(restart);
90486@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
90487 oldfs = get_fs();
90488 set_fs(KERNEL_DS);
90489 ret = hrtimer_nanosleep(&tu,
90490- rmtp ? (struct timespec __user *)&rmt : NULL,
90491+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
90492 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
90493 set_fs(oldfs);
90494
90495@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
90496 mm_segment_t old_fs = get_fs();
90497
90498 set_fs(KERNEL_DS);
90499- ret = sys_sigpending((old_sigset_t __user *) &s);
90500+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
90501 set_fs(old_fs);
90502 if (ret == 0)
90503 ret = put_user(s, set);
90504@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
90505 old_fs = get_fs();
90506 set_fs(KERNEL_DS);
90507 ret = sys_sigprocmask(how,
90508- set ? (old_sigset_t __user *) &s : NULL,
90509- oset ? (old_sigset_t __user *) &s : NULL);
90510+ set ? (old_sigset_t __force_user *) &s : NULL,
90511+ oset ? (old_sigset_t __force_user *) &s : NULL);
90512 set_fs(old_fs);
90513 if (ret == 0)
90514 if (oset)
90515@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
90516 mm_segment_t old_fs = get_fs();
90517
90518 set_fs(KERNEL_DS);
90519- ret = sys_old_getrlimit(resource, &r);
90520+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
90521 set_fs(old_fs);
90522
90523 if (!ret) {
90524@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
90525 mm_segment_t old_fs = get_fs();
90526
90527 set_fs(KERNEL_DS);
90528- ret = sys_getrusage(who, (struct rusage __user *) &r);
90529+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
90530 set_fs(old_fs);
90531
90532 if (ret)
90533@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
90534 set_fs (KERNEL_DS);
90535 ret = sys_wait4(pid,
90536 (stat_addr ?
90537- (unsigned int __user *) &status : NULL),
90538- options, (struct rusage __user *) &r);
90539+ (unsigned int __force_user *) &status : NULL),
90540+ options, (struct rusage __force_user *) &r);
90541 set_fs (old_fs);
90542
90543 if (ret > 0) {
90544@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
90545 memset(&info, 0, sizeof(info));
90546
90547 set_fs(KERNEL_DS);
90548- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
90549- uru ? (struct rusage __user *)&ru : NULL);
90550+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
90551+ uru ? (struct rusage __force_user *)&ru : NULL);
90552 set_fs(old_fs);
90553
90554 if ((ret < 0) || (info.si_signo == 0))
90555@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
90556 oldfs = get_fs();
90557 set_fs(KERNEL_DS);
90558 err = sys_timer_settime(timer_id, flags,
90559- (struct itimerspec __user *) &newts,
90560- (struct itimerspec __user *) &oldts);
90561+ (struct itimerspec __force_user *) &newts,
90562+ (struct itimerspec __force_user *) &oldts);
90563 set_fs(oldfs);
90564 if (!err && old && put_compat_itimerspec(old, &oldts))
90565 return -EFAULT;
90566@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
90567 oldfs = get_fs();
90568 set_fs(KERNEL_DS);
90569 err = sys_timer_gettime(timer_id,
90570- (struct itimerspec __user *) &ts);
90571+ (struct itimerspec __force_user *) &ts);
90572 set_fs(oldfs);
90573 if (!err && put_compat_itimerspec(setting, &ts))
90574 return -EFAULT;
90575@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
90576 oldfs = get_fs();
90577 set_fs(KERNEL_DS);
90578 err = sys_clock_settime(which_clock,
90579- (struct timespec __user *) &ts);
90580+ (struct timespec __force_user *) &ts);
90581 set_fs(oldfs);
90582 return err;
90583 }
90584@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
90585 oldfs = get_fs();
90586 set_fs(KERNEL_DS);
90587 err = sys_clock_gettime(which_clock,
90588- (struct timespec __user *) &ts);
90589+ (struct timespec __force_user *) &ts);
90590 set_fs(oldfs);
90591 if (!err && put_compat_timespec(&ts, tp))
90592 return -EFAULT;
90593@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
90594 oldfs = get_fs();
90595 set_fs(KERNEL_DS);
90596 err = sys_clock_getres(which_clock,
90597- (struct timespec __user *) &ts);
90598+ (struct timespec __force_user *) &ts);
90599 set_fs(oldfs);
90600 if (!err && tp && put_compat_timespec(&ts, tp))
90601 return -EFAULT;
90602@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90603 long err;
90604 mm_segment_t oldfs;
90605 struct timespec tu;
90606- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
90607+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90608
90609- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90610+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90611 oldfs = get_fs();
90612 set_fs(KERNEL_DS);
90613 err = clock_nanosleep_restart(restart);
90614@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
90615 oldfs = get_fs();
90616 set_fs(KERNEL_DS);
90617 err = sys_clock_nanosleep(which_clock, flags,
90618- (struct timespec __user *) &in,
90619- (struct timespec __user *) &out);
90620+ (struct timespec __force_user *) &in,
90621+ (struct timespec __force_user *) &out);
90622 set_fs(oldfs);
90623
90624 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90625diff --git a/kernel/configs.c b/kernel/configs.c
90626index abaee68..047facd 100644
90627--- a/kernel/configs.c
90628+++ b/kernel/configs.c
90629@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
90630 struct proc_dir_entry *entry;
90631
90632 /* create the current config file */
90633+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90634+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90635+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90636+ &ikconfig_file_ops);
90637+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90638+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90639+ &ikconfig_file_ops);
90640+#endif
90641+#else
90642 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90643 &ikconfig_file_ops);
90644+#endif
90645+
90646 if (!entry)
90647 return -ENOMEM;
90648
90649diff --git a/kernel/cpu.c b/kernel/cpu.c
90650index 3f2f04f..4e53ded 100644
90651--- a/kernel/cpu.c
90652+++ b/kernel/cpu.c
90653@@ -20,7 +20,7 @@
90654 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
90655 static DEFINE_MUTEX(cpu_add_remove_lock);
90656
90657-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
90658+static RAW_NOTIFIER_HEAD(cpu_chain);
90659
90660 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
90661 * Should always be manipulated under cpu_add_remove_lock
90662diff --git a/kernel/cred.c b/kernel/cred.c
90663index 0b5b5fc..f7fe51a 100644
90664--- a/kernel/cred.c
90665+++ b/kernel/cred.c
90666@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
90667 */
90668 void __put_cred(struct cred *cred)
90669 {
90670+ pax_track_stack();
90671+
90672 kdebug("__put_cred(%p{%d,%d})", cred,
90673 atomic_read(&cred->usage),
90674 read_cred_subscribers(cred));
90675@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
90676 {
90677 struct cred *cred;
90678
90679+ pax_track_stack();
90680+
90681 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
90682 atomic_read(&tsk->cred->usage),
90683 read_cred_subscribers(tsk->cred));
90684@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
90685 validate_creds(cred);
90686 put_cred(cred);
90687 }
90688+
90689+#ifdef CONFIG_GRKERNSEC_SETXID
90690+ cred = (struct cred *) tsk->delayed_cred;
90691+ if (cred) {
90692+ tsk->delayed_cred = NULL;
90693+ validate_creds(cred);
90694+ put_cred(cred);
90695+ }
90696+#endif
90697 }
90698
90699 /**
90700@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
90701 {
90702 const struct cred *cred;
90703
90704+ pax_track_stack();
90705+
90706 rcu_read_lock();
90707
90708 do {
90709@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
90710 {
90711 struct cred *new;
90712
90713+ pax_track_stack();
90714+
90715 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
90716 if (!new)
90717 return NULL;
90718@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
90719 const struct cred *old;
90720 struct cred *new;
90721
90722+ pax_track_stack();
90723+
90724 validate_process_creds();
90725
90726 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90727@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
90728 struct thread_group_cred *tgcred = NULL;
90729 struct cred *new;
90730
90731+ pax_track_stack();
90732+
90733 #ifdef CONFIG_KEYS
90734 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
90735 if (!tgcred)
90736@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
90737 struct cred *new;
90738 int ret;
90739
90740+ pax_track_stack();
90741+
90742 mutex_init(&p->cred_guard_mutex);
90743
90744 if (
90745@@ -523,11 +546,13 @@ error_put:
90746 * Always returns 0 thus allowing this function to be tail-called at the end
90747 * of, say, sys_setgid().
90748 */
90749-int commit_creds(struct cred *new)
90750+static int __commit_creds(struct cred *new)
90751 {
90752 struct task_struct *task = current;
90753 const struct cred *old = task->real_cred;
90754
90755+ pax_track_stack();
90756+
90757 kdebug("commit_creds(%p{%d,%d})", new,
90758 atomic_read(&new->usage),
90759 read_cred_subscribers(new));
90760@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
90761
90762 get_cred(new); /* we will require a ref for the subj creds too */
90763
90764+ gr_set_role_label(task, new->uid, new->gid);
90765+
90766 /* dumpability changes */
90767 if (old->euid != new->euid ||
90768 old->egid != new->egid ||
90769@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
90770 key_fsgid_changed(task);
90771
90772 /* do it
90773- * - What if a process setreuid()'s and this brings the
90774- * new uid over his NPROC rlimit? We can check this now
90775- * cheaply with the new uid cache, so if it matters
90776- * we should be checking for it. -DaveM
90777+ * RLIMIT_NPROC limits on user->processes have already been checked
90778+ * in set_user().
90779 */
90780 alter_cred_subscribers(new, 2);
90781 if (new->user != old->user)
90782@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
90783 put_cred(old);
90784 return 0;
90785 }
90786+
90787+#ifdef CONFIG_GRKERNSEC_SETXID
90788+extern int set_user(struct cred *new);
90789+
90790+void gr_delayed_cred_worker(void)
90791+{
90792+ const struct cred *new = current->delayed_cred;
90793+ struct cred *ncred;
90794+
90795+ current->delayed_cred = NULL;
90796+
90797+ if (current_uid() && new != NULL) {
90798+ // from doing get_cred on it when queueing this
90799+ put_cred(new);
90800+ return;
90801+ } else if (new == NULL)
90802+ return;
90803+
90804+ ncred = prepare_creds();
90805+ if (!ncred)
90806+ goto die;
90807+ // uids
90808+ ncred->uid = new->uid;
90809+ ncred->euid = new->euid;
90810+ ncred->suid = new->suid;
90811+ ncred->fsuid = new->fsuid;
90812+ // gids
90813+ ncred->gid = new->gid;
90814+ ncred->egid = new->egid;
90815+ ncred->sgid = new->sgid;
90816+ ncred->fsgid = new->fsgid;
90817+ // groups
90818+ if (set_groups(ncred, new->group_info) < 0) {
90819+ abort_creds(ncred);
90820+ goto die;
90821+ }
90822+ // caps
90823+ ncred->securebits = new->securebits;
90824+ ncred->cap_inheritable = new->cap_inheritable;
90825+ ncred->cap_permitted = new->cap_permitted;
90826+ ncred->cap_effective = new->cap_effective;
90827+ ncred->cap_bset = new->cap_bset;
90828+
90829+ if (set_user(ncred)) {
90830+ abort_creds(ncred);
90831+ goto die;
90832+ }
90833+
90834+ // from doing get_cred on it when queueing this
90835+ put_cred(new);
90836+
90837+ __commit_creds(ncred);
90838+ return;
90839+die:
90840+ // from doing get_cred on it when queueing this
90841+ put_cred(new);
90842+ do_group_exit(SIGKILL);
90843+}
90844+#endif
90845+
90846+int commit_creds(struct cred *new)
90847+{
90848+#ifdef CONFIG_GRKERNSEC_SETXID
90849+ struct task_struct *t;
90850+
90851+ /* we won't get called with tasklist_lock held for writing
90852+ and interrupts disabled as the cred struct in that case is
90853+ init_cred
90854+ */
90855+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90856+ !current_uid() && new->uid) {
90857+ rcu_read_lock();
90858+ read_lock(&tasklist_lock);
90859+ for (t = next_thread(current); t != current;
90860+ t = next_thread(t)) {
90861+ if (t->delayed_cred == NULL) {
90862+ t->delayed_cred = get_cred(new);
90863+ set_tsk_need_resched(t);
90864+ }
90865+ }
90866+ read_unlock(&tasklist_lock);
90867+ rcu_read_unlock();
90868+ }
90869+#endif
90870+ return __commit_creds(new);
90871+}
90872+
90873 EXPORT_SYMBOL(commit_creds);
90874
90875+
90876 /**
90877 * abort_creds - Discard a set of credentials and unlock the current task
90878 * @new: The credentials that were going to be applied
90879@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
90880 */
90881 void abort_creds(struct cred *new)
90882 {
90883+ pax_track_stack();
90884+
90885 kdebug("abort_creds(%p{%d,%d})", new,
90886 atomic_read(&new->usage),
90887 read_cred_subscribers(new));
90888@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
90889 {
90890 const struct cred *old = current->cred;
90891
90892+ pax_track_stack();
90893+
90894 kdebug("override_creds(%p{%d,%d})", new,
90895 atomic_read(&new->usage),
90896 read_cred_subscribers(new));
90897@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
90898 {
90899 const struct cred *override = current->cred;
90900
90901+ pax_track_stack();
90902+
90903 kdebug("revert_creds(%p{%d,%d})", old,
90904 atomic_read(&old->usage),
90905 read_cred_subscribers(old));
90906@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
90907 const struct cred *old;
90908 struct cred *new;
90909
90910+ pax_track_stack();
90911+
90912 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90913 if (!new)
90914 return NULL;
90915@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
90916 */
90917 int set_security_override(struct cred *new, u32 secid)
90918 {
90919+ pax_track_stack();
90920+
90921 return security_kernel_act_as(new, secid);
90922 }
90923 EXPORT_SYMBOL(set_security_override);
90924@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
90925 u32 secid;
90926 int ret;
90927
90928+ pax_track_stack();
90929+
90930 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
90931 if (ret < 0)
90932 return ret;
90933diff --git a/kernel/exit.c b/kernel/exit.c
90934index 0f8fae3..66af9b1 100644
90935--- a/kernel/exit.c
90936+++ b/kernel/exit.c
90937@@ -55,6 +55,10 @@
90938 #include <asm/pgtable.h>
90939 #include <asm/mmu_context.h>
90940
90941+#ifdef CONFIG_GRKERNSEC
90942+extern rwlock_t grsec_exec_file_lock;
90943+#endif
90944+
90945 static void exit_mm(struct task_struct * tsk);
90946
90947 static void __unhash_process(struct task_struct *p)
90948@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
90949 struct task_struct *leader;
90950 int zap_leader;
90951 repeat:
90952+#ifdef CONFIG_NET
90953+ gr_del_task_from_ip_table(p);
90954+#endif
90955+
90956 tracehook_prepare_release_task(p);
90957 /* don't need to get the RCU readlock here - the process is dead and
90958 * can't be modifying its own credentials */
90959@@ -397,7 +405,7 @@ int allow_signal(int sig)
90960 * know it'll be handled, so that they don't get converted to
90961 * SIGKILL or just silently dropped.
90962 */
90963- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
90964+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
90965 recalc_sigpending();
90966 spin_unlock_irq(&current->sighand->siglock);
90967 return 0;
90968@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
90969 vsnprintf(current->comm, sizeof(current->comm), name, args);
90970 va_end(args);
90971
90972+#ifdef CONFIG_GRKERNSEC
90973+ write_lock(&grsec_exec_file_lock);
90974+ if (current->exec_file) {
90975+ fput(current->exec_file);
90976+ current->exec_file = NULL;
90977+ }
90978+ write_unlock(&grsec_exec_file_lock);
90979+#endif
90980+
90981+ gr_set_kernel_label(current);
90982+
90983 /*
90984 * If we were started as result of loading a module, close all of the
90985 * user space pages. We don't need them, and if we didn't close them
90986@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
90987 struct task_struct *tsk = current;
90988 int group_dead;
90989
90990- profile_task_exit(tsk);
90991-
90992- WARN_ON(atomic_read(&tsk->fs_excl));
90993-
90994+ /*
90995+ * Check this first since set_fs() below depends on
90996+ * current_thread_info(), which we better not access when we're in
90997+ * interrupt context. Other than that, we want to do the set_fs()
90998+ * as early as possible.
90999+ */
91000 if (unlikely(in_interrupt()))
91001 panic("Aiee, killing interrupt handler!");
91002- if (unlikely(!tsk->pid))
91003- panic("Attempted to kill the idle task!");
91004
91005 /*
91006- * If do_exit is called because this processes oopsed, it's possible
91007+ * If do_exit is called because this processes Oops'ed, it's possible
91008 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
91009 * continuing. Amongst other possible reasons, this is to prevent
91010 * mm_release()->clear_child_tid() from writing to a user-controlled
91011@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
91012 */
91013 set_fs(USER_DS);
91014
91015+ profile_task_exit(tsk);
91016+
91017+ WARN_ON(atomic_read(&tsk->fs_excl));
91018+
91019+ if (unlikely(!tsk->pid))
91020+ panic("Attempted to kill the idle task!");
91021+
91022 tracehook_report_exit(&code);
91023
91024 validate_creds_for_do_exit(tsk);
91025@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
91026 tsk->exit_code = code;
91027 taskstats_exit(tsk, group_dead);
91028
91029+ gr_acl_handle_psacct(tsk, code);
91030+ gr_acl_handle_exit();
91031+
91032 exit_mm(tsk);
91033
91034 if (group_dead)
91035@@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
91036 tsk->flags |= PF_EXITPIDONE;
91037
91038 if (tsk->io_context)
91039- exit_io_context();
91040+ exit_io_context(tsk);
91041
91042 if (tsk->splice_pipe)
91043 __free_pipe_info(tsk->splice_pipe);
91044@@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
91045 * Take down every thread in the group. This is called by fatal signals
91046 * as well as by sys_exit_group (below).
91047 */
91048-NORET_TYPE void
91049+__noreturn void
91050 do_group_exit(int exit_code)
91051 {
91052 struct signal_struct *sig = current->signal;
91053@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
91054
91055 if (unlikely(wo->wo_flags & WNOWAIT)) {
91056 int exit_code = p->exit_code;
91057- int why, status;
91058+ int why;
91059
91060 get_task_struct(p);
91061 read_unlock(&tasklist_lock);
91062diff --git a/kernel/fork.c b/kernel/fork.c
91063index 4bde56f..8976a8f 100644
91064--- a/kernel/fork.c
91065+++ b/kernel/fork.c
91066@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
91067 *stackend = STACK_END_MAGIC; /* for overflow detection */
91068
91069 #ifdef CONFIG_CC_STACKPROTECTOR
91070- tsk->stack_canary = get_random_int();
91071+ tsk->stack_canary = pax_get_random_long();
91072 #endif
91073
91074 /* One for us, one for whoever does the "release_task()" (usually parent) */
91075@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91076 mm->locked_vm = 0;
91077 mm->mmap = NULL;
91078 mm->mmap_cache = NULL;
91079- mm->free_area_cache = oldmm->mmap_base;
91080- mm->cached_hole_size = ~0UL;
91081+ mm->free_area_cache = oldmm->free_area_cache;
91082+ mm->cached_hole_size = oldmm->cached_hole_size;
91083 mm->map_count = 0;
91084 cpumask_clear(mm_cpumask(mm));
91085 mm->mm_rb = RB_ROOT;
91086@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91087 tmp->vm_flags &= ~VM_LOCKED;
91088 tmp->vm_mm = mm;
91089 tmp->vm_next = tmp->vm_prev = NULL;
91090+ tmp->vm_mirror = NULL;
91091 anon_vma_link(tmp);
91092 file = tmp->vm_file;
91093 if (file) {
91094@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
91095 if (retval)
91096 goto out;
91097 }
91098+
91099+#ifdef CONFIG_PAX_SEGMEXEC
91100+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
91101+ struct vm_area_struct *mpnt_m;
91102+
91103+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
91104+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
91105+
91106+ if (!mpnt->vm_mirror)
91107+ continue;
91108+
91109+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
91110+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
91111+ mpnt->vm_mirror = mpnt_m;
91112+ } else {
91113+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
91114+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
91115+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
91116+ mpnt->vm_mirror->vm_mirror = mpnt;
91117+ }
91118+ }
91119+ BUG_ON(mpnt_m);
91120+ }
91121+#endif
91122+
91123 /* a new mm has just been created */
91124 arch_dup_mmap(oldmm, mm);
91125 retval = 0;
91126@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
91127 write_unlock(&fs->lock);
91128 return -EAGAIN;
91129 }
91130- fs->users++;
91131+ atomic_inc(&fs->users);
91132 write_unlock(&fs->lock);
91133 return 0;
91134 }
91135 tsk->fs = copy_fs_struct(fs);
91136 if (!tsk->fs)
91137 return -ENOMEM;
91138+ gr_set_chroot_entries(tsk, &tsk->fs->root);
91139 return 0;
91140 }
91141
91142@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91143 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
91144 #endif
91145 retval = -EAGAIN;
91146+
91147+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
91148+
91149 if (atomic_read(&p->real_cred->user->processes) >=
91150 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
91151- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
91152- p->real_cred->user != INIT_USER)
91153+ if (p->real_cred->user != INIT_USER &&
91154+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
91155 goto bad_fork_free;
91156 }
91157+ current->flags &= ~PF_NPROC_EXCEEDED;
91158
91159 retval = copy_creds(p, clone_flags);
91160 if (retval < 0)
91161@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
91162 goto bad_fork_free_pid;
91163 }
91164
91165+ gr_copy_label(p);
91166+
91167 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
91168 /*
91169 * Clear TID on mm_release()?
91170@@ -1299,7 +1332,8 @@ bad_fork_free_pid:
91171 if (pid != &init_struct_pid)
91172 free_pid(pid);
91173 bad_fork_cleanup_io:
91174- put_io_context(p->io_context);
91175+ if (p->io_context)
91176+ exit_io_context(p);
91177 bad_fork_cleanup_namespaces:
91178 exit_task_namespaces(p);
91179 bad_fork_cleanup_mm:
91180@@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
91181 bad_fork_free:
91182 free_task(p);
91183 fork_out:
91184+ gr_log_forkfail(retval);
91185+
91186 return ERR_PTR(retval);
91187 }
91188
91189@@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
91190 if (clone_flags & CLONE_PARENT_SETTID)
91191 put_user(nr, parent_tidptr);
91192
91193+ gr_handle_brute_check();
91194+
91195 if (clone_flags & CLONE_VFORK) {
91196 p->vfork_done = &vfork;
91197 init_completion(&vfork);
91198@@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
91199 return 0;
91200
91201 /* don't need lock here; in the worst case we'll do useless copy */
91202- if (fs->users == 1)
91203+ if (atomic_read(&fs->users) == 1)
91204 return 0;
91205
91206 *new_fsp = copy_fs_struct(fs);
91207@@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
91208 fs = current->fs;
91209 write_lock(&fs->lock);
91210 current->fs = new_fs;
91211- if (--fs->users)
91212+ gr_set_chroot_entries(current, &current->fs->root);
91213+ if (atomic_dec_return(&fs->users))
91214 new_fs = NULL;
91215 else
91216 new_fs = fs;
91217diff --git a/kernel/futex.c b/kernel/futex.c
91218index fb98c9f..333faec 100644
91219--- a/kernel/futex.c
91220+++ b/kernel/futex.c
91221@@ -54,6 +54,7 @@
91222 #include <linux/mount.h>
91223 #include <linux/pagemap.h>
91224 #include <linux/syscalls.h>
91225+#include <linux/ptrace.h>
91226 #include <linux/signal.h>
91227 #include <linux/module.h>
91228 #include <linux/magic.h>
91229@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
91230 struct page *page;
91231 int err, ro = 0;
91232
91233+#ifdef CONFIG_PAX_SEGMEXEC
91234+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
91235+ return -EFAULT;
91236+#endif
91237+
91238 /*
91239 * The futex address must be "naturally" aligned.
91240 */
91241@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
91242 struct futex_q q;
91243 int ret;
91244
91245+ pax_track_stack();
91246+
91247 if (!bitset)
91248 return -EINVAL;
91249
91250@@ -1871,7 +1879,7 @@ retry:
91251
91252 restart = &current_thread_info()->restart_block;
91253 restart->fn = futex_wait_restart;
91254- restart->futex.uaddr = (u32 *)uaddr;
91255+ restart->futex.uaddr = uaddr;
91256 restart->futex.val = val;
91257 restart->futex.time = abs_time->tv64;
91258 restart->futex.bitset = bitset;
91259@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
91260 struct futex_q q;
91261 int res, ret;
91262
91263+ pax_track_stack();
91264+
91265 if (!bitset)
91266 return -EINVAL;
91267
91268@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
91269 if (!p)
91270 goto err_unlock;
91271 ret = -EPERM;
91272+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91273+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
91274+ goto err_unlock;
91275+#endif
91276 pcred = __task_cred(p);
91277 if (cred->euid != pcred->euid &&
91278 cred->euid != pcred->uid &&
91279@@ -2489,7 +2503,7 @@ retry:
91280 */
91281 static inline int fetch_robust_entry(struct robust_list __user **entry,
91282 struct robust_list __user * __user *head,
91283- int *pi)
91284+ unsigned int *pi)
91285 {
91286 unsigned long uentry;
91287
91288@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
91289 {
91290 u32 curval;
91291 int i;
91292+ mm_segment_t oldfs;
91293
91294 /*
91295 * This will fail and we want it. Some arch implementations do
91296@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
91297 * implementation, the non functional ones will return
91298 * -ENOSYS.
91299 */
91300+ oldfs = get_fs();
91301+ set_fs(USER_DS);
91302 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
91303+ set_fs(oldfs);
91304 if (curval == -EFAULT)
91305 futex_cmpxchg_enabled = 1;
91306
91307diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
91308index 2357165..eb25501 100644
91309--- a/kernel/futex_compat.c
91310+++ b/kernel/futex_compat.c
91311@@ -10,6 +10,7 @@
91312 #include <linux/compat.h>
91313 #include <linux/nsproxy.h>
91314 #include <linux/futex.h>
91315+#include <linux/ptrace.h>
91316
91317 #include <asm/uaccess.h>
91318
91319@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91320 {
91321 struct compat_robust_list_head __user *head;
91322 unsigned long ret;
91323- const struct cred *cred = current_cred(), *pcred;
91324+ const struct cred *cred = current_cred();
91325+ const struct cred *pcred;
91326
91327 if (!futex_cmpxchg_enabled)
91328 return -ENOSYS;
91329@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
91330 if (!p)
91331 goto err_unlock;
91332 ret = -EPERM;
91333+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91334+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
91335+ goto err_unlock;
91336+#endif
91337 pcred = __task_cred(p);
91338 if (cred->euid != pcred->euid &&
91339 cred->euid != pcred->uid &&
91340diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
91341index 9b22d03..6295b62 100644
91342--- a/kernel/gcov/base.c
91343+++ b/kernel/gcov/base.c
91344@@ -102,11 +102,6 @@ void gcov_enable_events(void)
91345 }
91346
91347 #ifdef CONFIG_MODULES
91348-static inline int within(void *addr, void *start, unsigned long size)
91349-{
91350- return ((addr >= start) && (addr < start + size));
91351-}
91352-
91353 /* Update list and generate events when modules are unloaded. */
91354 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91355 void *data)
91356@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
91357 prev = NULL;
91358 /* Remove entries located in module from linked list. */
91359 for (info = gcov_info_head; info; info = info->next) {
91360- if (within(info, mod->module_core, mod->core_size)) {
91361+ if (within_module_core_rw((unsigned long)info, mod)) {
91362 if (prev)
91363 prev->next = info->next;
91364 else
91365diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
91366index a6e9d00..a0da4f9 100644
91367--- a/kernel/hrtimer.c
91368+++ b/kernel/hrtimer.c
91369@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
91370 local_irq_restore(flags);
91371 }
91372
91373-static void run_hrtimer_softirq(struct softirq_action *h)
91374+static void run_hrtimer_softirq(void)
91375 {
91376 hrtimer_peek_ahead_timers();
91377 }
91378diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
91379index 8b6b8b6..6bc87df 100644
91380--- a/kernel/kallsyms.c
91381+++ b/kernel/kallsyms.c
91382@@ -11,6 +11,9 @@
91383 * Changed the compression method from stem compression to "table lookup"
91384 * compression (see scripts/kallsyms.c for a more complete description)
91385 */
91386+#ifdef CONFIG_GRKERNSEC_HIDESYM
91387+#define __INCLUDED_BY_HIDESYM 1
91388+#endif
91389 #include <linux/kallsyms.h>
91390 #include <linux/module.h>
91391 #include <linux/init.h>
91392@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
91393
91394 static inline int is_kernel_inittext(unsigned long addr)
91395 {
91396+ if (system_state != SYSTEM_BOOTING)
91397+ return 0;
91398+
91399 if (addr >= (unsigned long)_sinittext
91400 && addr <= (unsigned long)_einittext)
91401 return 1;
91402 return 0;
91403 }
91404
91405+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91406+#ifdef CONFIG_MODULES
91407+static inline int is_module_text(unsigned long addr)
91408+{
91409+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
91410+ return 1;
91411+
91412+ addr = ktla_ktva(addr);
91413+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
91414+}
91415+#else
91416+static inline int is_module_text(unsigned long addr)
91417+{
91418+ return 0;
91419+}
91420+#endif
91421+#endif
91422+
91423 static inline int is_kernel_text(unsigned long addr)
91424 {
91425 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
91426@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
91427
91428 static inline int is_kernel(unsigned long addr)
91429 {
91430+
91431+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91432+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
91433+ return 1;
91434+
91435+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
91436+#else
91437 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
91438+#endif
91439+
91440 return 1;
91441 return in_gate_area_no_task(addr);
91442 }
91443
91444 static int is_ksym_addr(unsigned long addr)
91445 {
91446+
91447+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
91448+ if (is_module_text(addr))
91449+ return 0;
91450+#endif
91451+
91452 if (all_var)
91453 return is_kernel(addr);
91454
91455@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
91456
91457 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
91458 {
91459- iter->name[0] = '\0';
91460 iter->nameoff = get_symbol_offset(new_pos);
91461 iter->pos = new_pos;
91462 }
91463@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
91464 {
91465 struct kallsym_iter *iter = m->private;
91466
91467+#ifdef CONFIG_GRKERNSEC_HIDESYM
91468+ if (current_uid())
91469+ return 0;
91470+#endif
91471+
91472 /* Some debugging symbols have no name. Ignore them. */
91473 if (!iter->name[0])
91474 return 0;
91475@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
91476 struct kallsym_iter *iter;
91477 int ret;
91478
91479- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
91480+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
91481 if (!iter)
91482 return -ENOMEM;
91483 reset_iter(iter, 0);
91484diff --git a/kernel/kexec.c b/kernel/kexec.c
91485index f336e21..9c1c20b 100644
91486--- a/kernel/kexec.c
91487+++ b/kernel/kexec.c
91488@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
91489 unsigned long flags)
91490 {
91491 struct compat_kexec_segment in;
91492- struct kexec_segment out, __user *ksegments;
91493+ struct kexec_segment out;
91494+ struct kexec_segment __user *ksegments;
91495 unsigned long i, result;
91496
91497 /* Don't allow clients that don't understand the native
91498diff --git a/kernel/kgdb.c b/kernel/kgdb.c
91499index 53dae4b..9ba3743 100644
91500--- a/kernel/kgdb.c
91501+++ b/kernel/kgdb.c
91502@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
91503 /* Guard for recursive entry */
91504 static int exception_level;
91505
91506-static struct kgdb_io *kgdb_io_ops;
91507+static const struct kgdb_io *kgdb_io_ops;
91508 static DEFINE_SPINLOCK(kgdb_registration_lock);
91509
91510 /* kgdb console driver is loaded */
91511@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
91512 */
91513 static atomic_t passive_cpu_wait[NR_CPUS];
91514 static atomic_t cpu_in_kgdb[NR_CPUS];
91515-atomic_t kgdb_setting_breakpoint;
91516+atomic_unchecked_t kgdb_setting_breakpoint;
91517
91518 struct task_struct *kgdb_usethread;
91519 struct task_struct *kgdb_contthread;
91520@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
91521 sizeof(unsigned long)];
91522
91523 /* to keep track of the CPU which is doing the single stepping*/
91524-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91525+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
91526
91527 /*
91528 * If you are debugging a problem where roundup (the collection of
91529@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
91530 return 0;
91531 if (kgdb_connected)
91532 return 1;
91533- if (atomic_read(&kgdb_setting_breakpoint))
91534+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
91535 return 1;
91536 if (print_wait)
91537 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
91538@@ -1426,8 +1426,8 @@ acquirelock:
91539 * instance of the exception handler wanted to come into the
91540 * debugger on a different CPU via a single step
91541 */
91542- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
91543- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
91544+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
91545+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
91546
91547 atomic_set(&kgdb_active, -1);
91548 touch_softlockup_watchdog();
91549@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
91550 *
91551 * Register it with the KGDB core.
91552 */
91553-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
91554+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
91555 {
91556 int err;
91557
91558@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
91559 *
91560 * Unregister it with the KGDB core.
91561 */
91562-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
91563+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
91564 {
91565 BUG_ON(kgdb_connected);
91566
91567@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
91568 */
91569 void kgdb_breakpoint(void)
91570 {
91571- atomic_set(&kgdb_setting_breakpoint, 1);
91572+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
91573 wmb(); /* Sync point before breakpoint */
91574 arch_kgdb_breakpoint();
91575 wmb(); /* Sync point after breakpoint */
91576- atomic_set(&kgdb_setting_breakpoint, 0);
91577+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
91578 }
91579 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
91580
91581diff --git a/kernel/kmod.c b/kernel/kmod.c
91582index a061472..40884b6 100644
91583--- a/kernel/kmod.c
91584+++ b/kernel/kmod.c
91585@@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
91586 * If module auto-loading support is disabled then this function
91587 * becomes a no-operation.
91588 */
91589-int __request_module(bool wait, const char *fmt, ...)
91590+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91591 {
91592- va_list args;
91593 char module_name[MODULE_NAME_LEN];
91594 unsigned int max_modprobes;
91595 int ret;
91596- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
91597+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
91598 static char *envp[] = { "HOME=/",
91599 "TERM=linux",
91600 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
91601@@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
91602 if (ret)
91603 return ret;
91604
91605- va_start(args, fmt);
91606- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91607- va_end(args);
91608+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91609 if (ret >= MODULE_NAME_LEN)
91610 return -ENAMETOOLONG;
91611
91612+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91613+ if (!current_uid()) {
91614+ /* hack to workaround consolekit/udisks stupidity */
91615+ read_lock(&tasklist_lock);
91616+ if (!strcmp(current->comm, "mount") &&
91617+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91618+ read_unlock(&tasklist_lock);
91619+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91620+ return -EPERM;
91621+ }
91622+ read_unlock(&tasklist_lock);
91623+ }
91624+#endif
91625+
91626 /* If modprobe needs a service that is in a module, we get a recursive
91627 * loop. Limit the number of running kmod threads to max_threads/2 or
91628 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91629@@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
91630 atomic_dec(&kmod_concurrent);
91631 return ret;
91632 }
91633+
91634+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91635+{
91636+ va_list args;
91637+ int ret;
91638+
91639+ va_start(args, fmt);
91640+ ret = ____request_module(wait, module_param, fmt, args);
91641+ va_end(args);
91642+
91643+ return ret;
91644+}
91645+
91646+int __request_module(bool wait, const char *fmt, ...)
91647+{
91648+ va_list args;
91649+ int ret;
91650+
91651+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91652+ if (current_uid()) {
91653+ char module_param[MODULE_NAME_LEN];
91654+
91655+ memset(module_param, 0, sizeof(module_param));
91656+
91657+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
91658+
91659+ va_start(args, fmt);
91660+ ret = ____request_module(wait, module_param, fmt, args);
91661+ va_end(args);
91662+
91663+ return ret;
91664+ }
91665+#endif
91666+
91667+ va_start(args, fmt);
91668+ ret = ____request_module(wait, NULL, fmt, args);
91669+ va_end(args);
91670+
91671+ return ret;
91672+}
91673+
91674+
91675 EXPORT_SYMBOL(__request_module);
91676 #endif /* CONFIG_MODULES */
91677
91678@@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
91679 *
91680 * Thus the __user pointer cast is valid here.
91681 */
91682- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91683+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91684
91685 /*
91686 * If ret is 0, either ____call_usermodehelper failed and the
91687diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91688index 176d825..77fa8ea 100644
91689--- a/kernel/kprobes.c
91690+++ b/kernel/kprobes.c
91691@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
91692 * kernel image and loaded module images reside. This is required
91693 * so x86_64 can correctly handle the %rip-relative fixups.
91694 */
91695- kip->insns = module_alloc(PAGE_SIZE);
91696+ kip->insns = module_alloc_exec(PAGE_SIZE);
91697 if (!kip->insns) {
91698 kfree(kip);
91699 return NULL;
91700@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
91701 */
91702 if (!list_is_singular(&kprobe_insn_pages)) {
91703 list_del(&kip->list);
91704- module_free(NULL, kip->insns);
91705+ module_free_exec(NULL, kip->insns);
91706 kfree(kip);
91707 }
91708 return 1;
91709@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
91710 {
91711 int i, err = 0;
91712 unsigned long offset = 0, size = 0;
91713- char *modname, namebuf[128];
91714+ char *modname, namebuf[KSYM_NAME_LEN];
91715 const char *symbol_name;
91716 void *addr;
91717 struct kprobe_blackpoint *kb;
91718@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
91719 const char *sym = NULL;
91720 unsigned int i = *(loff_t *) v;
91721 unsigned long offset = 0;
91722- char *modname, namebuf[128];
91723+ char *modname, namebuf[KSYM_NAME_LEN];
91724
91725 head = &kprobe_table[i];
91726 preempt_disable();
91727diff --git a/kernel/lockdep.c b/kernel/lockdep.c
91728index d86fe89..d12fc66 100644
91729--- a/kernel/lockdep.c
91730+++ b/kernel/lockdep.c
91731@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
91732 /*
91733 * Various lockdep statistics:
91734 */
91735-atomic_t chain_lookup_hits;
91736-atomic_t chain_lookup_misses;
91737-atomic_t hardirqs_on_events;
91738-atomic_t hardirqs_off_events;
91739-atomic_t redundant_hardirqs_on;
91740-atomic_t redundant_hardirqs_off;
91741-atomic_t softirqs_on_events;
91742-atomic_t softirqs_off_events;
91743-atomic_t redundant_softirqs_on;
91744-atomic_t redundant_softirqs_off;
91745-atomic_t nr_unused_locks;
91746-atomic_t nr_cyclic_checks;
91747-atomic_t nr_find_usage_forwards_checks;
91748-atomic_t nr_find_usage_backwards_checks;
91749+atomic_unchecked_t chain_lookup_hits;
91750+atomic_unchecked_t chain_lookup_misses;
91751+atomic_unchecked_t hardirqs_on_events;
91752+atomic_unchecked_t hardirqs_off_events;
91753+atomic_unchecked_t redundant_hardirqs_on;
91754+atomic_unchecked_t redundant_hardirqs_off;
91755+atomic_unchecked_t softirqs_on_events;
91756+atomic_unchecked_t softirqs_off_events;
91757+atomic_unchecked_t redundant_softirqs_on;
91758+atomic_unchecked_t redundant_softirqs_off;
91759+atomic_unchecked_t nr_unused_locks;
91760+atomic_unchecked_t nr_cyclic_checks;
91761+atomic_unchecked_t nr_find_usage_forwards_checks;
91762+atomic_unchecked_t nr_find_usage_backwards_checks;
91763 #endif
91764
91765 /*
91766@@ -577,6 +577,10 @@ static int static_obj(void *obj)
91767 int i;
91768 #endif
91769
91770+#ifdef CONFIG_PAX_KERNEXEC
91771+ start = ktla_ktva(start);
91772+#endif
91773+
91774 /*
91775 * static variable?
91776 */
91777@@ -592,8 +596,7 @@ static int static_obj(void *obj)
91778 */
91779 for_each_possible_cpu(i) {
91780 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
91781- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
91782- + per_cpu_offset(i);
91783+ end = start + PERCPU_ENOUGH_ROOM;
91784
91785 if ((addr >= start) && (addr < end))
91786 return 1;
91787@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91788 if (!static_obj(lock->key)) {
91789 debug_locks_off();
91790 printk("INFO: trying to register non-static key.\n");
91791+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91792 printk("the code is fine but needs lockdep annotation.\n");
91793 printk("turning off the locking correctness validator.\n");
91794 dump_stack();
91795@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91796 if (!class)
91797 return 0;
91798 }
91799- debug_atomic_inc((atomic_t *)&class->ops);
91800+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
91801 if (very_verbose(class)) {
91802 printk("\nacquire class [%p] %s", class->key, class->name);
91803 if (class->name_version > 1)
91804diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
91805index a2ee95a..092f0f2 100644
91806--- a/kernel/lockdep_internals.h
91807+++ b/kernel/lockdep_internals.h
91808@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
91809 /*
91810 * Various lockdep statistics:
91811 */
91812-extern atomic_t chain_lookup_hits;
91813-extern atomic_t chain_lookup_misses;
91814-extern atomic_t hardirqs_on_events;
91815-extern atomic_t hardirqs_off_events;
91816-extern atomic_t redundant_hardirqs_on;
91817-extern atomic_t redundant_hardirqs_off;
91818-extern atomic_t softirqs_on_events;
91819-extern atomic_t softirqs_off_events;
91820-extern atomic_t redundant_softirqs_on;
91821-extern atomic_t redundant_softirqs_off;
91822-extern atomic_t nr_unused_locks;
91823-extern atomic_t nr_cyclic_checks;
91824-extern atomic_t nr_cyclic_check_recursions;
91825-extern atomic_t nr_find_usage_forwards_checks;
91826-extern atomic_t nr_find_usage_forwards_recursions;
91827-extern atomic_t nr_find_usage_backwards_checks;
91828-extern atomic_t nr_find_usage_backwards_recursions;
91829-# define debug_atomic_inc(ptr) atomic_inc(ptr)
91830-# define debug_atomic_dec(ptr) atomic_dec(ptr)
91831-# define debug_atomic_read(ptr) atomic_read(ptr)
91832+extern atomic_unchecked_t chain_lookup_hits;
91833+extern atomic_unchecked_t chain_lookup_misses;
91834+extern atomic_unchecked_t hardirqs_on_events;
91835+extern atomic_unchecked_t hardirqs_off_events;
91836+extern atomic_unchecked_t redundant_hardirqs_on;
91837+extern atomic_unchecked_t redundant_hardirqs_off;
91838+extern atomic_unchecked_t softirqs_on_events;
91839+extern atomic_unchecked_t softirqs_off_events;
91840+extern atomic_unchecked_t redundant_softirqs_on;
91841+extern atomic_unchecked_t redundant_softirqs_off;
91842+extern atomic_unchecked_t nr_unused_locks;
91843+extern atomic_unchecked_t nr_cyclic_checks;
91844+extern atomic_unchecked_t nr_cyclic_check_recursions;
91845+extern atomic_unchecked_t nr_find_usage_forwards_checks;
91846+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
91847+extern atomic_unchecked_t nr_find_usage_backwards_checks;
91848+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
91849+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
91850+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
91851+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
91852 #else
91853 # define debug_atomic_inc(ptr) do { } while (0)
91854 # define debug_atomic_dec(ptr) do { } while (0)
91855diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
91856index d4aba4f..02a353f 100644
91857--- a/kernel/lockdep_proc.c
91858+++ b/kernel/lockdep_proc.c
91859@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
91860
91861 static void print_name(struct seq_file *m, struct lock_class *class)
91862 {
91863- char str[128];
91864+ char str[KSYM_NAME_LEN];
91865 const char *name = class->name;
91866
91867 if (!name) {
91868diff --git a/kernel/module.c b/kernel/module.c
91869index 4b270e6..2efdb65 100644
91870--- a/kernel/module.c
91871+++ b/kernel/module.c
91872@@ -55,6 +55,7 @@
91873 #include <linux/async.h>
91874 #include <linux/percpu.h>
91875 #include <linux/kmemleak.h>
91876+#include <linux/grsecurity.h>
91877
91878 #define CREATE_TRACE_POINTS
91879 #include <trace/events/module.h>
91880@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
91881 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91882
91883 /* Bounds of module allocation, for speeding __module_address */
91884-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91885+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91886+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91887
91888 int register_module_notifier(struct notifier_block * nb)
91889 {
91890@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91891 return true;
91892
91893 list_for_each_entry_rcu(mod, &modules, list) {
91894- struct symsearch arr[] = {
91895+ struct symsearch modarr[] = {
91896 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91897 NOT_GPL_ONLY, false },
91898 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91899@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91900 #endif
91901 };
91902
91903- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91904+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91905 return true;
91906 }
91907 return false;
91908@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
91909 void *ptr;
91910 int cpu;
91911
91912- if (align > PAGE_SIZE) {
91913+ if (align-1 >= PAGE_SIZE) {
91914 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
91915 name, align, PAGE_SIZE);
91916 align = PAGE_SIZE;
91917@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
91918 * /sys/module/foo/sections stuff
91919 * J. Corbet <corbet@lwn.net>
91920 */
91921-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
91922+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91923
91924 static inline bool sect_empty(const Elf_Shdr *sect)
91925 {
91926@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
91927 destroy_params(mod->kp, mod->num_kp);
91928
91929 /* This may be NULL, but that's OK */
91930- module_free(mod, mod->module_init);
91931+ module_free(mod, mod->module_init_rw);
91932+ module_free_exec(mod, mod->module_init_rx);
91933 kfree(mod->args);
91934 if (mod->percpu)
91935 percpu_modfree(mod->percpu);
91936@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
91937 percpu_modfree(mod->refptr);
91938 #endif
91939 /* Free lock-classes: */
91940- lockdep_free_key_range(mod->module_core, mod->core_size);
91941+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91942+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91943
91944 /* Finally, free the core (containing the module structure) */
91945- module_free(mod, mod->module_core);
91946+ module_free_exec(mod, mod->module_core_rx);
91947+ module_free(mod, mod->module_core_rw);
91948
91949 #ifdef CONFIG_MPU
91950 update_protections(current->mm);
91951@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
91952 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
91953 int ret = 0;
91954 const struct kernel_symbol *ksym;
91955+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91956+ int is_fs_load = 0;
91957+ int register_filesystem_found = 0;
91958+ char *p;
91959+
91960+ p = strstr(mod->args, "grsec_modharden_fs");
91961+
91962+ if (p) {
91963+ char *endptr = p + strlen("grsec_modharden_fs");
91964+ /* copy \0 as well */
91965+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91966+ is_fs_load = 1;
91967+ }
91968+#endif
91969+
91970
91971 for (i = 1; i < n; i++) {
91972+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91973+ const char *name = strtab + sym[i].st_name;
91974+
91975+ /* it's a real shame this will never get ripped and copied
91976+ upstream! ;(
91977+ */
91978+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91979+ register_filesystem_found = 1;
91980+#endif
91981 switch (sym[i].st_shndx) {
91982 case SHN_COMMON:
91983 /* We compiled with -fno-common. These are not
91984@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
91985 strtab + sym[i].st_name, mod);
91986 /* Ok if resolved. */
91987 if (ksym) {
91988+ pax_open_kernel();
91989 sym[i].st_value = ksym->value;
91990+ pax_close_kernel();
91991 break;
91992 }
91993
91994@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
91995 secbase = (unsigned long)mod->percpu;
91996 else
91997 secbase = sechdrs[sym[i].st_shndx].sh_addr;
91998+ pax_open_kernel();
91999 sym[i].st_value += secbase;
92000+ pax_close_kernel();
92001 break;
92002 }
92003 }
92004
92005+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92006+ if (is_fs_load && !register_filesystem_found) {
92007+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
92008+ ret = -EPERM;
92009+ }
92010+#endif
92011+
92012 return ret;
92013 }
92014
92015@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
92016 || s->sh_entsize != ~0UL
92017 || strstarts(secstrings + s->sh_name, ".init"))
92018 continue;
92019- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
92020+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92021+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
92022+ else
92023+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
92024 DEBUGP("\t%s\n", secstrings + s->sh_name);
92025 }
92026- if (m == 0)
92027- mod->core_text_size = mod->core_size;
92028 }
92029
92030 DEBUGP("Init section allocation order:\n");
92031@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
92032 || s->sh_entsize != ~0UL
92033 || !strstarts(secstrings + s->sh_name, ".init"))
92034 continue;
92035- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
92036- | INIT_OFFSET_MASK);
92037+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
92038+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
92039+ else
92040+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
92041+ s->sh_entsize |= INIT_OFFSET_MASK;
92042 DEBUGP("\t%s\n", secstrings + s->sh_name);
92043 }
92044- if (m == 0)
92045- mod->init_text_size = mod->init_size;
92046 }
92047 }
92048
92049@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
92050
92051 /* As per nm */
92052 static char elf_type(const Elf_Sym *sym,
92053- Elf_Shdr *sechdrs,
92054- const char *secstrings,
92055- struct module *mod)
92056+ const Elf_Shdr *sechdrs,
92057+ const char *secstrings)
92058 {
92059 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
92060 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
92061@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
92062
92063 /* Put symbol section at end of init part of module. */
92064 symsect->sh_flags |= SHF_ALLOC;
92065- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
92066+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
92067 symindex) | INIT_OFFSET_MASK;
92068 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
92069
92070@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
92071 }
92072
92073 /* Append room for core symbols at end of core part. */
92074- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
92075- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
92076+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
92077+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
92078
92079 /* Put string table section at end of init part of module. */
92080 strsect->sh_flags |= SHF_ALLOC;
92081- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
92082+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
92083 strindex) | INIT_OFFSET_MASK;
92084 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
92085
92086 /* Append room for core symbols' strings at end of core part. */
92087- *pstroffs = mod->core_size;
92088+ *pstroffs = mod->core_size_rx;
92089 __set_bit(0, strmap);
92090- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
92091+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
92092
92093 return symoffs;
92094 }
92095@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
92096 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
92097 mod->strtab = (void *)sechdrs[strindex].sh_addr;
92098
92099+ pax_open_kernel();
92100+
92101 /* Set types up while we still have access to sections. */
92102 for (i = 0; i < mod->num_symtab; i++)
92103 mod->symtab[i].st_info
92104- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
92105+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
92106
92107- mod->core_symtab = dst = mod->module_core + symoffs;
92108+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
92109 src = mod->symtab;
92110 *dst = *src;
92111 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
92112@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
92113 }
92114 mod->core_num_syms = ndst;
92115
92116- mod->core_strtab = s = mod->module_core + stroffs;
92117+ mod->core_strtab = s = mod->module_core_rx + stroffs;
92118 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
92119 if (test_bit(i, strmap))
92120 *++s = mod->strtab[i];
92121+
92122+ pax_close_kernel();
92123 }
92124 #else
92125 static inline unsigned long layout_symtab(struct module *mod,
92126@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
92127 #endif
92128 }
92129
92130-static void *module_alloc_update_bounds(unsigned long size)
92131+static void *module_alloc_update_bounds_rw(unsigned long size)
92132 {
92133 void *ret = module_alloc(size);
92134
92135 if (ret) {
92136 /* Update module bounds. */
92137- if ((unsigned long)ret < module_addr_min)
92138- module_addr_min = (unsigned long)ret;
92139- if ((unsigned long)ret + size > module_addr_max)
92140- module_addr_max = (unsigned long)ret + size;
92141+ if ((unsigned long)ret < module_addr_min_rw)
92142+ module_addr_min_rw = (unsigned long)ret;
92143+ if ((unsigned long)ret + size > module_addr_max_rw)
92144+ module_addr_max_rw = (unsigned long)ret + size;
92145+ }
92146+ return ret;
92147+}
92148+
92149+static void *module_alloc_update_bounds_rx(unsigned long size)
92150+{
92151+ void *ret = module_alloc_exec(size);
92152+
92153+ if (ret) {
92154+ /* Update module bounds. */
92155+ if ((unsigned long)ret < module_addr_min_rx)
92156+ module_addr_min_rx = (unsigned long)ret;
92157+ if ((unsigned long)ret + size > module_addr_max_rx)
92158+ module_addr_max_rx = (unsigned long)ret + size;
92159 }
92160 return ret;
92161 }
92162@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92163 unsigned int i;
92164
92165 /* only scan the sections containing data */
92166- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
92167- (unsigned long)mod->module_core,
92168+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
92169+ (unsigned long)mod->module_core_rw,
92170 sizeof(struct module), GFP_KERNEL);
92171
92172 for (i = 1; i < hdr->e_shnum; i++) {
92173@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
92174 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
92175 continue;
92176
92177- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
92178- (unsigned long)mod->module_core,
92179+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
92180+ (unsigned long)mod->module_core_rw,
92181 sechdrs[i].sh_size, GFP_KERNEL);
92182 }
92183 }
92184@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
92185 Elf_Ehdr *hdr;
92186 Elf_Shdr *sechdrs;
92187 char *secstrings, *args, *modmagic, *strtab = NULL;
92188- char *staging;
92189+ char *staging, *license;
92190 unsigned int i;
92191 unsigned int symindex = 0;
92192 unsigned int strindex = 0;
92193@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
92194 goto free_hdr;
92195 }
92196
92197+ license = get_modinfo(sechdrs, infoindex, "license");
92198+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
92199+ if (!license || !license_is_gpl_compatible(license)) {
92200+ err = -ENOEXEC;
92201+ goto free_hdr;
92202+ }
92203+#endif
92204+
92205 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
92206 /* This is allowed: modprobe --force will invalidate it. */
92207 if (!modmagic) {
92208@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
92209 secstrings, &stroffs, strmap);
92210
92211 /* Do the allocs. */
92212- ptr = module_alloc_update_bounds(mod->core_size);
92213+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
92214 /*
92215 * The pointer to this block is stored in the module structure
92216 * which is inside the block. Just mark it as not being a
92217@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
92218 err = -ENOMEM;
92219 goto free_percpu;
92220 }
92221- memset(ptr, 0, mod->core_size);
92222- mod->module_core = ptr;
92223+ memset(ptr, 0, mod->core_size_rw);
92224+ mod->module_core_rw = ptr;
92225
92226- ptr = module_alloc_update_bounds(mod->init_size);
92227+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
92228 /*
92229 * The pointer to this block is stored in the module structure
92230 * which is inside the block. This block doesn't need to be
92231 * scanned as it contains data and code that will be freed
92232 * after the module is initialized.
92233 */
92234- kmemleak_ignore(ptr);
92235- if (!ptr && mod->init_size) {
92236+ kmemleak_not_leak(ptr);
92237+ if (!ptr && mod->init_size_rw) {
92238 err = -ENOMEM;
92239- goto free_core;
92240+ goto free_core_rw;
92241 }
92242- memset(ptr, 0, mod->init_size);
92243- mod->module_init = ptr;
92244+ memset(ptr, 0, mod->init_size_rw);
92245+ mod->module_init_rw = ptr;
92246+
92247+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
92248+ kmemleak_not_leak(ptr);
92249+ if (!ptr) {
92250+ err = -ENOMEM;
92251+ goto free_init_rw;
92252+ }
92253+
92254+ pax_open_kernel();
92255+ memset(ptr, 0, mod->core_size_rx);
92256+ pax_close_kernel();
92257+ mod->module_core_rx = ptr;
92258+
92259+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
92260+ kmemleak_not_leak(ptr);
92261+ if (!ptr && mod->init_size_rx) {
92262+ err = -ENOMEM;
92263+ goto free_core_rx;
92264+ }
92265+
92266+ pax_open_kernel();
92267+ memset(ptr, 0, mod->init_size_rx);
92268+ pax_close_kernel();
92269+ mod->module_init_rx = ptr;
92270
92271 /* Transfer each section which specifies SHF_ALLOC */
92272 DEBUGP("final section addresses:\n");
92273@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
92274 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
92275 continue;
92276
92277- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
92278- dest = mod->module_init
92279- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92280- else
92281- dest = mod->module_core + sechdrs[i].sh_entsize;
92282+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
92283+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92284+ dest = mod->module_init_rw
92285+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92286+ else
92287+ dest = mod->module_init_rx
92288+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
92289+ } else {
92290+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
92291+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
92292+ else
92293+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
92294+ }
92295
92296- if (sechdrs[i].sh_type != SHT_NOBITS)
92297- memcpy(dest, (void *)sechdrs[i].sh_addr,
92298- sechdrs[i].sh_size);
92299+ if (sechdrs[i].sh_type != SHT_NOBITS) {
92300+
92301+#ifdef CONFIG_PAX_KERNEXEC
92302+#ifdef CONFIG_X86_64
92303+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
92304+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
92305+#endif
92306+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
92307+ pax_open_kernel();
92308+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92309+ pax_close_kernel();
92310+ } else
92311+#endif
92312+
92313+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
92314+ }
92315 /* Update sh_addr to point to copy in image. */
92316- sechdrs[i].sh_addr = (unsigned long)dest;
92317+
92318+#ifdef CONFIG_PAX_KERNEXEC
92319+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
92320+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
92321+ else
92322+#endif
92323+
92324+ sechdrs[i].sh_addr = (unsigned long)dest;
92325 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
92326 }
92327 /* Module has been moved. */
92328@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
92329 mod->name);
92330 if (!mod->refptr) {
92331 err = -ENOMEM;
92332- goto free_init;
92333+ goto free_init_rx;
92334 }
92335 #endif
92336 /* Now we've moved module, initialize linked lists, etc. */
92337@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
92338 goto free_unload;
92339
92340 /* Set up license info based on the info section */
92341- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
92342+ set_license(mod, license);
92343
92344 /*
92345 * ndiswrapper is under GPL by itself, but loads proprietary modules.
92346@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
92347 /* Set up MODINFO_ATTR fields */
92348 setup_modinfo(mod, sechdrs, infoindex);
92349
92350+ mod->args = args;
92351+
92352+#ifdef CONFIG_GRKERNSEC_MODHARDEN
92353+ {
92354+ char *p, *p2;
92355+
92356+ if (strstr(mod->args, "grsec_modharden_netdev")) {
92357+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
92358+ err = -EPERM;
92359+ goto cleanup;
92360+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
92361+ p += strlen("grsec_modharden_normal");
92362+ p2 = strstr(p, "_");
92363+ if (p2) {
92364+ *p2 = '\0';
92365+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
92366+ *p2 = '_';
92367+ }
92368+ err = -EPERM;
92369+ goto cleanup;
92370+ }
92371+ }
92372+#endif
92373+
92374+
92375 /* Fix up syms, so that st_value is a pointer to location. */
92376 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
92377 mod);
92378@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
92379
92380 /* Now do relocations. */
92381 for (i = 1; i < hdr->e_shnum; i++) {
92382- const char *strtab = (char *)sechdrs[strindex].sh_addr;
92383 unsigned int info = sechdrs[i].sh_info;
92384+ strtab = (char *)sechdrs[strindex].sh_addr;
92385
92386 /* Not a valid relocation section? */
92387 if (info >= hdr->e_shnum)
92388@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
92389 * Do it before processing of module parameters, so the module
92390 * can provide parameter accessor functions of its own.
92391 */
92392- if (mod->module_init)
92393- flush_icache_range((unsigned long)mod->module_init,
92394- (unsigned long)mod->module_init
92395- + mod->init_size);
92396- flush_icache_range((unsigned long)mod->module_core,
92397- (unsigned long)mod->module_core + mod->core_size);
92398+ if (mod->module_init_rx)
92399+ flush_icache_range((unsigned long)mod->module_init_rx,
92400+ (unsigned long)mod->module_init_rx
92401+ + mod->init_size_rx);
92402+ flush_icache_range((unsigned long)mod->module_core_rx,
92403+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
92404
92405 set_fs(old_fs);
92406
92407- mod->args = args;
92408 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
92409 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
92410 mod->name);
92411@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
92412 free_unload:
92413 module_unload_free(mod);
92414 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
92415+ free_init_rx:
92416 percpu_modfree(mod->refptr);
92417- free_init:
92418 #endif
92419- module_free(mod, mod->module_init);
92420- free_core:
92421- module_free(mod, mod->module_core);
92422+ module_free_exec(mod, mod->module_init_rx);
92423+ free_core_rx:
92424+ module_free_exec(mod, mod->module_core_rx);
92425+ free_init_rw:
92426+ module_free(mod, mod->module_init_rw);
92427+ free_core_rw:
92428+ module_free(mod, mod->module_core_rw);
92429 /* mod will be freed with core. Don't access it beyond this line! */
92430 free_percpu:
92431 if (percpu)
92432@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
92433 mod->symtab = mod->core_symtab;
92434 mod->strtab = mod->core_strtab;
92435 #endif
92436- module_free(mod, mod->module_init);
92437- mod->module_init = NULL;
92438- mod->init_size = 0;
92439- mod->init_text_size = 0;
92440+ module_free(mod, mod->module_init_rw);
92441+ module_free_exec(mod, mod->module_init_rx);
92442+ mod->module_init_rw = NULL;
92443+ mod->module_init_rx = NULL;
92444+ mod->init_size_rw = 0;
92445+ mod->init_size_rx = 0;
92446 mutex_unlock(&module_mutex);
92447
92448 return 0;
92449@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
92450 unsigned long nextval;
92451
92452 /* At worse, next value is at end of module */
92453- if (within_module_init(addr, mod))
92454- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92455+ if (within_module_init_rx(addr, mod))
92456+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92457+ else if (within_module_init_rw(addr, mod))
92458+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92459+ else if (within_module_core_rx(addr, mod))
92460+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92461+ else if (within_module_core_rw(addr, mod))
92462+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92463 else
92464- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92465+ return NULL;
92466
92467 /* Scan for closest preceeding symbol, and next symbol. (ELF
92468 starts real symbols at 1). */
92469@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
92470 char buf[8];
92471
92472 seq_printf(m, "%s %u",
92473- mod->name, mod->init_size + mod->core_size);
92474+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92475 print_unload_info(m, mod);
92476
92477 /* Informative for users. */
92478@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
92479 mod->state == MODULE_STATE_COMING ? "Loading":
92480 "Live");
92481 /* Used by oprofile and other similar tools. */
92482- seq_printf(m, " 0x%p", mod->module_core);
92483+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
92484
92485 /* Taints info */
92486 if (mod->taints)
92487@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
92488
92489 static int __init proc_modules_init(void)
92490 {
92491+#ifndef CONFIG_GRKERNSEC_HIDESYM
92492+#ifdef CONFIG_GRKERNSEC_PROC_USER
92493+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92494+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92495+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92496+#else
92497 proc_create("modules", 0, NULL, &proc_modules_operations);
92498+#endif
92499+#else
92500+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92501+#endif
92502 return 0;
92503 }
92504 module_init(proc_modules_init);
92505@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
92506 {
92507 struct module *mod;
92508
92509- if (addr < module_addr_min || addr > module_addr_max)
92510+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92511+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92512 return NULL;
92513
92514 list_for_each_entry_rcu(mod, &modules, list)
92515- if (within_module_core(addr, mod)
92516- || within_module_init(addr, mod))
92517+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
92518 return mod;
92519 return NULL;
92520 }
92521@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
92522 */
92523 struct module *__module_text_address(unsigned long addr)
92524 {
92525- struct module *mod = __module_address(addr);
92526+ struct module *mod;
92527+
92528+#ifdef CONFIG_X86_32
92529+ addr = ktla_ktva(addr);
92530+#endif
92531+
92532+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92533+ return NULL;
92534+
92535+ mod = __module_address(addr);
92536+
92537 if (mod) {
92538 /* Make sure it's within the text section. */
92539- if (!within(addr, mod->module_init, mod->init_text_size)
92540- && !within(addr, mod->module_core, mod->core_text_size))
92541+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92542 mod = NULL;
92543 }
92544 return mod;
92545diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
92546index ec815a9..fe46e99 100644
92547--- a/kernel/mutex-debug.c
92548+++ b/kernel/mutex-debug.c
92549@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
92550 }
92551
92552 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92553- struct thread_info *ti)
92554+ struct task_struct *task)
92555 {
92556 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
92557
92558 /* Mark the current thread as blocked on the lock: */
92559- ti->task->blocked_on = waiter;
92560+ task->blocked_on = waiter;
92561 }
92562
92563 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92564- struct thread_info *ti)
92565+ struct task_struct *task)
92566 {
92567 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
92568- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
92569- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
92570- ti->task->blocked_on = NULL;
92571+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
92572+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
92573+ task->blocked_on = NULL;
92574
92575 list_del_init(&waiter->list);
92576 waiter->task = NULL;
92577@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
92578 return;
92579
92580 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
92581- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
92582+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
92583 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
92584 mutex_clear_owner(lock);
92585 }
92586diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
92587index 6b2d735..372d3c4 100644
92588--- a/kernel/mutex-debug.h
92589+++ b/kernel/mutex-debug.h
92590@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92591 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92592 extern void debug_mutex_add_waiter(struct mutex *lock,
92593 struct mutex_waiter *waiter,
92594- struct thread_info *ti);
92595+ struct task_struct *task);
92596 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92597- struct thread_info *ti);
92598+ struct task_struct *task);
92599 extern void debug_mutex_unlock(struct mutex *lock);
92600 extern void debug_mutex_init(struct mutex *lock, const char *name,
92601 struct lock_class_key *key);
92602
92603 static inline void mutex_set_owner(struct mutex *lock)
92604 {
92605- lock->owner = current_thread_info();
92606+ lock->owner = current;
92607 }
92608
92609 static inline void mutex_clear_owner(struct mutex *lock)
92610diff --git a/kernel/mutex.c b/kernel/mutex.c
92611index f85644c..5ee9f77 100644
92612--- a/kernel/mutex.c
92613+++ b/kernel/mutex.c
92614@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92615 */
92616
92617 for (;;) {
92618- struct thread_info *owner;
92619+ struct task_struct *owner;
92620
92621 /*
92622 * If we own the BKL, then don't spin. The owner of
92623@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92624 spin_lock_mutex(&lock->wait_lock, flags);
92625
92626 debug_mutex_lock_common(lock, &waiter);
92627- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92628+ debug_mutex_add_waiter(lock, &waiter, task);
92629
92630 /* add waiting tasks to the end of the waitqueue (FIFO): */
92631 list_add_tail(&waiter.list, &lock->wait_list);
92632@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92633 * TASK_UNINTERRUPTIBLE case.)
92634 */
92635 if (unlikely(signal_pending_state(state, task))) {
92636- mutex_remove_waiter(lock, &waiter,
92637- task_thread_info(task));
92638+ mutex_remove_waiter(lock, &waiter, task);
92639 mutex_release(&lock->dep_map, 1, ip);
92640 spin_unlock_mutex(&lock->wait_lock, flags);
92641
92642@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92643 done:
92644 lock_acquired(&lock->dep_map, ip);
92645 /* got the lock - rejoice! */
92646- mutex_remove_waiter(lock, &waiter, current_thread_info());
92647+ mutex_remove_waiter(lock, &waiter, task);
92648 mutex_set_owner(lock);
92649
92650 /* set it to 0 if there are no waiters left: */
92651diff --git a/kernel/mutex.h b/kernel/mutex.h
92652index 67578ca..4115fbf 100644
92653--- a/kernel/mutex.h
92654+++ b/kernel/mutex.h
92655@@ -19,7 +19,7 @@
92656 #ifdef CONFIG_SMP
92657 static inline void mutex_set_owner(struct mutex *lock)
92658 {
92659- lock->owner = current_thread_info();
92660+ lock->owner = current;
92661 }
92662
92663 static inline void mutex_clear_owner(struct mutex *lock)
92664diff --git a/kernel/panic.c b/kernel/panic.c
92665index 96b45d0..ff70a46 100644
92666--- a/kernel/panic.c
92667+++ b/kernel/panic.c
92668@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
92669 va_end(args);
92670 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
92671 #ifdef CONFIG_DEBUG_BUGVERBOSE
92672- dump_stack();
92673+ /*
92674+ * Avoid nested stack-dumping if a panic occurs during oops processing
92675+ */
92676+ if (!oops_in_progress)
92677+ dump_stack();
92678 #endif
92679
92680 /*
92681@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
92682 const char *board;
92683
92684 printk(KERN_WARNING "------------[ cut here ]------------\n");
92685- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
92686+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
92687 board = dmi_get_system_info(DMI_PRODUCT_NAME);
92688 if (board)
92689 printk(KERN_WARNING "Hardware name: %s\n", board);
92690@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92691 */
92692 void __stack_chk_fail(void)
92693 {
92694- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92695+ dump_stack();
92696+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92697 __builtin_return_address(0));
92698 }
92699 EXPORT_SYMBOL(__stack_chk_fail);
92700diff --git a/kernel/params.c b/kernel/params.c
92701index d656c27..21e452c 100644
92702--- a/kernel/params.c
92703+++ b/kernel/params.c
92704@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
92705 return ret;
92706 }
92707
92708-static struct sysfs_ops module_sysfs_ops = {
92709+static const struct sysfs_ops module_sysfs_ops = {
92710 .show = module_attr_show,
92711 .store = module_attr_store,
92712 };
92713@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
92714 return 0;
92715 }
92716
92717-static struct kset_uevent_ops module_uevent_ops = {
92718+static const struct kset_uevent_ops module_uevent_ops = {
92719 .filter = uevent_filter,
92720 };
92721
92722diff --git a/kernel/perf_event.c b/kernel/perf_event.c
92723index 37ebc14..9c121d9 100644
92724--- a/kernel/perf_event.c
92725+++ b/kernel/perf_event.c
92726@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
92727 */
92728 int sysctl_perf_event_sample_rate __read_mostly = 100000;
92729
92730-static atomic64_t perf_event_id;
92731+static atomic64_unchecked_t perf_event_id;
92732
92733 /*
92734 * Lock for (sysadmin-configurable) event reservations:
92735@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
92736 * In order to keep per-task stats reliable we need to flip the event
92737 * values when we flip the contexts.
92738 */
92739- value = atomic64_read(&next_event->count);
92740- value = atomic64_xchg(&event->count, value);
92741- atomic64_set(&next_event->count, value);
92742+ value = atomic64_read_unchecked(&next_event->count);
92743+ value = atomic64_xchg_unchecked(&event->count, value);
92744+ atomic64_set_unchecked(&next_event->count, value);
92745
92746 swap(event->total_time_enabled, next_event->total_time_enabled);
92747 swap(event->total_time_running, next_event->total_time_running);
92748@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
92749 update_event_times(event);
92750 }
92751
92752- return atomic64_read(&event->count);
92753+ return atomic64_read_unchecked(&event->count);
92754 }
92755
92756 /*
92757@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
92758 values[n++] = 1 + leader->nr_siblings;
92759 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92760 values[n++] = leader->total_time_enabled +
92761- atomic64_read(&leader->child_total_time_enabled);
92762+ atomic64_read_unchecked(&leader->child_total_time_enabled);
92763 }
92764 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92765 values[n++] = leader->total_time_running +
92766- atomic64_read(&leader->child_total_time_running);
92767+ atomic64_read_unchecked(&leader->child_total_time_running);
92768 }
92769
92770 size = n * sizeof(u64);
92771@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
92772 values[n++] = perf_event_read_value(event);
92773 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92774 values[n++] = event->total_time_enabled +
92775- atomic64_read(&event->child_total_time_enabled);
92776+ atomic64_read_unchecked(&event->child_total_time_enabled);
92777 }
92778 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92779 values[n++] = event->total_time_running +
92780- atomic64_read(&event->child_total_time_running);
92781+ atomic64_read_unchecked(&event->child_total_time_running);
92782 }
92783 if (read_format & PERF_FORMAT_ID)
92784 values[n++] = primary_event_id(event);
92785@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
92786 static void perf_event_reset(struct perf_event *event)
92787 {
92788 (void)perf_event_read(event);
92789- atomic64_set(&event->count, 0);
92790+ atomic64_set_unchecked(&event->count, 0);
92791 perf_event_update_userpage(event);
92792 }
92793
92794@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
92795 ++userpg->lock;
92796 barrier();
92797 userpg->index = perf_event_index(event);
92798- userpg->offset = atomic64_read(&event->count);
92799+ userpg->offset = atomic64_read_unchecked(&event->count);
92800 if (event->state == PERF_EVENT_STATE_ACTIVE)
92801- userpg->offset -= atomic64_read(&event->hw.prev_count);
92802+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
92803
92804 userpg->time_enabled = event->total_time_enabled +
92805- atomic64_read(&event->child_total_time_enabled);
92806+ atomic64_read_unchecked(&event->child_total_time_enabled);
92807
92808 userpg->time_running = event->total_time_running +
92809- atomic64_read(&event->child_total_time_running);
92810+ atomic64_read_unchecked(&event->child_total_time_running);
92811
92812 barrier();
92813 ++userpg->lock;
92814@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
92815 u64 values[4];
92816 int n = 0;
92817
92818- values[n++] = atomic64_read(&event->count);
92819+ values[n++] = atomic64_read_unchecked(&event->count);
92820 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92821 values[n++] = event->total_time_enabled +
92822- atomic64_read(&event->child_total_time_enabled);
92823+ atomic64_read_unchecked(&event->child_total_time_enabled);
92824 }
92825 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92826 values[n++] = event->total_time_running +
92827- atomic64_read(&event->child_total_time_running);
92828+ atomic64_read_unchecked(&event->child_total_time_running);
92829 }
92830 if (read_format & PERF_FORMAT_ID)
92831 values[n++] = primary_event_id(event);
92832@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92833 if (leader != event)
92834 leader->pmu->read(leader);
92835
92836- values[n++] = atomic64_read(&leader->count);
92837+ values[n++] = atomic64_read_unchecked(&leader->count);
92838 if (read_format & PERF_FORMAT_ID)
92839 values[n++] = primary_event_id(leader);
92840
92841@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92842 if (sub != event)
92843 sub->pmu->read(sub);
92844
92845- values[n++] = atomic64_read(&sub->count);
92846+ values[n++] = atomic64_read_unchecked(&sub->count);
92847 if (read_format & PERF_FORMAT_ID)
92848 values[n++] = primary_event_id(sub);
92849
92850@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
92851 * need to add enough zero bytes after the string to handle
92852 * the 64bit alignment we do later.
92853 */
92854- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
92855+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
92856 if (!buf) {
92857 name = strncpy(tmp, "//enomem", sizeof(tmp));
92858 goto got_name;
92859 }
92860- name = d_path(&file->f_path, buf, PATH_MAX);
92861+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
92862 if (IS_ERR(name)) {
92863 name = strncpy(tmp, "//toolong", sizeof(tmp));
92864 goto got_name;
92865@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
92866 {
92867 struct hw_perf_event *hwc = &event->hw;
92868
92869- atomic64_add(nr, &event->count);
92870+ atomic64_add_unchecked(nr, &event->count);
92871
92872 if (!hwc->sample_period)
92873 return;
92874@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
92875 u64 now;
92876
92877 now = cpu_clock(cpu);
92878- prev = atomic64_read(&event->hw.prev_count);
92879- atomic64_set(&event->hw.prev_count, now);
92880- atomic64_add(now - prev, &event->count);
92881+ prev = atomic64_read_unchecked(&event->hw.prev_count);
92882+ atomic64_set_unchecked(&event->hw.prev_count, now);
92883+ atomic64_add_unchecked(now - prev, &event->count);
92884 }
92885
92886 static int cpu_clock_perf_event_enable(struct perf_event *event)
92887@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
92888 struct hw_perf_event *hwc = &event->hw;
92889 int cpu = raw_smp_processor_id();
92890
92891- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
92892+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
92893 perf_swevent_start_hrtimer(event);
92894
92895 return 0;
92896@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
92897 u64 prev;
92898 s64 delta;
92899
92900- prev = atomic64_xchg(&event->hw.prev_count, now);
92901+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
92902 delta = now - prev;
92903- atomic64_add(delta, &event->count);
92904+ atomic64_add_unchecked(delta, &event->count);
92905 }
92906
92907 static int task_clock_perf_event_enable(struct perf_event *event)
92908@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
92909
92910 now = event->ctx->time;
92911
92912- atomic64_set(&hwc->prev_count, now);
92913+ atomic64_set_unchecked(&hwc->prev_count, now);
92914
92915 perf_swevent_start_hrtimer(event);
92916
92917@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
92918 event->parent = parent_event;
92919
92920 event->ns = get_pid_ns(current->nsproxy->pid_ns);
92921- event->id = atomic64_inc_return(&perf_event_id);
92922+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
92923
92924 event->state = PERF_EVENT_STATE_INACTIVE;
92925
92926@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
92927 if (child_event->attr.inherit_stat)
92928 perf_event_read_event(child_event, child);
92929
92930- child_val = atomic64_read(&child_event->count);
92931+ child_val = atomic64_read_unchecked(&child_event->count);
92932
92933 /*
92934 * Add back the child's count to the parent's count:
92935 */
92936- atomic64_add(child_val, &parent_event->count);
92937- atomic64_add(child_event->total_time_enabled,
92938+ atomic64_add_unchecked(child_val, &parent_event->count);
92939+ atomic64_add_unchecked(child_event->total_time_enabled,
92940 &parent_event->child_total_time_enabled);
92941- atomic64_add(child_event->total_time_running,
92942+ atomic64_add_unchecked(child_event->total_time_running,
92943 &parent_event->child_total_time_running);
92944
92945 /*
92946diff --git a/kernel/pid.c b/kernel/pid.c
92947index fce7198..4f23a7e 100644
92948--- a/kernel/pid.c
92949+++ b/kernel/pid.c
92950@@ -33,6 +33,7 @@
92951 #include <linux/rculist.h>
92952 #include <linux/bootmem.h>
92953 #include <linux/hash.h>
92954+#include <linux/security.h>
92955 #include <linux/pid_namespace.h>
92956 #include <linux/init_task.h>
92957 #include <linux/syscalls.h>
92958@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92959
92960 int pid_max = PID_MAX_DEFAULT;
92961
92962-#define RESERVED_PIDS 300
92963+#define RESERVED_PIDS 500
92964
92965 int pid_max_min = RESERVED_PIDS + 1;
92966 int pid_max_max = PID_MAX_LIMIT;
92967@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
92968 */
92969 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92970 {
92971- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92972+ struct task_struct *task;
92973+
92974+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92975+
92976+ if (gr_pid_is_chrooted(task))
92977+ return NULL;
92978+
92979+ return task;
92980 }
92981
92982 struct task_struct *find_task_by_vpid(pid_t vnr)
92983@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
92984 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
92985 }
92986
92987+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
92988+{
92989+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
92990+}
92991+
92992 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
92993 {
92994 struct pid *pid;
92995diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
92996index 5c9dc22..d271117 100644
92997--- a/kernel/posix-cpu-timers.c
92998+++ b/kernel/posix-cpu-timers.c
92999@@ -6,6 +6,7 @@
93000 #include <linux/posix-timers.h>
93001 #include <linux/errno.h>
93002 #include <linux/math64.h>
93003+#include <linux/security.h>
93004 #include <asm/uaccess.h>
93005 #include <linux/kernel_stat.h>
93006 #include <trace/events/timer.h>
93007@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
93008
93009 static __init int init_posix_cpu_timers(void)
93010 {
93011- struct k_clock process = {
93012+ static struct k_clock process = {
93013 .clock_getres = process_cpu_clock_getres,
93014 .clock_get = process_cpu_clock_get,
93015 .clock_set = do_posix_clock_nosettime,
93016@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
93017 .nsleep = process_cpu_nsleep,
93018 .nsleep_restart = process_cpu_nsleep_restart,
93019 };
93020- struct k_clock thread = {
93021+ static struct k_clock thread = {
93022 .clock_getres = thread_cpu_clock_getres,
93023 .clock_get = thread_cpu_clock_get,
93024 .clock_set = do_posix_clock_nosettime,
93025diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
93026index 5e76d22..cf1baeb 100644
93027--- a/kernel/posix-timers.c
93028+++ b/kernel/posix-timers.c
93029@@ -42,6 +42,7 @@
93030 #include <linux/compiler.h>
93031 #include <linux/idr.h>
93032 #include <linux/posix-timers.h>
93033+#include <linux/grsecurity.h>
93034 #include <linux/syscalls.h>
93035 #include <linux/wait.h>
93036 #include <linux/workqueue.h>
93037@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
93038 * which we beg off on and pass to do_sys_settimeofday().
93039 */
93040
93041-static struct k_clock posix_clocks[MAX_CLOCKS];
93042+static struct k_clock *posix_clocks[MAX_CLOCKS];
93043
93044 /*
93045 * These ones are defined below.
93046@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
93047 */
93048 #define CLOCK_DISPATCH(clock, call, arglist) \
93049 ((clock) < 0 ? posix_cpu_##call arglist : \
93050- (posix_clocks[clock].call != NULL \
93051- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
93052+ (posix_clocks[clock]->call != NULL \
93053+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
93054
93055 /*
93056 * Default clock hook functions when the struct k_clock passed
93057@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
93058 struct timespec *tp)
93059 {
93060 tp->tv_sec = 0;
93061- tp->tv_nsec = posix_clocks[which_clock].res;
93062+ tp->tv_nsec = posix_clocks[which_clock]->res;
93063 return 0;
93064 }
93065
93066@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
93067 return 0;
93068 if ((unsigned) which_clock >= MAX_CLOCKS)
93069 return 1;
93070- if (posix_clocks[which_clock].clock_getres != NULL)
93071+ if (posix_clocks[which_clock] == NULL)
93072 return 0;
93073- if (posix_clocks[which_clock].res != 0)
93074+ if (posix_clocks[which_clock]->clock_getres != NULL)
93075+ return 0;
93076+ if (posix_clocks[which_clock]->res != 0)
93077 return 0;
93078 return 1;
93079 }
93080@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
93081 */
93082 static __init int init_posix_timers(void)
93083 {
93084- struct k_clock clock_realtime = {
93085+ static struct k_clock clock_realtime = {
93086 .clock_getres = hrtimer_get_res,
93087 };
93088- struct k_clock clock_monotonic = {
93089+ static struct k_clock clock_monotonic = {
93090 .clock_getres = hrtimer_get_res,
93091 .clock_get = posix_ktime_get_ts,
93092 .clock_set = do_posix_clock_nosettime,
93093 };
93094- struct k_clock clock_monotonic_raw = {
93095+ static struct k_clock clock_monotonic_raw = {
93096 .clock_getres = hrtimer_get_res,
93097 .clock_get = posix_get_monotonic_raw,
93098 .clock_set = do_posix_clock_nosettime,
93099 .timer_create = no_timer_create,
93100 .nsleep = no_nsleep,
93101 };
93102- struct k_clock clock_realtime_coarse = {
93103+ static struct k_clock clock_realtime_coarse = {
93104 .clock_getres = posix_get_coarse_res,
93105 .clock_get = posix_get_realtime_coarse,
93106 .clock_set = do_posix_clock_nosettime,
93107 .timer_create = no_timer_create,
93108 .nsleep = no_nsleep,
93109 };
93110- struct k_clock clock_monotonic_coarse = {
93111+ static struct k_clock clock_monotonic_coarse = {
93112 .clock_getres = posix_get_coarse_res,
93113 .clock_get = posix_get_monotonic_coarse,
93114 .clock_set = do_posix_clock_nosettime,
93115@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
93116 .nsleep = no_nsleep,
93117 };
93118
93119+ pax_track_stack();
93120+
93121 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
93122 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
93123 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
93124@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
93125 return;
93126 }
93127
93128- posix_clocks[clock_id] = *new_clock;
93129+ posix_clocks[clock_id] = new_clock;
93130 }
93131 EXPORT_SYMBOL_GPL(register_posix_clock);
93132
93133@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93134 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93135 return -EFAULT;
93136
93137+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93138+ have their clock_set fptr set to a nosettime dummy function
93139+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93140+ call common_clock_set, which calls do_sys_settimeofday, which
93141+ we hook
93142+ */
93143+
93144 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
93145 }
93146
93147diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
93148index 04a9e90..bc355aa 100644
93149--- a/kernel/power/hibernate.c
93150+++ b/kernel/power/hibernate.c
93151@@ -48,14 +48,14 @@ enum {
93152
93153 static int hibernation_mode = HIBERNATION_SHUTDOWN;
93154
93155-static struct platform_hibernation_ops *hibernation_ops;
93156+static const struct platform_hibernation_ops *hibernation_ops;
93157
93158 /**
93159 * hibernation_set_ops - set the global hibernate operations
93160 * @ops: the hibernation operations to use in subsequent hibernation transitions
93161 */
93162
93163-void hibernation_set_ops(struct platform_hibernation_ops *ops)
93164+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
93165 {
93166 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
93167 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
93168diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
93169index e8b3370..484c2e4 100644
93170--- a/kernel/power/poweroff.c
93171+++ b/kernel/power/poweroff.c
93172@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
93173 .enable_mask = SYSRQ_ENABLE_BOOT,
93174 };
93175
93176-static int pm_sysrq_init(void)
93177+static int __init pm_sysrq_init(void)
93178 {
93179 register_sysrq_key('o', &sysrq_poweroff_op);
93180 return 0;
93181diff --git a/kernel/power/process.c b/kernel/power/process.c
93182index e7cd671..56d5f459 100644
93183--- a/kernel/power/process.c
93184+++ b/kernel/power/process.c
93185@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
93186 struct timeval start, end;
93187 u64 elapsed_csecs64;
93188 unsigned int elapsed_csecs;
93189+ bool timedout = false;
93190
93191 do_gettimeofday(&start);
93192
93193 end_time = jiffies + TIMEOUT;
93194 do {
93195 todo = 0;
93196+ if (time_after(jiffies, end_time))
93197+ timedout = true;
93198 read_lock(&tasklist_lock);
93199 do_each_thread(g, p) {
93200 if (frozen(p) || !freezeable(p))
93201@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
93202 * It is "frozen enough". If the task does wake
93203 * up, it will immediately call try_to_freeze.
93204 */
93205- if (!task_is_stopped_or_traced(p) &&
93206- !freezer_should_skip(p))
93207+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
93208 todo++;
93209+ if (timedout) {
93210+ printk(KERN_ERR "Task refusing to freeze:\n");
93211+ sched_show_task(p);
93212+ }
93213+ }
93214 } while_each_thread(g, p);
93215 read_unlock(&tasklist_lock);
93216 yield(); /* Yield is okay here */
93217- if (time_after(jiffies, end_time))
93218- break;
93219- } while (todo);
93220+ } while (todo && !timedout);
93221
93222 do_gettimeofday(&end);
93223 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
93224diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
93225index 40dd021..fb30ceb 100644
93226--- a/kernel/power/suspend.c
93227+++ b/kernel/power/suspend.c
93228@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
93229 [PM_SUSPEND_MEM] = "mem",
93230 };
93231
93232-static struct platform_suspend_ops *suspend_ops;
93233+static const struct platform_suspend_ops *suspend_ops;
93234
93235 /**
93236 * suspend_set_ops - Set the global suspend method table.
93237 * @ops: Pointer to ops structure.
93238 */
93239-void suspend_set_ops(struct platform_suspend_ops *ops)
93240+void suspend_set_ops(const struct platform_suspend_ops *ops)
93241 {
93242 mutex_lock(&pm_mutex);
93243 suspend_ops = ops;
93244diff --git a/kernel/printk.c b/kernel/printk.c
93245index 4cade47..4d17900 100644
93246--- a/kernel/printk.c
93247+++ b/kernel/printk.c
93248@@ -33,6 +33,7 @@
93249 #include <linux/bootmem.h>
93250 #include <linux/syscalls.h>
93251 #include <linux/kexec.h>
93252+#include <linux/syslog.h>
93253
93254 #include <asm/uaccess.h>
93255
93256@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
93257 }
93258 #endif
93259
93260-/*
93261- * Commands to do_syslog:
93262- *
93263- * 0 -- Close the log. Currently a NOP.
93264- * 1 -- Open the log. Currently a NOP.
93265- * 2 -- Read from the log.
93266- * 3 -- Read all messages remaining in the ring buffer.
93267- * 4 -- Read and clear all messages remaining in the ring buffer
93268- * 5 -- Clear ring buffer.
93269- * 6 -- Disable printk's to console
93270- * 7 -- Enable printk's to console
93271- * 8 -- Set level of messages printed to console
93272- * 9 -- Return number of unread characters in the log buffer
93273- * 10 -- Return size of the log buffer
93274- */
93275-int do_syslog(int type, char __user *buf, int len)
93276+int do_syslog(int type, char __user *buf, int len, bool from_file)
93277 {
93278 unsigned i, j, limit, count;
93279 int do_clear = 0;
93280 char c;
93281 int error = 0;
93282
93283- error = security_syslog(type);
93284+#ifdef CONFIG_GRKERNSEC_DMESG
93285+ if (grsec_enable_dmesg &&
93286+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
93287+ !capable(CAP_SYS_ADMIN))
93288+ return -EPERM;
93289+#endif
93290+
93291+ error = security_syslog(type, from_file);
93292 if (error)
93293 return error;
93294
93295 switch (type) {
93296- case 0: /* Close log */
93297+ case SYSLOG_ACTION_CLOSE: /* Close log */
93298 break;
93299- case 1: /* Open log */
93300+ case SYSLOG_ACTION_OPEN: /* Open log */
93301 break;
93302- case 2: /* Read from log */
93303+ case SYSLOG_ACTION_READ: /* Read from log */
93304 error = -EINVAL;
93305 if (!buf || len < 0)
93306 goto out;
93307@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
93308 if (!error)
93309 error = i;
93310 break;
93311- case 4: /* Read/clear last kernel messages */
93312+ /* Read/clear last kernel messages */
93313+ case SYSLOG_ACTION_READ_CLEAR:
93314 do_clear = 1;
93315 /* FALL THRU */
93316- case 3: /* Read last kernel messages */
93317+ /* Read last kernel messages */
93318+ case SYSLOG_ACTION_READ_ALL:
93319 error = -EINVAL;
93320 if (!buf || len < 0)
93321 goto out;
93322@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
93323 }
93324 }
93325 break;
93326- case 5: /* Clear ring buffer */
93327+ /* Clear ring buffer */
93328+ case SYSLOG_ACTION_CLEAR:
93329 logged_chars = 0;
93330 break;
93331- case 6: /* Disable logging to console */
93332+ /* Disable logging to console */
93333+ case SYSLOG_ACTION_CONSOLE_OFF:
93334 if (saved_console_loglevel == -1)
93335 saved_console_loglevel = console_loglevel;
93336 console_loglevel = minimum_console_loglevel;
93337 break;
93338- case 7: /* Enable logging to console */
93339+ /* Enable logging to console */
93340+ case SYSLOG_ACTION_CONSOLE_ON:
93341 if (saved_console_loglevel != -1) {
93342 console_loglevel = saved_console_loglevel;
93343 saved_console_loglevel = -1;
93344 }
93345 break;
93346- case 8: /* Set level of messages printed to console */
93347+ /* Set level of messages printed to console */
93348+ case SYSLOG_ACTION_CONSOLE_LEVEL:
93349 error = -EINVAL;
93350 if (len < 1 || len > 8)
93351 goto out;
93352@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
93353 saved_console_loglevel = -1;
93354 error = 0;
93355 break;
93356- case 9: /* Number of chars in the log buffer */
93357+ /* Number of chars in the log buffer */
93358+ case SYSLOG_ACTION_SIZE_UNREAD:
93359 error = log_end - log_start;
93360 break;
93361- case 10: /* Size of the log buffer */
93362+ /* Size of the log buffer */
93363+ case SYSLOG_ACTION_SIZE_BUFFER:
93364 error = log_buf_len;
93365 break;
93366 default:
93367@@ -415,7 +416,7 @@ out:
93368
93369 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
93370 {
93371- return do_syslog(type, buf, len);
93372+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
93373 }
93374
93375 /*
93376diff --git a/kernel/profile.c b/kernel/profile.c
93377index dfadc5b..7f59404 100644
93378--- a/kernel/profile.c
93379+++ b/kernel/profile.c
93380@@ -39,7 +39,7 @@ struct profile_hit {
93381 /* Oprofile timer tick hook */
93382 static int (*timer_hook)(struct pt_regs *) __read_mostly;
93383
93384-static atomic_t *prof_buffer;
93385+static atomic_unchecked_t *prof_buffer;
93386 static unsigned long prof_len, prof_shift;
93387
93388 int prof_on __read_mostly;
93389@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
93390 hits[i].pc = 0;
93391 continue;
93392 }
93393- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93394+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93395 hits[i].hits = hits[i].pc = 0;
93396 }
93397 }
93398@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93399 * Add the current hit(s) and flush the write-queue out
93400 * to the global buffer:
93401 */
93402- atomic_add(nr_hits, &prof_buffer[pc]);
93403+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
93404 for (i = 0; i < NR_PROFILE_HIT; ++i) {
93405- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
93406+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
93407 hits[i].pc = hits[i].hits = 0;
93408 }
93409 out:
93410@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
93411 if (prof_on != type || !prof_buffer)
93412 return;
93413 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
93414- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93415+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
93416 }
93417 #endif /* !CONFIG_SMP */
93418 EXPORT_SYMBOL_GPL(profile_hits);
93419@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
93420 return -EFAULT;
93421 buf++; p++; count--; read++;
93422 }
93423- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
93424+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
93425 if (copy_to_user(buf, (void *)pnt, count))
93426 return -EFAULT;
93427 read += count;
93428@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
93429 }
93430 #endif
93431 profile_discard_flip_buffers();
93432- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
93433+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
93434 return count;
93435 }
93436
93437diff --git a/kernel/ptrace.c b/kernel/ptrace.c
93438index 05625f6..733bf70 100644
93439--- a/kernel/ptrace.c
93440+++ b/kernel/ptrace.c
93441@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
93442 return ret;
93443 }
93444
93445-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93446+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
93447+ unsigned int log)
93448 {
93449 const struct cred *cred = current_cred(), *tcred;
93450
93451@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93452 cred->gid != tcred->egid ||
93453 cred->gid != tcred->sgid ||
93454 cred->gid != tcred->gid) &&
93455- !capable(CAP_SYS_PTRACE)) {
93456+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93457+ (log && !capable(CAP_SYS_PTRACE)))
93458+ ) {
93459 rcu_read_unlock();
93460 return -EPERM;
93461 }
93462@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
93463 smp_rmb();
93464 if (task->mm)
93465 dumpable = get_dumpable(task->mm);
93466- if (!dumpable && !capable(CAP_SYS_PTRACE))
93467+ if (!dumpable &&
93468+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
93469+ (log && !capable(CAP_SYS_PTRACE))))
93470 return -EPERM;
93471
93472 return security_ptrace_access_check(task, mode);
93473@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
93474 {
93475 int err;
93476 task_lock(task);
93477- err = __ptrace_may_access(task, mode);
93478+ err = __ptrace_may_access(task, mode, 0);
93479+ task_unlock(task);
93480+ return !err;
93481+}
93482+
93483+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
93484+{
93485+ int err;
93486+ task_lock(task);
93487+ err = __ptrace_may_access(task, mode, 1);
93488 task_unlock(task);
93489 return !err;
93490 }
93491@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
93492 goto out;
93493
93494 task_lock(task);
93495- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
93496+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
93497 task_unlock(task);
93498 if (retval)
93499 goto unlock_creds;
93500@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
93501 goto unlock_tasklist;
93502
93503 task->ptrace = PT_PTRACED;
93504- if (capable(CAP_SYS_PTRACE))
93505+ if (capable_nolog(CAP_SYS_PTRACE))
93506 task->ptrace |= PT_PTRACE_CAP;
93507
93508 __ptrace_link(task, current);
93509@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
93510 {
93511 int copied = 0;
93512
93513+ pax_track_stack();
93514+
93515 while (len > 0) {
93516 char buf[128];
93517 int this_len, retval;
93518@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
93519 {
93520 int copied = 0;
93521
93522+ pax_track_stack();
93523+
93524 while (len > 0) {
93525 char buf[128];
93526 int this_len, retval;
93527@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
93528 int ret = -EIO;
93529 siginfo_t siginfo;
93530
93531+ pax_track_stack();
93532+
93533 switch (request) {
93534 case PTRACE_PEEKTEXT:
93535 case PTRACE_PEEKDATA:
93536@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
93537 ret = ptrace_setoptions(child, data);
93538 break;
93539 case PTRACE_GETEVENTMSG:
93540- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
93541+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
93542 break;
93543
93544 case PTRACE_GETSIGINFO:
93545 ret = ptrace_getsiginfo(child, &siginfo);
93546 if (!ret)
93547- ret = copy_siginfo_to_user((siginfo_t __user *) data,
93548+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
93549 &siginfo);
93550 break;
93551
93552 case PTRACE_SETSIGINFO:
93553- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
93554+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
93555 sizeof siginfo))
93556 ret = -EFAULT;
93557 else
93558@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
93559 goto out;
93560 }
93561
93562+ if (gr_handle_ptrace(child, request)) {
93563+ ret = -EPERM;
93564+ goto out_put_task_struct;
93565+ }
93566+
93567 if (request == PTRACE_ATTACH) {
93568 ret = ptrace_attach(child);
93569 /*
93570 * Some architectures need to do book-keeping after
93571 * a ptrace attach.
93572 */
93573- if (!ret)
93574+ if (!ret) {
93575 arch_ptrace_attach(child);
93576+ gr_audit_ptrace(child);
93577+ }
93578 goto out_put_task_struct;
93579 }
93580
93581@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
93582 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93583 if (copied != sizeof(tmp))
93584 return -EIO;
93585- return put_user(tmp, (unsigned long __user *)data);
93586+ return put_user(tmp, (__force unsigned long __user *)data);
93587 }
93588
93589 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
93590@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93591 siginfo_t siginfo;
93592 int ret;
93593
93594+ pax_track_stack();
93595+
93596 switch (request) {
93597 case PTRACE_PEEKTEXT:
93598 case PTRACE_PEEKDATA:
93599@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
93600 goto out;
93601 }
93602
93603+ if (gr_handle_ptrace(child, request)) {
93604+ ret = -EPERM;
93605+ goto out_put_task_struct;
93606+ }
93607+
93608 if (request == PTRACE_ATTACH) {
93609 ret = ptrace_attach(child);
93610 /*
93611 * Some architectures need to do book-keeping after
93612 * a ptrace attach.
93613 */
93614- if (!ret)
93615+ if (!ret) {
93616 arch_ptrace_attach(child);
93617+ gr_audit_ptrace(child);
93618+ }
93619 goto out_put_task_struct;
93620 }
93621
93622diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
93623index 697c0a0..2402696 100644
93624--- a/kernel/rcutorture.c
93625+++ b/kernel/rcutorture.c
93626@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
93627 { 0 };
93628 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
93629 { 0 };
93630-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93631-static atomic_t n_rcu_torture_alloc;
93632-static atomic_t n_rcu_torture_alloc_fail;
93633-static atomic_t n_rcu_torture_free;
93634-static atomic_t n_rcu_torture_mberror;
93635-static atomic_t n_rcu_torture_error;
93636+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93637+static atomic_unchecked_t n_rcu_torture_alloc;
93638+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93639+static atomic_unchecked_t n_rcu_torture_free;
93640+static atomic_unchecked_t n_rcu_torture_mberror;
93641+static atomic_unchecked_t n_rcu_torture_error;
93642 static long n_rcu_torture_timers;
93643 static struct list_head rcu_torture_removed;
93644 static cpumask_var_t shuffle_tmp_mask;
93645@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
93646
93647 spin_lock_bh(&rcu_torture_lock);
93648 if (list_empty(&rcu_torture_freelist)) {
93649- atomic_inc(&n_rcu_torture_alloc_fail);
93650+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93651 spin_unlock_bh(&rcu_torture_lock);
93652 return NULL;
93653 }
93654- atomic_inc(&n_rcu_torture_alloc);
93655+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93656 p = rcu_torture_freelist.next;
93657 list_del_init(p);
93658 spin_unlock_bh(&rcu_torture_lock);
93659@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
93660 static void
93661 rcu_torture_free(struct rcu_torture *p)
93662 {
93663- atomic_inc(&n_rcu_torture_free);
93664+ atomic_inc_unchecked(&n_rcu_torture_free);
93665 spin_lock_bh(&rcu_torture_lock);
93666 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93667 spin_unlock_bh(&rcu_torture_lock);
93668@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
93669 i = rp->rtort_pipe_count;
93670 if (i > RCU_TORTURE_PIPE_LEN)
93671 i = RCU_TORTURE_PIPE_LEN;
93672- atomic_inc(&rcu_torture_wcount[i]);
93673+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93674 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93675 rp->rtort_mbtest = 0;
93676 rcu_torture_free(rp);
93677@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
93678 i = rp->rtort_pipe_count;
93679 if (i > RCU_TORTURE_PIPE_LEN)
93680 i = RCU_TORTURE_PIPE_LEN;
93681- atomic_inc(&rcu_torture_wcount[i]);
93682+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93683 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93684 rp->rtort_mbtest = 0;
93685 list_del(&rp->rtort_free);
93686@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
93687 i = old_rp->rtort_pipe_count;
93688 if (i > RCU_TORTURE_PIPE_LEN)
93689 i = RCU_TORTURE_PIPE_LEN;
93690- atomic_inc(&rcu_torture_wcount[i]);
93691+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93692 old_rp->rtort_pipe_count++;
93693 cur_ops->deferred_free(old_rp);
93694 }
93695@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
93696 return;
93697 }
93698 if (p->rtort_mbtest == 0)
93699- atomic_inc(&n_rcu_torture_mberror);
93700+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93701 spin_lock(&rand_lock);
93702 cur_ops->read_delay(&rand);
93703 n_rcu_torture_timers++;
93704@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
93705 continue;
93706 }
93707 if (p->rtort_mbtest == 0)
93708- atomic_inc(&n_rcu_torture_mberror);
93709+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93710 cur_ops->read_delay(&rand);
93711 preempt_disable();
93712 pipe_count = p->rtort_pipe_count;
93713@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
93714 rcu_torture_current,
93715 rcu_torture_current_version,
93716 list_empty(&rcu_torture_freelist),
93717- atomic_read(&n_rcu_torture_alloc),
93718- atomic_read(&n_rcu_torture_alloc_fail),
93719- atomic_read(&n_rcu_torture_free),
93720- atomic_read(&n_rcu_torture_mberror),
93721+ atomic_read_unchecked(&n_rcu_torture_alloc),
93722+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93723+ atomic_read_unchecked(&n_rcu_torture_free),
93724+ atomic_read_unchecked(&n_rcu_torture_mberror),
93725 n_rcu_torture_timers);
93726- if (atomic_read(&n_rcu_torture_mberror) != 0)
93727+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
93728 cnt += sprintf(&page[cnt], " !!!");
93729 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
93730 if (i > 1) {
93731 cnt += sprintf(&page[cnt], "!!! ");
93732- atomic_inc(&n_rcu_torture_error);
93733+ atomic_inc_unchecked(&n_rcu_torture_error);
93734 WARN_ON_ONCE(1);
93735 }
93736 cnt += sprintf(&page[cnt], "Reader Pipe: ");
93737@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
93738 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
93739 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93740 cnt += sprintf(&page[cnt], " %d",
93741- atomic_read(&rcu_torture_wcount[i]));
93742+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93743 }
93744 cnt += sprintf(&page[cnt], "\n");
93745 if (cur_ops->stats)
93746@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
93747
93748 if (cur_ops->cleanup)
93749 cur_ops->cleanup();
93750- if (atomic_read(&n_rcu_torture_error))
93751+ if (atomic_read_unchecked(&n_rcu_torture_error))
93752 rcu_torture_print_module_parms("End of test: FAILURE");
93753 else
93754 rcu_torture_print_module_parms("End of test: SUCCESS");
93755@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
93756
93757 rcu_torture_current = NULL;
93758 rcu_torture_current_version = 0;
93759- atomic_set(&n_rcu_torture_alloc, 0);
93760- atomic_set(&n_rcu_torture_alloc_fail, 0);
93761- atomic_set(&n_rcu_torture_free, 0);
93762- atomic_set(&n_rcu_torture_mberror, 0);
93763- atomic_set(&n_rcu_torture_error, 0);
93764+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93765+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93766+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93767+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93768+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93769 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93770- atomic_set(&rcu_torture_wcount[i], 0);
93771+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93772 for_each_possible_cpu(cpu) {
93773 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93774 per_cpu(rcu_torture_count, cpu)[i] = 0;
93775diff --git a/kernel/rcutree.c b/kernel/rcutree.c
93776index 683c4f3..97f54c6 100644
93777--- a/kernel/rcutree.c
93778+++ b/kernel/rcutree.c
93779@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
93780 /*
93781 * Do softirq processing for the current CPU.
93782 */
93783-static void rcu_process_callbacks(struct softirq_action *unused)
93784+static void rcu_process_callbacks(void)
93785 {
93786 /*
93787 * Memory references from any prior RCU read-side critical sections
93788diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
93789index c03edf7..ac1b341 100644
93790--- a/kernel/rcutree_plugin.h
93791+++ b/kernel/rcutree_plugin.h
93792@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
93793 */
93794 void __rcu_read_lock(void)
93795 {
93796- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
93797+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
93798 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
93799 }
93800 EXPORT_SYMBOL_GPL(__rcu_read_lock);
93801@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
93802 struct task_struct *t = current;
93803
93804 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
93805- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
93806+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
93807 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
93808 rcu_read_unlock_special(t);
93809 }
93810diff --git a/kernel/relay.c b/kernel/relay.c
93811index bf343f5..908e9ee 100644
93812--- a/kernel/relay.c
93813+++ b/kernel/relay.c
93814@@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
93815 unsigned int flags,
93816 int *nonpad_ret)
93817 {
93818- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
93819+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
93820 struct rchan_buf *rbuf = in->private_data;
93821 unsigned int subbuf_size = rbuf->chan->subbuf_size;
93822 uint64_t pos = (uint64_t) *ppos;
93823@@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
93824 .ops = &relay_pipe_buf_ops,
93825 .spd_release = relay_page_release,
93826 };
93827+ ssize_t ret;
93828+
93829+ pax_track_stack();
93830
93831 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
93832 return 0;
93833diff --git a/kernel/resource.c b/kernel/resource.c
93834index fb11a58..4e61ae1 100644
93835--- a/kernel/resource.c
93836+++ b/kernel/resource.c
93837@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
93838
93839 static int __init ioresources_init(void)
93840 {
93841+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93842+#ifdef CONFIG_GRKERNSEC_PROC_USER
93843+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93844+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93845+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93846+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93847+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93848+#endif
93849+#else
93850 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93851 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93852+#endif
93853 return 0;
93854 }
93855 __initcall(ioresources_init);
93856diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
93857index a56f629..1fc4989 100644
93858--- a/kernel/rtmutex-tester.c
93859+++ b/kernel/rtmutex-tester.c
93860@@ -21,7 +21,7 @@
93861 #define MAX_RT_TEST_MUTEXES 8
93862
93863 static spinlock_t rttest_lock;
93864-static atomic_t rttest_event;
93865+static atomic_unchecked_t rttest_event;
93866
93867 struct test_thread_data {
93868 int opcode;
93869@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93870
93871 case RTTEST_LOCKCONT:
93872 td->mutexes[td->opdata] = 1;
93873- td->event = atomic_add_return(1, &rttest_event);
93874+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93875 return 0;
93876
93877 case RTTEST_RESET:
93878@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93879 return 0;
93880
93881 case RTTEST_RESETEVENT:
93882- atomic_set(&rttest_event, 0);
93883+ atomic_set_unchecked(&rttest_event, 0);
93884 return 0;
93885
93886 default:
93887@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93888 return ret;
93889
93890 td->mutexes[id] = 1;
93891- td->event = atomic_add_return(1, &rttest_event);
93892+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93893 rt_mutex_lock(&mutexes[id]);
93894- td->event = atomic_add_return(1, &rttest_event);
93895+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93896 td->mutexes[id] = 4;
93897 return 0;
93898
93899@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93900 return ret;
93901
93902 td->mutexes[id] = 1;
93903- td->event = atomic_add_return(1, &rttest_event);
93904+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93905 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
93906- td->event = atomic_add_return(1, &rttest_event);
93907+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93908 td->mutexes[id] = ret ? 0 : 4;
93909 return ret ? -EINTR : 0;
93910
93911@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93912 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
93913 return ret;
93914
93915- td->event = atomic_add_return(1, &rttest_event);
93916+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93917 rt_mutex_unlock(&mutexes[id]);
93918- td->event = atomic_add_return(1, &rttest_event);
93919+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93920 td->mutexes[id] = 0;
93921 return 0;
93922
93923@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93924 break;
93925
93926 td->mutexes[dat] = 2;
93927- td->event = atomic_add_return(1, &rttest_event);
93928+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93929 break;
93930
93931 case RTTEST_LOCKBKL:
93932@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93933 return;
93934
93935 td->mutexes[dat] = 3;
93936- td->event = atomic_add_return(1, &rttest_event);
93937+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93938 break;
93939
93940 case RTTEST_LOCKNOWAIT:
93941@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93942 return;
93943
93944 td->mutexes[dat] = 1;
93945- td->event = atomic_add_return(1, &rttest_event);
93946+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93947 return;
93948
93949 case RTTEST_LOCKBKL:
93950diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
93951index 29bd4ba..8c5de90 100644
93952--- a/kernel/rtmutex.c
93953+++ b/kernel/rtmutex.c
93954@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
93955 */
93956 spin_lock_irqsave(&pendowner->pi_lock, flags);
93957
93958- WARN_ON(!pendowner->pi_blocked_on);
93959+ BUG_ON(!pendowner->pi_blocked_on);
93960 WARN_ON(pendowner->pi_blocked_on != waiter);
93961 WARN_ON(pendowner->pi_blocked_on->lock != lock);
93962
93963diff --git a/kernel/sched.c b/kernel/sched.c
93964index 0591df8..e3af3a4 100644
93965--- a/kernel/sched.c
93966+++ b/kernel/sched.c
93967@@ -5043,7 +5043,7 @@ out:
93968 * In CONFIG_NO_HZ case, the idle load balance owner will do the
93969 * rebalancing for all the cpus for whom scheduler ticks are stopped.
93970 */
93971-static void run_rebalance_domains(struct softirq_action *h)
93972+static void run_rebalance_domains(void)
93973 {
93974 int this_cpu = smp_processor_id();
93975 struct rq *this_rq = cpu_rq(this_cpu);
93976@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
93977 }
93978 }
93979
93980+#ifdef CONFIG_GRKERNSEC_SETXID
93981+extern void gr_delayed_cred_worker(void);
93982+static inline void gr_cred_schedule(void)
93983+{
93984+ if (unlikely(current->delayed_cred))
93985+ gr_delayed_cred_worker();
93986+}
93987+#else
93988+static inline void gr_cred_schedule(void)
93989+{
93990+}
93991+#endif
93992+
93993 /*
93994 * schedule() is the main scheduler function.
93995 */
93996@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
93997 struct rq *rq;
93998 int cpu;
93999
94000+ pax_track_stack();
94001+
94002 need_resched:
94003 preempt_disable();
94004 cpu = smp_processor_id();
94005@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
94006
94007 schedule_debug(prev);
94008
94009+ gr_cred_schedule();
94010+
94011 if (sched_feat(HRTICK))
94012 hrtick_clear(rq);
94013
94014@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
94015 * Look out! "owner" is an entirely speculative pointer
94016 * access and not reliable.
94017 */
94018-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94019+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
94020 {
94021 unsigned int cpu;
94022 struct rq *rq;
94023@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94024 * DEBUG_PAGEALLOC could have unmapped it if
94025 * the mutex owner just released it and exited.
94026 */
94027- if (probe_kernel_address(&owner->cpu, cpu))
94028+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
94029 return 0;
94030 #else
94031- cpu = owner->cpu;
94032+ cpu = task_thread_info(owner)->cpu;
94033 #endif
94034
94035 /*
94036@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
94037 /*
94038 * Is that owner really running on that cpu?
94039 */
94040- if (task_thread_info(rq->curr) != owner || need_resched())
94041+ if (rq->curr != owner || need_resched())
94042 return 0;
94043
94044 cpu_relax();
94045@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
94046 /* convert nice value [19,-20] to rlimit style value [1,40] */
94047 int nice_rlim = 20 - nice;
94048
94049+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
94050+
94051 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
94052 capable(CAP_SYS_NICE));
94053 }
94054@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
94055 if (nice > 19)
94056 nice = 19;
94057
94058- if (increment < 0 && !can_nice(current, nice))
94059+ if (increment < 0 && (!can_nice(current, nice) ||
94060+ gr_handle_chroot_nice()))
94061 return -EPERM;
94062
94063 retval = security_task_setnice(current, nice);
94064@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
94065 long power;
94066 int weight;
94067
94068- WARN_ON(!sd || !sd->groups);
94069+ BUG_ON(!sd || !sd->groups);
94070
94071 if (cpu != group_first_cpu(sd->groups))
94072 return;
94073diff --git a/kernel/signal.c b/kernel/signal.c
94074index 2494827..cda80a0 100644
94075--- a/kernel/signal.c
94076+++ b/kernel/signal.c
94077@@ -41,12 +41,12 @@
94078
94079 static struct kmem_cache *sigqueue_cachep;
94080
94081-static void __user *sig_handler(struct task_struct *t, int sig)
94082+static __sighandler_t sig_handler(struct task_struct *t, int sig)
94083 {
94084 return t->sighand->action[sig - 1].sa.sa_handler;
94085 }
94086
94087-static int sig_handler_ignored(void __user *handler, int sig)
94088+static int sig_handler_ignored(__sighandler_t handler, int sig)
94089 {
94090 /* Is it explicitly or implicitly ignored? */
94091 return handler == SIG_IGN ||
94092@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
94093 static int sig_task_ignored(struct task_struct *t, int sig,
94094 int from_ancestor_ns)
94095 {
94096- void __user *handler;
94097+ __sighandler_t handler;
94098
94099 handler = sig_handler(t, sig);
94100
94101@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
94102 */
94103 user = get_uid(__task_cred(t)->user);
94104 atomic_inc(&user->sigpending);
94105+
94106+ if (!override_rlimit)
94107+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
94108 if (override_rlimit ||
94109 atomic_read(&user->sigpending) <=
94110 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
94111@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
94112
94113 int unhandled_signal(struct task_struct *tsk, int sig)
94114 {
94115- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
94116+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
94117 if (is_global_init(tsk))
94118 return 1;
94119 if (handler != SIG_IGN && handler != SIG_DFL)
94120@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
94121 }
94122 }
94123
94124+ /* allow glibc communication via tgkill to other threads in our
94125+ thread group */
94126+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
94127+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
94128+ && gr_handle_signal(t, sig))
94129+ return -EPERM;
94130+
94131 return security_task_kill(t, info, sig, 0);
94132 }
94133
94134@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94135 return send_signal(sig, info, p, 1);
94136 }
94137
94138-static int
94139+int
94140 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94141 {
94142 return send_signal(sig, info, t, 0);
94143@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94144 unsigned long int flags;
94145 int ret, blocked, ignored;
94146 struct k_sigaction *action;
94147+ int is_unhandled = 0;
94148
94149 spin_lock_irqsave(&t->sighand->siglock, flags);
94150 action = &t->sighand->action[sig-1];
94151@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
94152 }
94153 if (action->sa.sa_handler == SIG_DFL)
94154 t->signal->flags &= ~SIGNAL_UNKILLABLE;
94155+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
94156+ is_unhandled = 1;
94157 ret = specific_send_sig_info(sig, info, t);
94158 spin_unlock_irqrestore(&t->sighand->siglock, flags);
94159
94160+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
94161+ normal operation */
94162+ if (is_unhandled) {
94163+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
94164+ gr_handle_crash(t, sig);
94165+ }
94166+
94167 return ret;
94168 }
94169
94170@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
94171 {
94172 int ret = check_kill_permission(sig, info, p);
94173
94174- if (!ret && sig)
94175+ if (!ret && sig) {
94176 ret = do_send_sig_info(sig, info, p, true);
94177+ if (!ret)
94178+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
94179+ }
94180
94181 return ret;
94182 }
94183@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
94184 {
94185 siginfo_t info;
94186
94187+ pax_track_stack();
94188+
94189 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
94190
94191 memset(&info, 0, sizeof info);
94192@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
94193 int error = -ESRCH;
94194
94195 rcu_read_lock();
94196- p = find_task_by_vpid(pid);
94197+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
94198+ /* allow glibc communication via tgkill to other threads in our
94199+ thread group */
94200+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
94201+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
94202+ p = find_task_by_vpid_unrestricted(pid);
94203+ else
94204+#endif
94205+ p = find_task_by_vpid(pid);
94206 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
94207 error = check_kill_permission(sig, info, p);
94208 /*
94209diff --git a/kernel/smp.c b/kernel/smp.c
94210index aa9cff3..631a0de 100644
94211--- a/kernel/smp.c
94212+++ b/kernel/smp.c
94213@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
94214 }
94215 EXPORT_SYMBOL(smp_call_function);
94216
94217-void ipi_call_lock(void)
94218+void ipi_call_lock(void) __acquires(call_function.lock)
94219 {
94220 spin_lock(&call_function.lock);
94221 }
94222
94223-void ipi_call_unlock(void)
94224+void ipi_call_unlock(void) __releases(call_function.lock)
94225 {
94226 spin_unlock(&call_function.lock);
94227 }
94228
94229-void ipi_call_lock_irq(void)
94230+void ipi_call_lock_irq(void) __acquires(call_function.lock)
94231 {
94232 spin_lock_irq(&call_function.lock);
94233 }
94234
94235-void ipi_call_unlock_irq(void)
94236+void ipi_call_unlock_irq(void) __releases(call_function.lock)
94237 {
94238 spin_unlock_irq(&call_function.lock);
94239 }
94240diff --git a/kernel/softirq.c b/kernel/softirq.c
94241index 04a0252..580c512 100644
94242--- a/kernel/softirq.c
94243+++ b/kernel/softirq.c
94244@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
94245
94246 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
94247
94248-char *softirq_to_name[NR_SOFTIRQS] = {
94249+const char * const softirq_to_name[NR_SOFTIRQS] = {
94250 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
94251 "TASKLET", "SCHED", "HRTIMER", "RCU"
94252 };
94253@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
94254
94255 asmlinkage void __do_softirq(void)
94256 {
94257- struct softirq_action *h;
94258+ const struct softirq_action *h;
94259 __u32 pending;
94260 int max_restart = MAX_SOFTIRQ_RESTART;
94261 int cpu;
94262@@ -233,7 +233,7 @@ restart:
94263 kstat_incr_softirqs_this_cpu(h - softirq_vec);
94264
94265 trace_softirq_entry(h, softirq_vec);
94266- h->action(h);
94267+ h->action();
94268 trace_softirq_exit(h, softirq_vec);
94269 if (unlikely(prev_count != preempt_count())) {
94270 printk(KERN_ERR "huh, entered softirq %td %s %p"
94271@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
94272 local_irq_restore(flags);
94273 }
94274
94275-void open_softirq(int nr, void (*action)(struct softirq_action *))
94276+void open_softirq(int nr, void (*action)(void))
94277 {
94278- softirq_vec[nr].action = action;
94279+ pax_open_kernel();
94280+ *(void **)&softirq_vec[nr].action = action;
94281+ pax_close_kernel();
94282 }
94283
94284 /*
94285@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
94286
94287 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
94288
94289-static void tasklet_action(struct softirq_action *a)
94290+static void tasklet_action(void)
94291 {
94292 struct tasklet_struct *list;
94293
94294@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
94295 }
94296 }
94297
94298-static void tasklet_hi_action(struct softirq_action *a)
94299+static void tasklet_hi_action(void)
94300 {
94301 struct tasklet_struct *list;
94302
94303diff --git a/kernel/sys.c b/kernel/sys.c
94304index e9512b1..f07185f 100644
94305--- a/kernel/sys.c
94306+++ b/kernel/sys.c
94307@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
94308 error = -EACCES;
94309 goto out;
94310 }
94311+
94312+ if (gr_handle_chroot_setpriority(p, niceval)) {
94313+ error = -EACCES;
94314+ goto out;
94315+ }
94316+
94317 no_nice = security_task_setnice(p, niceval);
94318 if (no_nice) {
94319 error = no_nice;
94320@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
94321 !(user = find_user(who)))
94322 goto out_unlock; /* No processes for this user */
94323
94324- do_each_thread(g, p)
94325+ do_each_thread(g, p) {
94326 if (__task_cred(p)->uid == who)
94327 error = set_one_prio(p, niceval, error);
94328- while_each_thread(g, p);
94329+ } while_each_thread(g, p);
94330 if (who != cred->uid)
94331 free_uid(user); /* For find_user() */
94332 break;
94333@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
94334 !(user = find_user(who)))
94335 goto out_unlock; /* No processes for this user */
94336
94337- do_each_thread(g, p)
94338+ do_each_thread(g, p) {
94339 if (__task_cred(p)->uid == who) {
94340 niceval = 20 - task_nice(p);
94341 if (niceval > retval)
94342 retval = niceval;
94343 }
94344- while_each_thread(g, p);
94345+ } while_each_thread(g, p);
94346 if (who != cred->uid)
94347 free_uid(user); /* for find_user() */
94348 break;
94349@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
94350 goto error;
94351 }
94352
94353+ if (gr_check_group_change(new->gid, new->egid, -1))
94354+ goto error;
94355+
94356 if (rgid != (gid_t) -1 ||
94357 (egid != (gid_t) -1 && egid != old->gid))
94358 new->sgid = new->egid;
94359@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
94360 goto error;
94361
94362 retval = -EPERM;
94363+
94364+ if (gr_check_group_change(gid, gid, gid))
94365+ goto error;
94366+
94367 if (capable(CAP_SETGID))
94368 new->gid = new->egid = new->sgid = new->fsgid = gid;
94369 else if (gid == old->gid || gid == old->sgid)
94370@@ -559,7 +572,7 @@ error:
94371 /*
94372 * change the user struct in a credentials set to match the new UID
94373 */
94374-static int set_user(struct cred *new)
94375+int set_user(struct cred *new)
94376 {
94377 struct user_struct *new_user;
94378
94379@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
94380 if (!new_user)
94381 return -EAGAIN;
94382
94383+ /*
94384+ * We don't fail in case of NPROC limit excess here because too many
94385+ * poorly written programs don't check set*uid() return code, assuming
94386+ * it never fails if called by root. We may still enforce NPROC limit
94387+ * for programs doing set*uid()+execve() by harmlessly deferring the
94388+ * failure to the execve() stage.
94389+ */
94390 if (atomic_read(&new_user->processes) >=
94391 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
94392- new_user != INIT_USER) {
94393- free_uid(new_user);
94394- return -EAGAIN;
94395- }
94396+ new_user != INIT_USER)
94397+ current->flags |= PF_NPROC_EXCEEDED;
94398+ else
94399+ current->flags &= ~PF_NPROC_EXCEEDED;
94400
94401 free_uid(new->user);
94402 new->user = new_user;
94403@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94404 goto error;
94405 }
94406
94407+ if (gr_check_user_change(new->uid, new->euid, -1))
94408+ goto error;
94409+
94410 if (new->uid != old->uid) {
94411 retval = set_user(new);
94412 if (retval < 0)
94413@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94414 goto error;
94415
94416 retval = -EPERM;
94417+
94418+ if (gr_check_crash_uid(uid))
94419+ goto error;
94420+ if (gr_check_user_change(uid, uid, uid))
94421+ goto error;
94422+
94423 if (capable(CAP_SETUID)) {
94424 new->suid = new->uid = uid;
94425 if (uid != old->uid) {
94426@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94427 goto error;
94428 }
94429
94430+ if (gr_check_user_change(ruid, euid, -1))
94431+ goto error;
94432+
94433 if (ruid != (uid_t) -1) {
94434 new->uid = ruid;
94435 if (ruid != old->uid) {
94436@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94437 goto error;
94438 }
94439
94440+ if (gr_check_group_change(rgid, egid, -1))
94441+ goto error;
94442+
94443 if (rgid != (gid_t) -1)
94444 new->gid = rgid;
94445 if (egid != (gid_t) -1)
94446@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94447 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
94448 goto error;
94449
94450+ if (gr_check_user_change(-1, -1, uid))
94451+ goto error;
94452+
94453 if (uid == old->uid || uid == old->euid ||
94454 uid == old->suid || uid == old->fsuid ||
94455 capable(CAP_SETUID)) {
94456@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94457 if (gid == old->gid || gid == old->egid ||
94458 gid == old->sgid || gid == old->fsgid ||
94459 capable(CAP_SETGID)) {
94460+ if (gr_check_group_change(-1, -1, gid))
94461+ goto error;
94462+
94463 if (gid != old_fsgid) {
94464 new->fsgid = gid;
94465 goto change_okay;
94466@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
94467 error = get_dumpable(me->mm);
94468 break;
94469 case PR_SET_DUMPABLE:
94470- if (arg2 < 0 || arg2 > 1) {
94471+ if (arg2 > 1) {
94472 error = -EINVAL;
94473 break;
94474 }
94475diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94476index b8bd058..ab6a76be 100644
94477--- a/kernel/sysctl.c
94478+++ b/kernel/sysctl.c
94479@@ -63,6 +63,13 @@
94480 static int deprecated_sysctl_warning(struct __sysctl_args *args);
94481
94482 #if defined(CONFIG_SYSCTL)
94483+#include <linux/grsecurity.h>
94484+#include <linux/grinternal.h>
94485+
94486+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
94487+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
94488+ const int op);
94489+extern int gr_handle_chroot_sysctl(const int op);
94490
94491 /* External variables not in a header file. */
94492 extern int C_A_D;
94493@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
94494 static int proc_taint(struct ctl_table *table, int write,
94495 void __user *buffer, size_t *lenp, loff_t *ppos);
94496 #endif
94497+extern ctl_table grsecurity_table[];
94498
94499 static struct ctl_table root_table[];
94500 static struct ctl_table_root sysctl_table_root;
94501@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
94502 int sysctl_legacy_va_layout;
94503 #endif
94504
94505+#ifdef CONFIG_PAX_SOFTMODE
94506+static ctl_table pax_table[] = {
94507+ {
94508+ .ctl_name = CTL_UNNUMBERED,
94509+ .procname = "softmode",
94510+ .data = &pax_softmode,
94511+ .maxlen = sizeof(unsigned int),
94512+ .mode = 0600,
94513+ .proc_handler = &proc_dointvec,
94514+ },
94515+
94516+ { .ctl_name = 0 }
94517+};
94518+#endif
94519+
94520 extern int prove_locking;
94521 extern int lock_stat;
94522
94523@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
94524 #endif
94525
94526 static struct ctl_table kern_table[] = {
94527+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
94528+ {
94529+ .ctl_name = CTL_UNNUMBERED,
94530+ .procname = "grsecurity",
94531+ .mode = 0500,
94532+ .child = grsecurity_table,
94533+ },
94534+#endif
94535+
94536+#ifdef CONFIG_PAX_SOFTMODE
94537+ {
94538+ .ctl_name = CTL_UNNUMBERED,
94539+ .procname = "pax",
94540+ .mode = 0500,
94541+ .child = pax_table,
94542+ },
94543+#endif
94544+
94545 {
94546 .ctl_name = CTL_UNNUMBERED,
94547 .procname = "sched_child_runs_first",
94548@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
94549 .data = &modprobe_path,
94550 .maxlen = KMOD_PATH_LEN,
94551 .mode = 0644,
94552- .proc_handler = &proc_dostring,
94553- .strategy = &sysctl_string,
94554+ .proc_handler = &proc_dostring_modpriv,
94555+ .strategy = &sysctl_string_modpriv,
94556 },
94557 {
94558 .ctl_name = CTL_UNNUMBERED,
94559@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
94560 .mode = 0644,
94561 .proc_handler = &proc_dointvec
94562 },
94563+ {
94564+ .procname = "heap_stack_gap",
94565+ .data = &sysctl_heap_stack_gap,
94566+ .maxlen = sizeof(sysctl_heap_stack_gap),
94567+ .mode = 0644,
94568+ .proc_handler = proc_doulongvec_minmax,
94569+ },
94570 #else
94571 {
94572 .ctl_name = CTL_UNNUMBERED,
94573@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
94574 return 0;
94575 }
94576
94577+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
94578+
94579 static int parse_table(int __user *name, int nlen,
94580 void __user *oldval, size_t __user *oldlenp,
94581 void __user *newval, size_t newlen,
94582@@ -1821,7 +1871,7 @@ repeat:
94583 if (n == table->ctl_name) {
94584 int error;
94585 if (table->child) {
94586- if (sysctl_perm(root, table, MAY_EXEC))
94587+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
94588 return -EPERM;
94589 name++;
94590 nlen--;
94591@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
94592 int error;
94593 int mode;
94594
94595+ if (table->parent != NULL && table->parent->procname != NULL &&
94596+ table->procname != NULL &&
94597+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
94598+ return -EACCES;
94599+ if (gr_handle_chroot_sysctl(op))
94600+ return -EACCES;
94601+ error = gr_handle_sysctl(table, op);
94602+ if (error)
94603+ return error;
94604+
94605+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94606+ if (error)
94607+ return error;
94608+
94609+ if (root->permissions)
94610+ mode = root->permissions(root, current->nsproxy, table);
94611+ else
94612+ mode = table->mode;
94613+
94614+ return test_perm(mode, op);
94615+}
94616+
94617+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
94618+{
94619+ int error;
94620+ int mode;
94621+
94622 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94623 if (error)
94624 return error;
94625@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
94626 buffer, lenp, ppos);
94627 }
94628
94629+int proc_dostring_modpriv(struct ctl_table *table, int write,
94630+ void __user *buffer, size_t *lenp, loff_t *ppos)
94631+{
94632+ if (write && !capable(CAP_SYS_MODULE))
94633+ return -EPERM;
94634+
94635+ return _proc_do_string(table->data, table->maxlen, write,
94636+ buffer, lenp, ppos);
94637+}
94638+
94639
94640 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
94641 int *valp,
94642@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
94643 vleft = table->maxlen / sizeof(unsigned long);
94644 left = *lenp;
94645
94646- for (; left && vleft--; i++, min++, max++, first=0) {
94647+ for (; left && vleft--; i++, first=0) {
94648 if (write) {
94649 while (left) {
94650 char c;
94651@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
94652 return -ENOSYS;
94653 }
94654
94655+int proc_dostring_modpriv(struct ctl_table *table, int write,
94656+ void __user *buffer, size_t *lenp, loff_t *ppos)
94657+{
94658+ return -ENOSYS;
94659+}
94660+
94661 int proc_dointvec(struct ctl_table *table, int write,
94662 void __user *buffer, size_t *lenp, loff_t *ppos)
94663 {
94664@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
94665 return 1;
94666 }
94667
94668+int sysctl_string_modpriv(struct ctl_table *table,
94669+ void __user *oldval, size_t __user *oldlenp,
94670+ void __user *newval, size_t newlen)
94671+{
94672+ if (newval && newlen && !capable(CAP_SYS_MODULE))
94673+ return -EPERM;
94674+
94675+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
94676+}
94677+
94678 /*
94679 * This function makes sure that all of the integers in the vector
94680 * are between the minimum and maximum values given in the arrays
94681@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
94682 return -ENOSYS;
94683 }
94684
94685+int sysctl_string_modpriv(struct ctl_table *table,
94686+ void __user *oldval, size_t __user *oldlenp,
94687+ void __user *newval, size_t newlen)
94688+{
94689+ return -ENOSYS;
94690+}
94691+
94692 int sysctl_intvec(struct ctl_table *table,
94693 void __user *oldval, size_t __user *oldlenp,
94694 void __user *newval, size_t newlen)
94695@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94696 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94697 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94698 EXPORT_SYMBOL(proc_dostring);
94699+EXPORT_SYMBOL(proc_dostring_modpriv);
94700 EXPORT_SYMBOL(proc_doulongvec_minmax);
94701 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94702 EXPORT_SYMBOL(register_sysctl_table);
94703@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
94704 EXPORT_SYMBOL(sysctl_jiffies);
94705 EXPORT_SYMBOL(sysctl_ms_jiffies);
94706 EXPORT_SYMBOL(sysctl_string);
94707+EXPORT_SYMBOL(sysctl_string_modpriv);
94708 EXPORT_SYMBOL(sysctl_data);
94709 EXPORT_SYMBOL(unregister_sysctl_table);
94710diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
94711index 469193c..ea3ecb2 100644
94712--- a/kernel/sysctl_check.c
94713+++ b/kernel/sysctl_check.c
94714@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
94715 } else {
94716 if ((table->strategy == sysctl_data) ||
94717 (table->strategy == sysctl_string) ||
94718+ (table->strategy == sysctl_string_modpriv) ||
94719 (table->strategy == sysctl_intvec) ||
94720 (table->strategy == sysctl_jiffies) ||
94721 (table->strategy == sysctl_ms_jiffies) ||
94722 (table->proc_handler == proc_dostring) ||
94723+ (table->proc_handler == proc_dostring_modpriv) ||
94724 (table->proc_handler == proc_dointvec) ||
94725 (table->proc_handler == proc_dointvec_minmax) ||
94726 (table->proc_handler == proc_dointvec_jiffies) ||
94727diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94728index a4ef542..798bcd7 100644
94729--- a/kernel/taskstats.c
94730+++ b/kernel/taskstats.c
94731@@ -26,9 +26,12 @@
94732 #include <linux/cgroup.h>
94733 #include <linux/fs.h>
94734 #include <linux/file.h>
94735+#include <linux/grsecurity.h>
94736 #include <net/genetlink.h>
94737 #include <asm/atomic.h>
94738
94739+extern int gr_is_taskstats_denied(int pid);
94740+
94741 /*
94742 * Maximum length of a cpumask that can be specified in
94743 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94744@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94745 size_t size;
94746 cpumask_var_t mask;
94747
94748+ if (gr_is_taskstats_denied(current->pid))
94749+ return -EACCES;
94750+
94751 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
94752 return -ENOMEM;
94753
94754diff --git a/kernel/time.c b/kernel/time.c
94755index 33df60e..ca768bd 100644
94756--- a/kernel/time.c
94757+++ b/kernel/time.c
94758@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
94759 return error;
94760
94761 if (tz) {
94762+ /* we log in do_settimeofday called below, so don't log twice
94763+ */
94764+ if (!tv)
94765+ gr_log_timechange();
94766+
94767 /* SMP safe, global irq locking makes it work. */
94768 sys_tz = *tz;
94769 update_vsyscall_tz();
94770@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
94771 * Avoid unnecessary multiplications/divisions in the
94772 * two most common HZ cases:
94773 */
94774-unsigned int inline jiffies_to_msecs(const unsigned long j)
94775+inline unsigned int jiffies_to_msecs(const unsigned long j)
94776 {
94777 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
94778 return (MSEC_PER_SEC / HZ) * j;
94779@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
94780 }
94781 EXPORT_SYMBOL(jiffies_to_msecs);
94782
94783-unsigned int inline jiffies_to_usecs(const unsigned long j)
94784+inline unsigned int jiffies_to_usecs(const unsigned long j)
94785 {
94786 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
94787 return (USEC_PER_SEC / HZ) * j;
94788diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
94789index 57b953f..06f149f 100644
94790--- a/kernel/time/tick-broadcast.c
94791+++ b/kernel/time/tick-broadcast.c
94792@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
94793 * then clear the broadcast bit.
94794 */
94795 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
94796- int cpu = smp_processor_id();
94797+ cpu = smp_processor_id();
94798
94799 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
94800 tick_broadcast_clear_oneshot(cpu);
94801diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94802index 4a71cff..ffb5548 100644
94803--- a/kernel/time/timekeeping.c
94804+++ b/kernel/time/timekeeping.c
94805@@ -14,6 +14,7 @@
94806 #include <linux/init.h>
94807 #include <linux/mm.h>
94808 #include <linux/sched.h>
94809+#include <linux/grsecurity.h>
94810 #include <linux/sysdev.h>
94811 #include <linux/clocksource.h>
94812 #include <linux/jiffies.h>
94813@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
94814 */
94815 struct timespec ts = xtime;
94816 timespec_add_ns(&ts, nsec);
94817- ACCESS_ONCE(xtime_cache) = ts;
94818+ ACCESS_ONCE_RW(xtime_cache) = ts;
94819 }
94820
94821 /* must hold xtime_lock */
94822@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
94823 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
94824 return -EINVAL;
94825
94826+ gr_log_timechange();
94827+
94828 write_seqlock_irqsave(&xtime_lock, flags);
94829
94830 timekeeping_forward_now();
94831diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94832index 54c0dda..e9095d9 100644
94833--- a/kernel/time/timer_list.c
94834+++ b/kernel/time/timer_list.c
94835@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94836
94837 static void print_name_offset(struct seq_file *m, void *sym)
94838 {
94839+#ifdef CONFIG_GRKERNSEC_HIDESYM
94840+ SEQ_printf(m, "<%p>", NULL);
94841+#else
94842 char symname[KSYM_NAME_LEN];
94843
94844 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94845 SEQ_printf(m, "<%p>", sym);
94846 else
94847 SEQ_printf(m, "%s", symname);
94848+#endif
94849 }
94850
94851 static void
94852@@ -112,7 +116,11 @@ next_one:
94853 static void
94854 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94855 {
94856+#ifdef CONFIG_GRKERNSEC_HIDESYM
94857+ SEQ_printf(m, " .base: %p\n", NULL);
94858+#else
94859 SEQ_printf(m, " .base: %p\n", base);
94860+#endif
94861 SEQ_printf(m, " .index: %d\n",
94862 base->index);
94863 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94864@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
94865 {
94866 struct proc_dir_entry *pe;
94867
94868+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94869+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94870+#else
94871 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94872+#endif
94873 if (!pe)
94874 return -ENOMEM;
94875 return 0;
94876diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94877index ee5681f..634089b 100644
94878--- a/kernel/time/timer_stats.c
94879+++ b/kernel/time/timer_stats.c
94880@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94881 static unsigned long nr_entries;
94882 static struct entry entries[MAX_ENTRIES];
94883
94884-static atomic_t overflow_count;
94885+static atomic_unchecked_t overflow_count;
94886
94887 /*
94888 * The entries are in a hash-table, for fast lookup:
94889@@ -140,7 +140,7 @@ static void reset_entries(void)
94890 nr_entries = 0;
94891 memset(entries, 0, sizeof(entries));
94892 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94893- atomic_set(&overflow_count, 0);
94894+ atomic_set_unchecked(&overflow_count, 0);
94895 }
94896
94897 static struct entry *alloc_entry(void)
94898@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94899 if (likely(entry))
94900 entry->count++;
94901 else
94902- atomic_inc(&overflow_count);
94903+ atomic_inc_unchecked(&overflow_count);
94904
94905 out_unlock:
94906 spin_unlock_irqrestore(lock, flags);
94907@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94908
94909 static void print_name_offset(struct seq_file *m, unsigned long addr)
94910 {
94911+#ifdef CONFIG_GRKERNSEC_HIDESYM
94912+ seq_printf(m, "<%p>", NULL);
94913+#else
94914 char symname[KSYM_NAME_LEN];
94915
94916 if (lookup_symbol_name(addr, symname) < 0)
94917 seq_printf(m, "<%p>", (void *)addr);
94918 else
94919 seq_printf(m, "%s", symname);
94920+#endif
94921 }
94922
94923 static int tstats_show(struct seq_file *m, void *v)
94924@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
94925
94926 seq_puts(m, "Timer Stats Version: v0.2\n");
94927 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94928- if (atomic_read(&overflow_count))
94929+ if (atomic_read_unchecked(&overflow_count))
94930 seq_printf(m, "Overflow: %d entries\n",
94931- atomic_read(&overflow_count));
94932+ atomic_read_unchecked(&overflow_count));
94933
94934 for (i = 0; i < nr_entries; i++) {
94935 entry = entries + i;
94936@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
94937 {
94938 struct proc_dir_entry *pe;
94939
94940+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94941+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94942+#else
94943 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94944+#endif
94945 if (!pe)
94946 return -ENOMEM;
94947 return 0;
94948diff --git a/kernel/timer.c b/kernel/timer.c
94949index cb3c1f1..8bf5526 100644
94950--- a/kernel/timer.c
94951+++ b/kernel/timer.c
94952@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
94953 /*
94954 * This function runs timers and the timer-tq in bottom half context.
94955 */
94956-static void run_timer_softirq(struct softirq_action *h)
94957+static void run_timer_softirq(void)
94958 {
94959 struct tvec_base *base = __get_cpu_var(tvec_bases);
94960
94961diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94962index d9d6206..f19467e 100644
94963--- a/kernel/trace/blktrace.c
94964+++ b/kernel/trace/blktrace.c
94965@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94966 struct blk_trace *bt = filp->private_data;
94967 char buf[16];
94968
94969- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94970+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94971
94972 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94973 }
94974@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94975 return 1;
94976
94977 bt = buf->chan->private_data;
94978- atomic_inc(&bt->dropped);
94979+ atomic_inc_unchecked(&bt->dropped);
94980 return 0;
94981 }
94982
94983@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94984
94985 bt->dir = dir;
94986 bt->dev = dev;
94987- atomic_set(&bt->dropped, 0);
94988+ atomic_set_unchecked(&bt->dropped, 0);
94989
94990 ret = -EIO;
94991 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
94992diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94993index 4872937..c794d40 100644
94994--- a/kernel/trace/ftrace.c
94995+++ b/kernel/trace/ftrace.c
94996@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94997
94998 ip = rec->ip;
94999
95000+ ret = ftrace_arch_code_modify_prepare();
95001+ FTRACE_WARN_ON(ret);
95002+ if (ret)
95003+ return 0;
95004+
95005 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
95006+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
95007 if (ret) {
95008 ftrace_bug(ret, ip);
95009 rec->flags |= FTRACE_FL_FAILED;
95010- return 0;
95011 }
95012- return 1;
95013+ return ret ? 0 : 1;
95014 }
95015
95016 /*
95017diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
95018index e749a05..19c6e94 100644
95019--- a/kernel/trace/ring_buffer.c
95020+++ b/kernel/trace/ring_buffer.c
95021@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
95022 * the reader page). But if the next page is a header page,
95023 * its flags will be non zero.
95024 */
95025-static int inline
95026+static inline int
95027 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
95028 struct buffer_page *page, struct list_head *list)
95029 {
95030diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95031index a2a2d1f..7f32b09 100644
95032--- a/kernel/trace/trace.c
95033+++ b/kernel/trace/trace.c
95034@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
95035 size_t rem;
95036 unsigned int i;
95037
95038+ pax_track_stack();
95039+
95040 /* copy the tracer to avoid using a global lock all around */
95041 mutex_lock(&trace_types_lock);
95042 if (unlikely(old_tracer != current_trace && current_trace)) {
95043@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
95044 int entries, size, i;
95045 size_t ret;
95046
95047+ pax_track_stack();
95048+
95049 if (*ppos & (PAGE_SIZE - 1)) {
95050 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
95051 return -EINVAL;
95052@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
95053 };
95054 #endif
95055
95056-static struct dentry *d_tracer;
95057-
95058 struct dentry *tracing_init_dentry(void)
95059 {
95060+ static struct dentry *d_tracer;
95061 static int once;
95062
95063 if (d_tracer)
95064@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
95065 return d_tracer;
95066 }
95067
95068-static struct dentry *d_percpu;
95069-
95070 struct dentry *tracing_dentry_percpu(void)
95071 {
95072+ static struct dentry *d_percpu;
95073 static int once;
95074 struct dentry *d_tracer;
95075
95076diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95077index d128f65..f37b4af 100644
95078--- a/kernel/trace/trace_events.c
95079+++ b/kernel/trace/trace_events.c
95080@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
95081 * Modules must own their file_operations to keep up with
95082 * reference counting.
95083 */
95084+
95085 struct ftrace_module_file_ops {
95086 struct list_head list;
95087 struct module *mod;
95088- struct file_operations id;
95089- struct file_operations enable;
95090- struct file_operations format;
95091- struct file_operations filter;
95092 };
95093
95094 static void remove_subsystem_dir(const char *name)
95095@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
95096
95097 file_ops->mod = mod;
95098
95099- file_ops->id = ftrace_event_id_fops;
95100- file_ops->id.owner = mod;
95101-
95102- file_ops->enable = ftrace_enable_fops;
95103- file_ops->enable.owner = mod;
95104-
95105- file_ops->filter = ftrace_event_filter_fops;
95106- file_ops->filter.owner = mod;
95107-
95108- file_ops->format = ftrace_event_format_fops;
95109- file_ops->format.owner = mod;
95110+ pax_open_kernel();
95111+ *(void **)&mod->trace_id.owner = mod;
95112+ *(void **)&mod->trace_enable.owner = mod;
95113+ *(void **)&mod->trace_filter.owner = mod;
95114+ *(void **)&mod->trace_format.owner = mod;
95115+ pax_close_kernel();
95116
95117 list_add(&file_ops->list, &ftrace_module_file_list);
95118
95119@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
95120 call->mod = mod;
95121 list_add(&call->list, &ftrace_events);
95122 event_create_dir(call, d_events,
95123- &file_ops->id, &file_ops->enable,
95124- &file_ops->filter, &file_ops->format);
95125+ &mod->trace_id, &mod->trace_enable,
95126+ &mod->trace_filter, &mod->trace_format);
95127 }
95128 }
95129
95130diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95131index 0acd834..b800b56 100644
95132--- a/kernel/trace/trace_mmiotrace.c
95133+++ b/kernel/trace/trace_mmiotrace.c
95134@@ -23,7 +23,7 @@ struct header_iter {
95135 static struct trace_array *mmio_trace_array;
95136 static bool overrun_detected;
95137 static unsigned long prev_overruns;
95138-static atomic_t dropped_count;
95139+static atomic_unchecked_t dropped_count;
95140
95141 static void mmio_reset_data(struct trace_array *tr)
95142 {
95143@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
95144
95145 static unsigned long count_overruns(struct trace_iterator *iter)
95146 {
95147- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95148+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95149 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
95150
95151 if (over > prev_overruns)
95152@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95153 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95154 sizeof(*entry), 0, pc);
95155 if (!event) {
95156- atomic_inc(&dropped_count);
95157+ atomic_inc_unchecked(&dropped_count);
95158 return;
95159 }
95160 entry = ring_buffer_event_data(event);
95161@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95162 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95163 sizeof(*entry), 0, pc);
95164 if (!event) {
95165- atomic_inc(&dropped_count);
95166+ atomic_inc_unchecked(&dropped_count);
95167 return;
95168 }
95169 entry = ring_buffer_event_data(event);
95170diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95171index b6c12c6..41fdc53 100644
95172--- a/kernel/trace/trace_output.c
95173+++ b/kernel/trace/trace_output.c
95174@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
95175 return 0;
95176 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
95177 if (!IS_ERR(p)) {
95178- p = mangle_path(s->buffer + s->len, p, "\n");
95179+ p = mangle_path(s->buffer + s->len, p, "\n\\");
95180 if (p) {
95181 s->len = p - s->buffer;
95182 return 1;
95183diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95184index 8504ac7..ecf0adb 100644
95185--- a/kernel/trace/trace_stack.c
95186+++ b/kernel/trace/trace_stack.c
95187@@ -50,7 +50,7 @@ static inline void check_stack(void)
95188 return;
95189
95190 /* we do not handle interrupt stacks yet */
95191- if (!object_is_on_stack(&this_size))
95192+ if (!object_starts_on_stack(&this_size))
95193 return;
95194
95195 local_irq_save(flags);
95196diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
95197index 40cafb0..d5ead43 100644
95198--- a/kernel/trace/trace_workqueue.c
95199+++ b/kernel/trace/trace_workqueue.c
95200@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
95201 int cpu;
95202 pid_t pid;
95203 /* Can be inserted from interrupt or user context, need to be atomic */
95204- atomic_t inserted;
95205+ atomic_unchecked_t inserted;
95206 /*
95207 * Don't need to be atomic, works are serialized in a single workqueue thread
95208 * on a single CPU.
95209@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
95210 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
95211 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
95212 if (node->pid == wq_thread->pid) {
95213- atomic_inc(&node->inserted);
95214+ atomic_inc_unchecked(&node->inserted);
95215 goto found;
95216 }
95217 }
95218@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
95219 tsk = get_pid_task(pid, PIDTYPE_PID);
95220 if (tsk) {
95221 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
95222- atomic_read(&cws->inserted), cws->executed,
95223+ atomic_read_unchecked(&cws->inserted), cws->executed,
95224 tsk->comm);
95225 put_task_struct(tsk);
95226 }
95227diff --git a/kernel/user.c b/kernel/user.c
95228index 1b91701..8795237 100644
95229--- a/kernel/user.c
95230+++ b/kernel/user.c
95231@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
95232 spin_lock_irq(&uidhash_lock);
95233 up = uid_hash_find(uid, hashent);
95234 if (up) {
95235+ put_user_ns(ns);
95236 key_put(new->uid_keyring);
95237 key_put(new->session_keyring);
95238 kmem_cache_free(uid_cachep, new);
95239diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95240index 234ceb1..ad74049 100644
95241--- a/lib/Kconfig.debug
95242+++ b/lib/Kconfig.debug
95243@@ -905,7 +905,7 @@ config LATENCYTOP
95244 select STACKTRACE
95245 select SCHEDSTATS
95246 select SCHED_DEBUG
95247- depends on HAVE_LATENCYTOP_SUPPORT
95248+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
95249 help
95250 Enable this option if you want to use the LatencyTOP tool
95251 to find out which userspace is blocking on what kernel operations.
95252diff --git a/lib/bitmap.c b/lib/bitmap.c
95253index 7025658..8d14cab 100644
95254--- a/lib/bitmap.c
95255+++ b/lib/bitmap.c
95256@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95257 {
95258 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95259 u32 chunk;
95260- const char __user *ubuf = buf;
95261+ const char __user *ubuf = (const char __force_user *)buf;
95262
95263 bitmap_zero(maskp, nmaskbits);
95264
95265@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
95266 {
95267 if (!access_ok(VERIFY_READ, ubuf, ulen))
95268 return -EFAULT;
95269- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
95270+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
95271 }
95272 EXPORT_SYMBOL(bitmap_parse_user);
95273
95274diff --git a/lib/bug.c b/lib/bug.c
95275index 300e41a..2779eb0 100644
95276--- a/lib/bug.c
95277+++ b/lib/bug.c
95278@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95279 return BUG_TRAP_TYPE_NONE;
95280
95281 bug = find_bug(bugaddr);
95282+ if (!bug)
95283+ return BUG_TRAP_TYPE_NONE;
95284
95285 printk(KERN_EMERG "------------[ cut here ]------------\n");
95286
95287diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95288index 2b413db..e21d207 100644
95289--- a/lib/debugobjects.c
95290+++ b/lib/debugobjects.c
95291@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95292 if (limit > 4)
95293 return;
95294
95295- is_on_stack = object_is_on_stack(addr);
95296+ is_on_stack = object_starts_on_stack(addr);
95297 if (is_on_stack == onstack)
95298 return;
95299
95300diff --git a/lib/devres.c b/lib/devres.c
95301index 72c8909..7543868 100644
95302--- a/lib/devres.c
95303+++ b/lib/devres.c
95304@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
95305 {
95306 iounmap(addr);
95307 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
95308- (void *)addr));
95309+ (void __force *)addr));
95310 }
95311 EXPORT_SYMBOL(devm_iounmap);
95312
95313@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
95314 {
95315 ioport_unmap(addr);
95316 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
95317- devm_ioport_map_match, (void *)addr));
95318+ devm_ioport_map_match, (void __force *)addr));
95319 }
95320 EXPORT_SYMBOL(devm_ioport_unmap);
95321
95322diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95323index 084e879..0674448 100644
95324--- a/lib/dma-debug.c
95325+++ b/lib/dma-debug.c
95326@@ -861,7 +861,7 @@ out:
95327
95328 static void check_for_stack(struct device *dev, void *addr)
95329 {
95330- if (object_is_on_stack(addr))
95331+ if (object_starts_on_stack(addr))
95332 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
95333 "stack [addr=%p]\n", addr);
95334 }
95335diff --git a/lib/idr.c b/lib/idr.c
95336index eda7ba3..915dfae 100644
95337--- a/lib/idr.c
95338+++ b/lib/idr.c
95339@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
95340 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
95341
95342 /* if already at the top layer, we need to grow */
95343- if (id >= 1 << (idp->layers * IDR_BITS)) {
95344+ if (id >= (1 << (idp->layers * IDR_BITS))) {
95345 *starting_id = id;
95346 return IDR_NEED_TO_GROW;
95347 }
95348diff --git a/lib/inflate.c b/lib/inflate.c
95349index d102559..4215f31 100644
95350--- a/lib/inflate.c
95351+++ b/lib/inflate.c
95352@@ -266,7 +266,7 @@ static void free(void *where)
95353 malloc_ptr = free_mem_ptr;
95354 }
95355 #else
95356-#define malloc(a) kmalloc(a, GFP_KERNEL)
95357+#define malloc(a) kmalloc((a), GFP_KERNEL)
95358 #define free(a) kfree(a)
95359 #endif
95360
95361diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95362index bd2bea9..6b3c95e 100644
95363--- a/lib/is_single_threaded.c
95364+++ b/lib/is_single_threaded.c
95365@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95366 struct task_struct *p, *t;
95367 bool ret;
95368
95369+ if (!mm)
95370+ return true;
95371+
95372 if (atomic_read(&task->signal->live) != 1)
95373 return false;
95374
95375diff --git a/lib/kobject.c b/lib/kobject.c
95376index b512b74..8115eb1 100644
95377--- a/lib/kobject.c
95378+++ b/lib/kobject.c
95379@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
95380 return ret;
95381 }
95382
95383-struct sysfs_ops kobj_sysfs_ops = {
95384+const struct sysfs_ops kobj_sysfs_ops = {
95385 .show = kobj_attr_show,
95386 .store = kobj_attr_store,
95387 };
95388@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
95389 * If the kset was not able to be created, NULL will be returned.
95390 */
95391 static struct kset *kset_create(const char *name,
95392- struct kset_uevent_ops *uevent_ops,
95393+ const struct kset_uevent_ops *uevent_ops,
95394 struct kobject *parent_kobj)
95395 {
95396 struct kset *kset;
95397@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
95398 * If the kset was not able to be created, NULL will be returned.
95399 */
95400 struct kset *kset_create_and_add(const char *name,
95401- struct kset_uevent_ops *uevent_ops,
95402+ const struct kset_uevent_ops *uevent_ops,
95403 struct kobject *parent_kobj)
95404 {
95405 struct kset *kset;
95406diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
95407index 507b821..0bf8ed0 100644
95408--- a/lib/kobject_uevent.c
95409+++ b/lib/kobject_uevent.c
95410@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
95411 const char *subsystem;
95412 struct kobject *top_kobj;
95413 struct kset *kset;
95414- struct kset_uevent_ops *uevent_ops;
95415+ const struct kset_uevent_ops *uevent_ops;
95416 u64 seq;
95417 int i = 0;
95418 int retval = 0;
95419diff --git a/lib/kref.c b/lib/kref.c
95420index 9ecd6e8..12c94c1 100644
95421--- a/lib/kref.c
95422+++ b/lib/kref.c
95423@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
95424 */
95425 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
95426 {
95427- WARN_ON(release == NULL);
95428+ BUG_ON(release == NULL);
95429 WARN_ON(release == (void (*)(struct kref *))kfree);
95430
95431 if (atomic_dec_and_test(&kref->refcount)) {
95432diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95433index 92cdd99..a8149d7 100644
95434--- a/lib/radix-tree.c
95435+++ b/lib/radix-tree.c
95436@@ -81,7 +81,7 @@ struct radix_tree_preload {
95437 int nr;
95438 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
95439 };
95440-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95441+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95442
95443 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
95444 {
95445diff --git a/lib/random32.c b/lib/random32.c
95446index 217d5c4..45aba8a 100644
95447--- a/lib/random32.c
95448+++ b/lib/random32.c
95449@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
95450 */
95451 static inline u32 __seed(u32 x, u32 m)
95452 {
95453- return (x < m) ? x + m : x;
95454+ return (x <= m) ? x + m + 1 : x;
95455 }
95456
95457 /**
95458diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95459index 33bed5e..1477e46 100644
95460--- a/lib/vsprintf.c
95461+++ b/lib/vsprintf.c
95462@@ -16,6 +16,9 @@
95463 * - scnprintf and vscnprintf
95464 */
95465
95466+#ifdef CONFIG_GRKERNSEC_HIDESYM
95467+#define __INCLUDED_BY_HIDESYM 1
95468+#endif
95469 #include <stdarg.h>
95470 #include <linux/module.h>
95471 #include <linux/types.h>
95472@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
95473 return buf;
95474 }
95475
95476-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
95477+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
95478 {
95479 int len, i;
95480
95481 if ((unsigned long)s < PAGE_SIZE)
95482- s = "<NULL>";
95483+ s = "(null)";
95484
95485 len = strnlen(s, spec.precision);
95486
95487@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
95488 unsigned long value = (unsigned long) ptr;
95489 #ifdef CONFIG_KALLSYMS
95490 char sym[KSYM_SYMBOL_LEN];
95491- if (ext != 'f' && ext != 's')
95492+ if (ext != 'f' && ext != 's' && ext != 'a')
95493 sprint_symbol(sym, value);
95494 else
95495 kallsyms_lookup(value, NULL, NULL, NULL, sym);
95496@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
95497 * - 'f' For simple symbolic function names without offset
95498 * - 'S' For symbolic direct pointers with offset
95499 * - 's' For symbolic direct pointers without offset
95500+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95501+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
95502 * - 'R' For a struct resource pointer, it prints the range of
95503 * addresses (not the name nor the flags)
95504 * - 'M' For a 6-byte MAC address, it prints the address in the
95505@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95506 struct printf_spec spec)
95507 {
95508 if (!ptr)
95509- return string(buf, end, "(null)", spec);
95510+ return string(buf, end, "(nil)", spec);
95511
95512 switch (*fmt) {
95513 case 'F':
95514@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95515 case 's':
95516 /* Fallthrough */
95517 case 'S':
95518+#ifdef CONFIG_GRKERNSEC_HIDESYM
95519+ break;
95520+#else
95521+ return symbol_string(buf, end, ptr, spec, *fmt);
95522+#endif
95523+ case 'a':
95524+ /* Fallthrough */
95525+ case 'A':
95526 return symbol_string(buf, end, ptr, spec, *fmt);
95527 case 'R':
95528 return resource_string(buf, end, ptr, spec);
95529@@ -1445,7 +1458,7 @@ do { \
95530 size_t len;
95531 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
95532 || (unsigned long)save_str < PAGE_SIZE)
95533- save_str = "<NULL>";
95534+ save_str = "(null)";
95535 len = strlen(save_str);
95536 if (str + len + 1 < end)
95537 memcpy(str, save_str, len + 1);
95538@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95539 typeof(type) value; \
95540 if (sizeof(type) == 8) { \
95541 args = PTR_ALIGN(args, sizeof(u32)); \
95542- *(u32 *)&value = *(u32 *)args; \
95543- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95544+ *(u32 *)&value = *(const u32 *)args; \
95545+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95546 } else { \
95547 args = PTR_ALIGN(args, sizeof(type)); \
95548- value = *(typeof(type) *)args; \
95549+ value = *(const typeof(type) *)args; \
95550 } \
95551 args += sizeof(type); \
95552 value; \
95553@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95554 const char *str_arg = args;
95555 size_t len = strlen(str_arg);
95556 args += len + 1;
95557- str = string(str, end, (char *)str_arg, spec);
95558+ str = string(str, end, str_arg, spec);
95559 break;
95560 }
95561
95562diff --git a/localversion-grsec b/localversion-grsec
95563new file mode 100644
95564index 0000000..7cd6065
95565--- /dev/null
95566+++ b/localversion-grsec
95567@@ -0,0 +1 @@
95568+-grsec
95569diff --git a/mm/Kconfig b/mm/Kconfig
95570index 2c19c0b..f3c3f83 100644
95571--- a/mm/Kconfig
95572+++ b/mm/Kconfig
95573@@ -228,7 +228,7 @@ config KSM
95574 config DEFAULT_MMAP_MIN_ADDR
95575 int "Low address space to protect from user allocation"
95576 depends on MMU
95577- default 4096
95578+ default 65536
95579 help
95580 This is the portion of low virtual memory which should be protected
95581 from userspace allocation. Keeping a user from writing to low pages
95582diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95583index d824401..9f5244a 100644
95584--- a/mm/backing-dev.c
95585+++ b/mm/backing-dev.c
95586@@ -271,7 +271,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
95587 list_add_tail_rcu(&wb->list, &bdi->wb_list);
95588 spin_unlock(&bdi->wb_lock);
95589
95590- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
95591+ tsk->flags |= PF_SWAPWRITE;
95592 set_freezable();
95593
95594 /*
95595@@ -489,7 +489,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
95596 * Add the default flusher task that gets created for any bdi
95597 * that has dirty data pending writeout
95598 */
95599-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95600+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95601 {
95602 if (!bdi_cap_writeback_dirty(bdi))
95603 return;
95604diff --git a/mm/filemap.c b/mm/filemap.c
95605index a1fe378..e26702f 100644
95606--- a/mm/filemap.c
95607+++ b/mm/filemap.c
95608@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95609 struct address_space *mapping = file->f_mapping;
95610
95611 if (!mapping->a_ops->readpage)
95612- return -ENOEXEC;
95613+ return -ENODEV;
95614 file_accessed(file);
95615 vma->vm_ops = &generic_file_vm_ops;
95616 vma->vm_flags |= VM_CAN_NONLINEAR;
95617@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95618 *pos = i_size_read(inode);
95619
95620 if (limit != RLIM_INFINITY) {
95621+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95622 if (*pos >= limit) {
95623 send_sig(SIGXFSZ, current, 0);
95624 return -EFBIG;
95625diff --git a/mm/fremap.c b/mm/fremap.c
95626index b6ec85a..a24ac22 100644
95627--- a/mm/fremap.c
95628+++ b/mm/fremap.c
95629@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95630 retry:
95631 vma = find_vma(mm, start);
95632
95633+#ifdef CONFIG_PAX_SEGMEXEC
95634+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95635+ goto out;
95636+#endif
95637+
95638 /*
95639 * Make sure the vma is shared, that it supports prefaulting,
95640 * and that the remapped range is valid and fully within
95641@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95642 /*
95643 * drop PG_Mlocked flag for over-mapped range
95644 */
95645- unsigned int saved_flags = vma->vm_flags;
95646+ unsigned long saved_flags = vma->vm_flags;
95647 munlock_vma_pages_range(vma, start, start + size);
95648 vma->vm_flags = saved_flags;
95649 }
95650diff --git a/mm/highmem.c b/mm/highmem.c
95651index 9c1e627..5ca9447 100644
95652--- a/mm/highmem.c
95653+++ b/mm/highmem.c
95654@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
95655 * So no dangers, even with speculative execution.
95656 */
95657 page = pte_page(pkmap_page_table[i]);
95658+ pax_open_kernel();
95659 pte_clear(&init_mm, (unsigned long)page_address(page),
95660 &pkmap_page_table[i]);
95661-
95662+ pax_close_kernel();
95663 set_page_address(page, NULL);
95664 need_flush = 1;
95665 }
95666@@ -177,9 +178,11 @@ start:
95667 }
95668 }
95669 vaddr = PKMAP_ADDR(last_pkmap_nr);
95670+
95671+ pax_open_kernel();
95672 set_pte_at(&init_mm, vaddr,
95673 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95674-
95675+ pax_close_kernel();
95676 pkmap_count[last_pkmap_nr] = 1;
95677 set_page_address(page, (void *)vaddr);
95678
95679diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95680index 5e1e508..ac70275 100644
95681--- a/mm/hugetlb.c
95682+++ b/mm/hugetlb.c
95683@@ -869,6 +869,7 @@ free:
95684 list_del(&page->lru);
95685 enqueue_huge_page(h, page);
95686 }
95687+ spin_unlock(&hugetlb_lock);
95688
95689 /* Free unnecessary surplus pages to the buddy allocator */
95690 if (!list_empty(&surplus_list)) {
95691@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95692 return 1;
95693 }
95694
95695+#ifdef CONFIG_PAX_SEGMEXEC
95696+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95697+{
95698+ struct mm_struct *mm = vma->vm_mm;
95699+ struct vm_area_struct *vma_m;
95700+ unsigned long address_m;
95701+ pte_t *ptep_m;
95702+
95703+ vma_m = pax_find_mirror_vma(vma);
95704+ if (!vma_m)
95705+ return;
95706+
95707+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95708+ address_m = address + SEGMEXEC_TASK_SIZE;
95709+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95710+ get_page(page_m);
95711+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95712+}
95713+#endif
95714+
95715 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
95716 unsigned long address, pte_t *ptep, pte_t pte,
95717 struct page *pagecache_page)
95718@@ -2004,6 +2025,11 @@ retry_avoidcopy:
95719 huge_ptep_clear_flush(vma, address, ptep);
95720 set_huge_pte_at(mm, address, ptep,
95721 make_huge_pte(vma, new_page, 1));
95722+
95723+#ifdef CONFIG_PAX_SEGMEXEC
95724+ pax_mirror_huge_pte(vma, address, new_page);
95725+#endif
95726+
95727 /* Make the old page be freed below */
95728 new_page = old_page;
95729 }
95730@@ -2135,6 +2161,10 @@ retry:
95731 && (vma->vm_flags & VM_SHARED)));
95732 set_huge_pte_at(mm, address, ptep, new_pte);
95733
95734+#ifdef CONFIG_PAX_SEGMEXEC
95735+ pax_mirror_huge_pte(vma, address, page);
95736+#endif
95737+
95738 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95739 /* Optimization, do the COW without a second fault */
95740 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
95741@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95742 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
95743 struct hstate *h = hstate_vma(vma);
95744
95745+#ifdef CONFIG_PAX_SEGMEXEC
95746+ struct vm_area_struct *vma_m;
95747+
95748+ vma_m = pax_find_mirror_vma(vma);
95749+ if (vma_m) {
95750+ unsigned long address_m;
95751+
95752+ if (vma->vm_start > vma_m->vm_start) {
95753+ address_m = address;
95754+ address -= SEGMEXEC_TASK_SIZE;
95755+ vma = vma_m;
95756+ h = hstate_vma(vma);
95757+ } else
95758+ address_m = address + SEGMEXEC_TASK_SIZE;
95759+
95760+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95761+ return VM_FAULT_OOM;
95762+ address_m &= HPAGE_MASK;
95763+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95764+ }
95765+#endif
95766+
95767 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95768 if (!ptep)
95769 return VM_FAULT_OOM;
95770diff --git a/mm/internal.h b/mm/internal.h
95771index f03e8e2..7354343 100644
95772--- a/mm/internal.h
95773+++ b/mm/internal.h
95774@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
95775 * in mm/page_alloc.c
95776 */
95777 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95778+extern void free_compound_page(struct page *page);
95779 extern void prep_compound_page(struct page *page, unsigned long order);
95780
95781
95782diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95783index c346660..b47382f 100644
95784--- a/mm/kmemleak.c
95785+++ b/mm/kmemleak.c
95786@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
95787
95788 for (i = 0; i < object->trace_len; i++) {
95789 void *ptr = (void *)object->trace[i];
95790- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95791+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
95792 }
95793 }
95794
95795diff --git a/mm/maccess.c b/mm/maccess.c
95796index 9073695..1127f348 100644
95797--- a/mm/maccess.c
95798+++ b/mm/maccess.c
95799@@ -14,7 +14,7 @@
95800 * Safely read from address @src to the buffer at @dst. If a kernel fault
95801 * happens, handle that and return -EFAULT.
95802 */
95803-long probe_kernel_read(void *dst, void *src, size_t size)
95804+long probe_kernel_read(void *dst, const void *src, size_t size)
95805 {
95806 long ret;
95807 mm_segment_t old_fs = get_fs();
95808@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
95809 set_fs(KERNEL_DS);
95810 pagefault_disable();
95811 ret = __copy_from_user_inatomic(dst,
95812- (__force const void __user *)src, size);
95813+ (const void __force_user *)src, size);
95814 pagefault_enable();
95815 set_fs(old_fs);
95816
95817@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
95818 * Safely write to address @dst from the buffer at @src. If a kernel fault
95819 * happens, handle that and return -EFAULT.
95820 */
95821-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
95822+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
95823 {
95824 long ret;
95825 mm_segment_t old_fs = get_fs();
95826
95827 set_fs(KERNEL_DS);
95828 pagefault_disable();
95829- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95830+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95831 pagefault_enable();
95832 set_fs(old_fs);
95833
95834diff --git a/mm/madvise.c b/mm/madvise.c
95835index 35b1479..499f7d4 100644
95836--- a/mm/madvise.c
95837+++ b/mm/madvise.c
95838@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
95839 pgoff_t pgoff;
95840 unsigned long new_flags = vma->vm_flags;
95841
95842+#ifdef CONFIG_PAX_SEGMEXEC
95843+ struct vm_area_struct *vma_m;
95844+#endif
95845+
95846 switch (behavior) {
95847 case MADV_NORMAL:
95848 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95849@@ -103,6 +107,13 @@ success:
95850 /*
95851 * vm_flags is protected by the mmap_sem held in write mode.
95852 */
95853+
95854+#ifdef CONFIG_PAX_SEGMEXEC
95855+ vma_m = pax_find_mirror_vma(vma);
95856+ if (vma_m)
95857+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95858+#endif
95859+
95860 vma->vm_flags = new_flags;
95861
95862 out:
95863@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95864 struct vm_area_struct ** prev,
95865 unsigned long start, unsigned long end)
95866 {
95867+
95868+#ifdef CONFIG_PAX_SEGMEXEC
95869+ struct vm_area_struct *vma_m;
95870+#endif
95871+
95872 *prev = vma;
95873 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95874 return -EINVAL;
95875@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95876 zap_page_range(vma, start, end - start, &details);
95877 } else
95878 zap_page_range(vma, start, end - start, NULL);
95879+
95880+#ifdef CONFIG_PAX_SEGMEXEC
95881+ vma_m = pax_find_mirror_vma(vma);
95882+ if (vma_m) {
95883+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95884+ struct zap_details details = {
95885+ .nonlinear_vma = vma_m,
95886+ .last_index = ULONG_MAX,
95887+ };
95888+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95889+ } else
95890+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95891+ }
95892+#endif
95893+
95894 return 0;
95895 }
95896
95897@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95898 if (end < start)
95899 goto out;
95900
95901+#ifdef CONFIG_PAX_SEGMEXEC
95902+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95903+ if (end > SEGMEXEC_TASK_SIZE)
95904+ goto out;
95905+ } else
95906+#endif
95907+
95908+ if (end > TASK_SIZE)
95909+ goto out;
95910+
95911 error = 0;
95912 if (end == start)
95913 goto out;
95914diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95915index 8aeba53..b4a4198 100644
95916--- a/mm/memory-failure.c
95917+++ b/mm/memory-failure.c
95918@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95919
95920 int sysctl_memory_failure_recovery __read_mostly = 1;
95921
95922-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
95923+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
95924
95925 /*
95926 * Send all the processes who have the page mapped an ``action optional''
95927@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
95928 si.si_signo = SIGBUS;
95929 si.si_errno = 0;
95930 si.si_code = BUS_MCEERR_AO;
95931- si.si_addr = (void *)addr;
95932+ si.si_addr = (void __user *)addr;
95933 #ifdef __ARCH_SI_TRAPNO
95934 si.si_trapno = trapno;
95935 #endif
95936@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
95937 return 0;
95938 }
95939
95940- atomic_long_add(1, &mce_bad_pages);
95941+ atomic_long_add_unchecked(1, &mce_bad_pages);
95942
95943 /*
95944 * We need/can do nothing about count=0 pages.
95945diff --git a/mm/memory.c b/mm/memory.c
95946index 6c836d3..48f3264 100644
95947--- a/mm/memory.c
95948+++ b/mm/memory.c
95949@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95950 return;
95951
95952 pmd = pmd_offset(pud, start);
95953+
95954+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95955 pud_clear(pud);
95956 pmd_free_tlb(tlb, pmd, start);
95957+#endif
95958+
95959 }
95960
95961 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95962@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95963 if (end - 1 > ceiling - 1)
95964 return;
95965
95966+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
95967 pud = pud_offset(pgd, start);
95968 pgd_clear(pgd);
95969 pud_free_tlb(tlb, pud, start);
95970+#endif
95971+
95972 }
95973
95974 /*
95975@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95976 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
95977 i = 0;
95978
95979- do {
95980+ while (nr_pages) {
95981 struct vm_area_struct *vma;
95982
95983- vma = find_extend_vma(mm, start);
95984+ vma = find_vma(mm, start);
95985 if (!vma && in_gate_area(tsk, start)) {
95986 unsigned long pg = start & PAGE_MASK;
95987 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
95988@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95989 continue;
95990 }
95991
95992- if (!vma ||
95993+ if (!vma || start < vma->vm_start ||
95994 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
95995 !(vm_flags & vma->vm_flags))
95996 return i ? : -EFAULT;
95997@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95998 start += PAGE_SIZE;
95999 nr_pages--;
96000 } while (nr_pages && start < vma->vm_end);
96001- } while (nr_pages);
96002+ }
96003 return i;
96004 }
96005
96006@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96007 page_add_file_rmap(page);
96008 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96009
96010+#ifdef CONFIG_PAX_SEGMEXEC
96011+ pax_mirror_file_pte(vma, addr, page, ptl);
96012+#endif
96013+
96014 retval = 0;
96015 pte_unmap_unlock(pte, ptl);
96016 return retval;
96017@@ -1560,10 +1571,22 @@ out:
96018 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96019 struct page *page)
96020 {
96021+
96022+#ifdef CONFIG_PAX_SEGMEXEC
96023+ struct vm_area_struct *vma_m;
96024+#endif
96025+
96026 if (addr < vma->vm_start || addr >= vma->vm_end)
96027 return -EFAULT;
96028 if (!page_count(page))
96029 return -EINVAL;
96030+
96031+#ifdef CONFIG_PAX_SEGMEXEC
96032+ vma_m = pax_find_mirror_vma(vma);
96033+ if (vma_m)
96034+ vma_m->vm_flags |= VM_INSERTPAGE;
96035+#endif
96036+
96037 vma->vm_flags |= VM_INSERTPAGE;
96038 return insert_page(vma, addr, page, vma->vm_page_prot);
96039 }
96040@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96041 unsigned long pfn)
96042 {
96043 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96044+ BUG_ON(vma->vm_mirror);
96045
96046 if (addr < vma->vm_start || addr >= vma->vm_end)
96047 return -EFAULT;
96048@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
96049 copy_user_highpage(dst, src, va, vma);
96050 }
96051
96052+#ifdef CONFIG_PAX_SEGMEXEC
96053+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96054+{
96055+ struct mm_struct *mm = vma->vm_mm;
96056+ spinlock_t *ptl;
96057+ pte_t *pte, entry;
96058+
96059+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96060+ entry = *pte;
96061+ if (!pte_present(entry)) {
96062+ if (!pte_none(entry)) {
96063+ BUG_ON(pte_file(entry));
96064+ free_swap_and_cache(pte_to_swp_entry(entry));
96065+ pte_clear_not_present_full(mm, address, pte, 0);
96066+ }
96067+ } else {
96068+ struct page *page;
96069+
96070+ flush_cache_page(vma, address, pte_pfn(entry));
96071+ entry = ptep_clear_flush(vma, address, pte);
96072+ BUG_ON(pte_dirty(entry));
96073+ page = vm_normal_page(vma, address, entry);
96074+ if (page) {
96075+ update_hiwater_rss(mm);
96076+ if (PageAnon(page))
96077+ dec_mm_counter(mm, anon_rss);
96078+ else
96079+ dec_mm_counter(mm, file_rss);
96080+ page_remove_rmap(page);
96081+ page_cache_release(page);
96082+ }
96083+ }
96084+ pte_unmap_unlock(pte, ptl);
96085+}
96086+
96087+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96088+ *
96089+ * the ptl of the lower mapped page is held on entry and is not released on exit
96090+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96091+ */
96092+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96093+{
96094+ struct mm_struct *mm = vma->vm_mm;
96095+ unsigned long address_m;
96096+ spinlock_t *ptl_m;
96097+ struct vm_area_struct *vma_m;
96098+ pmd_t *pmd_m;
96099+ pte_t *pte_m, entry_m;
96100+
96101+ BUG_ON(!page_m || !PageAnon(page_m));
96102+
96103+ vma_m = pax_find_mirror_vma(vma);
96104+ if (!vma_m)
96105+ return;
96106+
96107+ BUG_ON(!PageLocked(page_m));
96108+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96109+ address_m = address + SEGMEXEC_TASK_SIZE;
96110+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96111+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96112+ ptl_m = pte_lockptr(mm, pmd_m);
96113+ if (ptl != ptl_m) {
96114+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96115+ if (!pte_none(*pte_m))
96116+ goto out;
96117+ }
96118+
96119+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96120+ page_cache_get(page_m);
96121+ page_add_anon_rmap(page_m, vma_m, address_m);
96122+ inc_mm_counter(mm, anon_rss);
96123+ set_pte_at(mm, address_m, pte_m, entry_m);
96124+ update_mmu_cache(vma_m, address_m, entry_m);
96125+out:
96126+ if (ptl != ptl_m)
96127+ spin_unlock(ptl_m);
96128+ pte_unmap_nested(pte_m);
96129+ unlock_page(page_m);
96130+}
96131+
96132+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96133+{
96134+ struct mm_struct *mm = vma->vm_mm;
96135+ unsigned long address_m;
96136+ spinlock_t *ptl_m;
96137+ struct vm_area_struct *vma_m;
96138+ pmd_t *pmd_m;
96139+ pte_t *pte_m, entry_m;
96140+
96141+ BUG_ON(!page_m || PageAnon(page_m));
96142+
96143+ vma_m = pax_find_mirror_vma(vma);
96144+ if (!vma_m)
96145+ return;
96146+
96147+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96148+ address_m = address + SEGMEXEC_TASK_SIZE;
96149+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96150+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96151+ ptl_m = pte_lockptr(mm, pmd_m);
96152+ if (ptl != ptl_m) {
96153+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96154+ if (!pte_none(*pte_m))
96155+ goto out;
96156+ }
96157+
96158+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96159+ page_cache_get(page_m);
96160+ page_add_file_rmap(page_m);
96161+ inc_mm_counter(mm, file_rss);
96162+ set_pte_at(mm, address_m, pte_m, entry_m);
96163+ update_mmu_cache(vma_m, address_m, entry_m);
96164+out:
96165+ if (ptl != ptl_m)
96166+ spin_unlock(ptl_m);
96167+ pte_unmap_nested(pte_m);
96168+}
96169+
96170+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96171+{
96172+ struct mm_struct *mm = vma->vm_mm;
96173+ unsigned long address_m;
96174+ spinlock_t *ptl_m;
96175+ struct vm_area_struct *vma_m;
96176+ pmd_t *pmd_m;
96177+ pte_t *pte_m, entry_m;
96178+
96179+ vma_m = pax_find_mirror_vma(vma);
96180+ if (!vma_m)
96181+ return;
96182+
96183+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96184+ address_m = address + SEGMEXEC_TASK_SIZE;
96185+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96186+ pte_m = pte_offset_map_nested(pmd_m, address_m);
96187+ ptl_m = pte_lockptr(mm, pmd_m);
96188+ if (ptl != ptl_m) {
96189+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96190+ if (!pte_none(*pte_m))
96191+ goto out;
96192+ }
96193+
96194+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96195+ set_pte_at(mm, address_m, pte_m, entry_m);
96196+out:
96197+ if (ptl != ptl_m)
96198+ spin_unlock(ptl_m);
96199+ pte_unmap_nested(pte_m);
96200+}
96201+
96202+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96203+{
96204+ struct page *page_m;
96205+ pte_t entry;
96206+
96207+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96208+ goto out;
96209+
96210+ entry = *pte;
96211+ page_m = vm_normal_page(vma, address, entry);
96212+ if (!page_m)
96213+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96214+ else if (PageAnon(page_m)) {
96215+ if (pax_find_mirror_vma(vma)) {
96216+ pte_unmap_unlock(pte, ptl);
96217+ lock_page(page_m);
96218+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96219+ if (pte_same(entry, *pte))
96220+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96221+ else
96222+ unlock_page(page_m);
96223+ }
96224+ } else
96225+ pax_mirror_file_pte(vma, address, page_m, ptl);
96226+
96227+out:
96228+ pte_unmap_unlock(pte, ptl);
96229+}
96230+#endif
96231+
96232 /*
96233 * This routine handles present pages, when users try to write
96234 * to a shared page. It is done by copying the page to a new address
96235@@ -2156,6 +2360,12 @@ gotten:
96236 */
96237 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96238 if (likely(pte_same(*page_table, orig_pte))) {
96239+
96240+#ifdef CONFIG_PAX_SEGMEXEC
96241+ if (pax_find_mirror_vma(vma))
96242+ BUG_ON(!trylock_page(new_page));
96243+#endif
96244+
96245 if (old_page) {
96246 if (!PageAnon(old_page)) {
96247 dec_mm_counter(mm, file_rss);
96248@@ -2207,6 +2417,10 @@ gotten:
96249 page_remove_rmap(old_page);
96250 }
96251
96252+#ifdef CONFIG_PAX_SEGMEXEC
96253+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96254+#endif
96255+
96256 /* Free the old page.. */
96257 new_page = old_page;
96258 ret |= VM_FAULT_WRITE;
96259@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96260 swap_free(entry);
96261 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96262 try_to_free_swap(page);
96263+
96264+#ifdef CONFIG_PAX_SEGMEXEC
96265+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96266+#endif
96267+
96268 unlock_page(page);
96269
96270 if (flags & FAULT_FLAG_WRITE) {
96271@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96272
96273 /* No need to invalidate - it was non-present before */
96274 update_mmu_cache(vma, address, pte);
96275+
96276+#ifdef CONFIG_PAX_SEGMEXEC
96277+ pax_mirror_anon_pte(vma, address, page, ptl);
96278+#endif
96279+
96280 unlock:
96281 pte_unmap_unlock(page_table, ptl);
96282 out:
96283@@ -2632,40 +2856,6 @@ out_release:
96284 }
96285
96286 /*
96287- * This is like a special single-page "expand_{down|up}wards()",
96288- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96289- * doesn't hit another vma.
96290- */
96291-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96292-{
96293- address &= PAGE_MASK;
96294- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96295- struct vm_area_struct *prev = vma->vm_prev;
96296-
96297- /*
96298- * Is there a mapping abutting this one below?
96299- *
96300- * That's only ok if it's the same stack mapping
96301- * that has gotten split..
96302- */
96303- if (prev && prev->vm_end == address)
96304- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96305-
96306- expand_stack(vma, address - PAGE_SIZE);
96307- }
96308- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96309- struct vm_area_struct *next = vma->vm_next;
96310-
96311- /* As VM_GROWSDOWN but s/below/above/ */
96312- if (next && next->vm_start == address + PAGE_SIZE)
96313- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96314-
96315- expand_upwards(vma, address + PAGE_SIZE);
96316- }
96317- return 0;
96318-}
96319-
96320-/*
96321 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96322 * but allow concurrent faults), and pte mapped but not yet locked.
96323 * We return with mmap_sem still held, but pte unmapped and unlocked.
96324@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96325 unsigned long address, pte_t *page_table, pmd_t *pmd,
96326 unsigned int flags)
96327 {
96328- struct page *page;
96329+ struct page *page = NULL;
96330 spinlock_t *ptl;
96331 pte_t entry;
96332
96333- pte_unmap(page_table);
96334-
96335- /* Check if we need to add a guard page to the stack */
96336- if (check_stack_guard_page(vma, address) < 0)
96337- return VM_FAULT_SIGBUS;
96338-
96339- /* Use the zero-page for reads */
96340 if (!(flags & FAULT_FLAG_WRITE)) {
96341 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96342 vma->vm_page_prot));
96343- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96344+ ptl = pte_lockptr(mm, pmd);
96345+ spin_lock(ptl);
96346 if (!pte_none(*page_table))
96347 goto unlock;
96348 goto setpte;
96349 }
96350
96351 /* Allocate our own private page. */
96352+ pte_unmap(page_table);
96353+
96354 if (unlikely(anon_vma_prepare(vma)))
96355 goto oom;
96356 page = alloc_zeroed_user_highpage_movable(vma, address);
96357@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96358 if (!pte_none(*page_table))
96359 goto release;
96360
96361+#ifdef CONFIG_PAX_SEGMEXEC
96362+ if (pax_find_mirror_vma(vma))
96363+ BUG_ON(!trylock_page(page));
96364+#endif
96365+
96366 inc_mm_counter(mm, anon_rss);
96367 page_add_new_anon_rmap(page, vma, address);
96368 setpte:
96369@@ -2720,6 +2911,12 @@ setpte:
96370
96371 /* No need to invalidate - it was non-present before */
96372 update_mmu_cache(vma, address, entry);
96373+
96374+#ifdef CONFIG_PAX_SEGMEXEC
96375+ if (page)
96376+ pax_mirror_anon_pte(vma, address, page, ptl);
96377+#endif
96378+
96379 unlock:
96380 pte_unmap_unlock(page_table, ptl);
96381 return 0;
96382@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96383 */
96384 /* Only go through if we didn't race with anybody else... */
96385 if (likely(pte_same(*page_table, orig_pte))) {
96386+
96387+#ifdef CONFIG_PAX_SEGMEXEC
96388+ if (anon && pax_find_mirror_vma(vma))
96389+ BUG_ON(!trylock_page(page));
96390+#endif
96391+
96392 flush_icache_page(vma, page);
96393 entry = mk_pte(page, vma->vm_page_prot);
96394 if (flags & FAULT_FLAG_WRITE)
96395@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96396
96397 /* no need to invalidate: a not-present page won't be cached */
96398 update_mmu_cache(vma, address, entry);
96399+
96400+#ifdef CONFIG_PAX_SEGMEXEC
96401+ if (anon)
96402+ pax_mirror_anon_pte(vma, address, page, ptl);
96403+ else
96404+ pax_mirror_file_pte(vma, address, page, ptl);
96405+#endif
96406+
96407 } else {
96408 if (charged)
96409 mem_cgroup_uncharge_page(page);
96410@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
96411 if (flags & FAULT_FLAG_WRITE)
96412 flush_tlb_page(vma, address);
96413 }
96414+
96415+#ifdef CONFIG_PAX_SEGMEXEC
96416+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96417+ return 0;
96418+#endif
96419+
96420 unlock:
96421 pte_unmap_unlock(pte, ptl);
96422 return 0;
96423@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96424 pmd_t *pmd;
96425 pte_t *pte;
96426
96427+#ifdef CONFIG_PAX_SEGMEXEC
96428+ struct vm_area_struct *vma_m;
96429+#endif
96430+
96431 __set_current_state(TASK_RUNNING);
96432
96433 count_vm_event(PGFAULT);
96434@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96435 if (unlikely(is_vm_hugetlb_page(vma)))
96436 return hugetlb_fault(mm, vma, address, flags);
96437
96438+#ifdef CONFIG_PAX_SEGMEXEC
96439+ vma_m = pax_find_mirror_vma(vma);
96440+ if (vma_m) {
96441+ unsigned long address_m;
96442+ pgd_t *pgd_m;
96443+ pud_t *pud_m;
96444+ pmd_t *pmd_m;
96445+
96446+ if (vma->vm_start > vma_m->vm_start) {
96447+ address_m = address;
96448+ address -= SEGMEXEC_TASK_SIZE;
96449+ vma = vma_m;
96450+ } else
96451+ address_m = address + SEGMEXEC_TASK_SIZE;
96452+
96453+ pgd_m = pgd_offset(mm, address_m);
96454+ pud_m = pud_alloc(mm, pgd_m, address_m);
96455+ if (!pud_m)
96456+ return VM_FAULT_OOM;
96457+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96458+ if (!pmd_m)
96459+ return VM_FAULT_OOM;
96460+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
96461+ return VM_FAULT_OOM;
96462+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96463+ }
96464+#endif
96465+
96466 pgd = pgd_offset(mm, address);
96467 pud = pud_alloc(mm, pgd, address);
96468 if (!pud)
96469@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
96470 gate_vma.vm_start = FIXADDR_USER_START;
96471 gate_vma.vm_end = FIXADDR_USER_END;
96472 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
96473- gate_vma.vm_page_prot = __P101;
96474+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
96475 /*
96476 * Make sure the vDSO gets into every core dump.
96477 * Dumping its contents makes post-mortem fully interpretable later
96478diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96479index 3c6e3e2..b1ddbb8 100644
96480--- a/mm/mempolicy.c
96481+++ b/mm/mempolicy.c
96482@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96483 struct vm_area_struct *next;
96484 int err;
96485
96486+#ifdef CONFIG_PAX_SEGMEXEC
96487+ struct vm_area_struct *vma_m;
96488+#endif
96489+
96490 err = 0;
96491 for (; vma && vma->vm_start < end; vma = next) {
96492 next = vma->vm_next;
96493@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
96494 err = policy_vma(vma, new);
96495 if (err)
96496 break;
96497+
96498+#ifdef CONFIG_PAX_SEGMEXEC
96499+ vma_m = pax_find_mirror_vma(vma);
96500+ if (vma_m) {
96501+ err = policy_vma(vma_m, new);
96502+ if (err)
96503+ break;
96504+ }
96505+#endif
96506+
96507 }
96508 return err;
96509 }
96510@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96511
96512 if (end < start)
96513 return -EINVAL;
96514+
96515+#ifdef CONFIG_PAX_SEGMEXEC
96516+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96517+ if (end > SEGMEXEC_TASK_SIZE)
96518+ return -EINVAL;
96519+ } else
96520+#endif
96521+
96522+ if (end > TASK_SIZE)
96523+ return -EINVAL;
96524+
96525 if (end == start)
96526 return 0;
96527
96528@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96529 if (!mm)
96530 return -EINVAL;
96531
96532+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96533+ if (mm != current->mm &&
96534+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96535+ err = -EPERM;
96536+ goto out;
96537+ }
96538+#endif
96539+
96540 /*
96541 * Check if this process has the right to modify the specified
96542 * process. The right exists if the process has administrative
96543@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96544 rcu_read_lock();
96545 tcred = __task_cred(task);
96546 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96547- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96548- !capable(CAP_SYS_NICE)) {
96549+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96550 rcu_read_unlock();
96551 err = -EPERM;
96552 goto out;
96553@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
96554 }
96555 #endif
96556
96557+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96558+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
96559+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
96560+ _mm->pax_flags & MF_PAX_SEGMEXEC))
96561+#endif
96562+
96563 /*
96564 * Display pages allocated per node and memory policy via /proc.
96565 */
96566@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
96567 int n;
96568 char buffer[50];
96569
96570+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96571+ if (current->exec_id != m->exec_id) {
96572+ gr_log_badprocpid("numa_maps");
96573+ return 0;
96574+ }
96575+#endif
96576+
96577 if (!mm)
96578 return 0;
96579
96580@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
96581 mpol_to_str(buffer, sizeof(buffer), pol, 0);
96582 mpol_cond_put(pol);
96583
96584+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96585+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
96586+#else
96587 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
96588+#endif
96589
96590 if (file) {
96591 seq_printf(m, " file=");
96592- seq_path(m, &file->f_path, "\n\t= ");
96593+ seq_path(m, &file->f_path, "\n\t\\= ");
96594 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
96595 seq_printf(m, " heap");
96596 } else if (vma->vm_start <= mm->start_stack &&
96597diff --git a/mm/migrate.c b/mm/migrate.c
96598index aaca868..2ebecdc 100644
96599--- a/mm/migrate.c
96600+++ b/mm/migrate.c
96601@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
96602 unsigned long chunk_start;
96603 int err;
96604
96605+ pax_track_stack();
96606+
96607 task_nodes = cpuset_mems_allowed(task);
96608
96609 err = -ENOMEM;
96610@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96611 if (!mm)
96612 return -EINVAL;
96613
96614+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96615+ if (mm != current->mm &&
96616+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96617+ err = -EPERM;
96618+ goto out;
96619+ }
96620+#endif
96621+
96622 /*
96623 * Check if this process has the right to modify the specified
96624 * process. The right exists if the process has administrative
96625@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96626 rcu_read_lock();
96627 tcred = __task_cred(task);
96628 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96629- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96630- !capable(CAP_SYS_NICE)) {
96631+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96632 rcu_read_unlock();
96633 err = -EPERM;
96634 goto out;
96635diff --git a/mm/mlock.c b/mm/mlock.c
96636index 2d846cf..98134d2 100644
96637--- a/mm/mlock.c
96638+++ b/mm/mlock.c
96639@@ -13,6 +13,7 @@
96640 #include <linux/pagemap.h>
96641 #include <linux/mempolicy.h>
96642 #include <linux/syscalls.h>
96643+#include <linux/security.h>
96644 #include <linux/sched.h>
96645 #include <linux/module.h>
96646 #include <linux/rmap.h>
96647@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
96648 }
96649 }
96650
96651-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
96652-{
96653- return (vma->vm_flags & VM_GROWSDOWN) &&
96654- (vma->vm_start == addr) &&
96655- !vma_stack_continue(vma->vm_prev, addr);
96656-}
96657-
96658 /**
96659 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
96660 * @vma: target vma
96661@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
96662 if (vma->vm_flags & VM_WRITE)
96663 gup_flags |= FOLL_WRITE;
96664
96665- /* We don't try to access the guard page of a stack vma */
96666- if (stack_guard_page(vma, start)) {
96667- addr += PAGE_SIZE;
96668- nr_pages--;
96669- }
96670-
96671 while (nr_pages > 0) {
96672 int i;
96673
96674@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96675 {
96676 unsigned long nstart, end, tmp;
96677 struct vm_area_struct * vma, * prev;
96678- int error;
96679+ int error = -EINVAL;
96680
96681 len = PAGE_ALIGN(len);
96682 end = start + len;
96683@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96684 return -EINVAL;
96685 if (end == start)
96686 return 0;
96687+ if (end > TASK_SIZE)
96688+ return -EINVAL;
96689+
96690 vma = find_vma_prev(current->mm, start, &prev);
96691 if (!vma || vma->vm_start > start)
96692 return -ENOMEM;
96693@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96694 for (nstart = start ; ; ) {
96695 unsigned int newflags;
96696
96697+#ifdef CONFIG_PAX_SEGMEXEC
96698+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96699+ break;
96700+#endif
96701+
96702 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96703
96704 newflags = vma->vm_flags | VM_LOCKED;
96705@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96706 lock_limit >>= PAGE_SHIFT;
96707
96708 /* check against resource limits */
96709+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96710 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96711 error = do_mlock(start, len, 1);
96712 up_write(&current->mm->mmap_sem);
96713@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
96714 static int do_mlockall(int flags)
96715 {
96716 struct vm_area_struct * vma, * prev = NULL;
96717- unsigned int def_flags = 0;
96718
96719 if (flags & MCL_FUTURE)
96720- def_flags = VM_LOCKED;
96721- current->mm->def_flags = def_flags;
96722+ current->mm->def_flags |= VM_LOCKED;
96723+ else
96724+ current->mm->def_flags &= ~VM_LOCKED;
96725 if (flags == MCL_FUTURE)
96726 goto out;
96727
96728 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96729- unsigned int newflags;
96730+ unsigned long newflags;
96731
96732+#ifdef CONFIG_PAX_SEGMEXEC
96733+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96734+ break;
96735+#endif
96736+
96737+ BUG_ON(vma->vm_end > TASK_SIZE);
96738 newflags = vma->vm_flags | VM_LOCKED;
96739 if (!(flags & MCL_CURRENT))
96740 newflags &= ~VM_LOCKED;
96741@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96742 lock_limit >>= PAGE_SHIFT;
96743
96744 ret = -ENOMEM;
96745+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96746 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96747 capable(CAP_IPC_LOCK))
96748 ret = do_mlockall(flags);
96749diff --git a/mm/mmap.c b/mm/mmap.c
96750index 4b80cbf..12a7861 100644
96751--- a/mm/mmap.c
96752+++ b/mm/mmap.c
96753@@ -45,6 +45,16 @@
96754 #define arch_rebalance_pgtables(addr, len) (addr)
96755 #endif
96756
96757+static inline void verify_mm_writelocked(struct mm_struct *mm)
96758+{
96759+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96760+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96761+ up_read(&mm->mmap_sem);
96762+ BUG();
96763+ }
96764+#endif
96765+}
96766+
96767 static void unmap_region(struct mm_struct *mm,
96768 struct vm_area_struct *vma, struct vm_area_struct *prev,
96769 unsigned long start, unsigned long end);
96770@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
96771 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96772 *
96773 */
96774-pgprot_t protection_map[16] = {
96775+pgprot_t protection_map[16] __read_only = {
96776 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96777 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96778 };
96779
96780 pgprot_t vm_get_page_prot(unsigned long vm_flags)
96781 {
96782- return __pgprot(pgprot_val(protection_map[vm_flags &
96783+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96784 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96785 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96786+
96787+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96788+ if (!nx_enabled &&
96789+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96790+ (vm_flags & (VM_READ | VM_WRITE)))
96791+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96792+#endif
96793+
96794+ return prot;
96795 }
96796 EXPORT_SYMBOL(vm_get_page_prot);
96797
96798 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
96799 int sysctl_overcommit_ratio = 50; /* default is 50% */
96800 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96801+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96802 struct percpu_counter vm_committed_as;
96803
96804 /*
96805@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96806 struct vm_area_struct *next = vma->vm_next;
96807
96808 might_sleep();
96809+ BUG_ON(vma->vm_mirror);
96810 if (vma->vm_ops && vma->vm_ops->close)
96811 vma->vm_ops->close(vma);
96812 if (vma->vm_file) {
96813@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96814 * not page aligned -Ram Gupta
96815 */
96816 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
96817+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
96818 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
96819 (mm->end_data - mm->start_data) > rlim)
96820 goto out;
96821@@ -704,6 +726,12 @@ static int
96822 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96823 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96824 {
96825+
96826+#ifdef CONFIG_PAX_SEGMEXEC
96827+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96828+ return 0;
96829+#endif
96830+
96831 if (is_mergeable_vma(vma, file, vm_flags) &&
96832 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96833 if (vma->vm_pgoff == vm_pgoff)
96834@@ -723,6 +751,12 @@ static int
96835 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96836 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96837 {
96838+
96839+#ifdef CONFIG_PAX_SEGMEXEC
96840+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96841+ return 0;
96842+#endif
96843+
96844 if (is_mergeable_vma(vma, file, vm_flags) &&
96845 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96846 pgoff_t vm_pglen;
96847@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96848 struct vm_area_struct *vma_merge(struct mm_struct *mm,
96849 struct vm_area_struct *prev, unsigned long addr,
96850 unsigned long end, unsigned long vm_flags,
96851- struct anon_vma *anon_vma, struct file *file,
96852+ struct anon_vma *anon_vma, struct file *file,
96853 pgoff_t pgoff, struct mempolicy *policy)
96854 {
96855 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
96856 struct vm_area_struct *area, *next;
96857
96858+#ifdef CONFIG_PAX_SEGMEXEC
96859+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96860+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96861+
96862+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96863+#endif
96864+
96865 /*
96866 * We later require that vma->vm_flags == vm_flags,
96867 * so this tests vma->vm_flags & VM_SPECIAL, too.
96868@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96869 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96870 next = next->vm_next;
96871
96872+#ifdef CONFIG_PAX_SEGMEXEC
96873+ if (prev)
96874+ prev_m = pax_find_mirror_vma(prev);
96875+ if (area)
96876+ area_m = pax_find_mirror_vma(area);
96877+ if (next)
96878+ next_m = pax_find_mirror_vma(next);
96879+#endif
96880+
96881 /*
96882 * Can it merge with the predecessor?
96883 */
96884@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96885 /* cases 1, 6 */
96886 vma_adjust(prev, prev->vm_start,
96887 next->vm_end, prev->vm_pgoff, NULL);
96888- } else /* cases 2, 5, 7 */
96889+
96890+#ifdef CONFIG_PAX_SEGMEXEC
96891+ if (prev_m)
96892+ vma_adjust(prev_m, prev_m->vm_start,
96893+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96894+#endif
96895+
96896+ } else { /* cases 2, 5, 7 */
96897 vma_adjust(prev, prev->vm_start,
96898 end, prev->vm_pgoff, NULL);
96899+
96900+#ifdef CONFIG_PAX_SEGMEXEC
96901+ if (prev_m)
96902+ vma_adjust(prev_m, prev_m->vm_start,
96903+ end_m, prev_m->vm_pgoff, NULL);
96904+#endif
96905+
96906+ }
96907 return prev;
96908 }
96909
96910@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96911 mpol_equal(policy, vma_policy(next)) &&
96912 can_vma_merge_before(next, vm_flags,
96913 anon_vma, file, pgoff+pglen)) {
96914- if (prev && addr < prev->vm_end) /* case 4 */
96915+ if (prev && addr < prev->vm_end) { /* case 4 */
96916 vma_adjust(prev, prev->vm_start,
96917 addr, prev->vm_pgoff, NULL);
96918- else /* cases 3, 8 */
96919+
96920+#ifdef CONFIG_PAX_SEGMEXEC
96921+ if (prev_m)
96922+ vma_adjust(prev_m, prev_m->vm_start,
96923+ addr_m, prev_m->vm_pgoff, NULL);
96924+#endif
96925+
96926+ } else { /* cases 3, 8 */
96927 vma_adjust(area, addr, next->vm_end,
96928 next->vm_pgoff - pglen, NULL);
96929+
96930+#ifdef CONFIG_PAX_SEGMEXEC
96931+ if (area_m)
96932+ vma_adjust(area_m, addr_m, next_m->vm_end,
96933+ next_m->vm_pgoff - pglen, NULL);
96934+#endif
96935+
96936+ }
96937 return area;
96938 }
96939
96940@@ -898,14 +978,11 @@ none:
96941 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96942 struct file *file, long pages)
96943 {
96944- const unsigned long stack_flags
96945- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
96946-
96947 if (file) {
96948 mm->shared_vm += pages;
96949 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
96950 mm->exec_vm += pages;
96951- } else if (flags & stack_flags)
96952+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
96953 mm->stack_vm += pages;
96954 if (flags & (VM_RESERVED|VM_IO))
96955 mm->reserved_vm += pages;
96956@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96957 * (the exception is when the underlying filesystem is noexec
96958 * mounted, in which case we dont add PROT_EXEC.)
96959 */
96960- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
96961+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
96962 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
96963 prot |= PROT_EXEC;
96964
96965@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96966 /* Obtain the address to map to. we verify (or select) it and ensure
96967 * that it represents a valid section of the address space.
96968 */
96969- addr = get_unmapped_area(file, addr, len, pgoff, flags);
96970+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
96971 if (addr & ~PAGE_MASK)
96972 return addr;
96973
96974@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96975 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
96976 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
96977
96978+#ifdef CONFIG_PAX_MPROTECT
96979+ if (mm->pax_flags & MF_PAX_MPROTECT) {
96980+#ifndef CONFIG_PAX_MPROTECT_COMPAT
96981+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
96982+ gr_log_rwxmmap(file);
96983+
96984+#ifdef CONFIG_PAX_EMUPLT
96985+ vm_flags &= ~VM_EXEC;
96986+#else
96987+ return -EPERM;
96988+#endif
96989+
96990+ }
96991+
96992+ if (!(vm_flags & VM_EXEC))
96993+ vm_flags &= ~VM_MAYEXEC;
96994+#else
96995+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
96996+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
96997+#endif
96998+ else
96999+ vm_flags &= ~VM_MAYWRITE;
97000+ }
97001+#endif
97002+
97003+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97004+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97005+ vm_flags &= ~VM_PAGEEXEC;
97006+#endif
97007+
97008 if (flags & MAP_LOCKED)
97009 if (!can_do_mlock())
97010 return -EPERM;
97011@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97012 locked += mm->locked_vm;
97013 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97014 lock_limit >>= PAGE_SHIFT;
97015+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97016 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97017 return -EAGAIN;
97018 }
97019@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97020 if (error)
97021 return error;
97022
97023+ if (!gr_acl_handle_mmap(file, prot))
97024+ return -EACCES;
97025+
97026 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
97027 }
97028 EXPORT_SYMBOL(do_mmap_pgoff);
97029@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
97030 */
97031 int vma_wants_writenotify(struct vm_area_struct *vma)
97032 {
97033- unsigned int vm_flags = vma->vm_flags;
97034+ unsigned long vm_flags = vma->vm_flags;
97035
97036 /* If it was private or non-writable, the write bit is already clear */
97037- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97038+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97039 return 0;
97040
97041 /* The backer wishes to know when pages are first written to? */
97042@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97043 unsigned long charged = 0;
97044 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
97045
97046+#ifdef CONFIG_PAX_SEGMEXEC
97047+ struct vm_area_struct *vma_m = NULL;
97048+#endif
97049+
97050+ /*
97051+ * mm->mmap_sem is required to protect against another thread
97052+ * changing the mappings in case we sleep.
97053+ */
97054+ verify_mm_writelocked(mm);
97055+
97056 /* Clear old maps */
97057 error = -ENOMEM;
97058-munmap_back:
97059 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97060 if (vma && vma->vm_start < addr + len) {
97061 if (do_munmap(mm, addr, len))
97062 return -ENOMEM;
97063- goto munmap_back;
97064+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97065+ BUG_ON(vma && vma->vm_start < addr + len);
97066 }
97067
97068 /* Check against address space limit. */
97069@@ -1173,6 +1294,16 @@ munmap_back:
97070 goto unacct_error;
97071 }
97072
97073+#ifdef CONFIG_PAX_SEGMEXEC
97074+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97075+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97076+ if (!vma_m) {
97077+ error = -ENOMEM;
97078+ goto free_vma;
97079+ }
97080+ }
97081+#endif
97082+
97083 vma->vm_mm = mm;
97084 vma->vm_start = addr;
97085 vma->vm_end = addr + len;
97086@@ -1180,8 +1311,9 @@ munmap_back:
97087 vma->vm_page_prot = vm_get_page_prot(vm_flags);
97088 vma->vm_pgoff = pgoff;
97089
97090+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
97091+
97092 if (file) {
97093- error = -EINVAL;
97094 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
97095 goto free_vma;
97096 if (vm_flags & VM_DENYWRITE) {
97097@@ -1195,6 +1327,19 @@ munmap_back:
97098 error = file->f_op->mmap(file, vma);
97099 if (error)
97100 goto unmap_and_free_vma;
97101+
97102+#ifdef CONFIG_PAX_SEGMEXEC
97103+ if (vma_m && (vm_flags & VM_EXECUTABLE))
97104+ added_exe_file_vma(mm);
97105+#endif
97106+
97107+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97108+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97109+ vma->vm_flags |= VM_PAGEEXEC;
97110+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97111+ }
97112+#endif
97113+
97114 if (vm_flags & VM_EXECUTABLE)
97115 added_exe_file_vma(mm);
97116
97117@@ -1207,6 +1352,8 @@ munmap_back:
97118 pgoff = vma->vm_pgoff;
97119 vm_flags = vma->vm_flags;
97120 } else if (vm_flags & VM_SHARED) {
97121+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
97122+ goto free_vma;
97123 error = shmem_zero_setup(vma);
97124 if (error)
97125 goto free_vma;
97126@@ -1218,6 +1365,11 @@ munmap_back:
97127 vma_link(mm, vma, prev, rb_link, rb_parent);
97128 file = vma->vm_file;
97129
97130+#ifdef CONFIG_PAX_SEGMEXEC
97131+ if (vma_m)
97132+ pax_mirror_vma(vma_m, vma);
97133+#endif
97134+
97135 /* Once vma denies write, undo our temporary denial count */
97136 if (correct_wcount)
97137 atomic_inc(&inode->i_writecount);
97138@@ -1226,6 +1378,7 @@ out:
97139
97140 mm->total_vm += len >> PAGE_SHIFT;
97141 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97142+ track_exec_limit(mm, addr, addr + len, vm_flags);
97143 if (vm_flags & VM_LOCKED) {
97144 /*
97145 * makes pages present; downgrades, drops, reacquires mmap_sem
97146@@ -1248,6 +1401,12 @@ unmap_and_free_vma:
97147 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
97148 charged = 0;
97149 free_vma:
97150+
97151+#ifdef CONFIG_PAX_SEGMEXEC
97152+ if (vma_m)
97153+ kmem_cache_free(vm_area_cachep, vma_m);
97154+#endif
97155+
97156 kmem_cache_free(vm_area_cachep, vma);
97157 unacct_error:
97158 if (charged)
97159@@ -1255,6 +1414,44 @@ unacct_error:
97160 return error;
97161 }
97162
97163+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
97164+{
97165+ if (!vma) {
97166+#ifdef CONFIG_STACK_GROWSUP
97167+ if (addr > sysctl_heap_stack_gap)
97168+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97169+ else
97170+ vma = find_vma(current->mm, 0);
97171+ if (vma && (vma->vm_flags & VM_GROWSUP))
97172+ return false;
97173+#endif
97174+ return true;
97175+ }
97176+
97177+ if (addr + len > vma->vm_start)
97178+ return false;
97179+
97180+ if (vma->vm_flags & VM_GROWSDOWN)
97181+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97182+#ifdef CONFIG_STACK_GROWSUP
97183+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97184+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
97185+#endif
97186+
97187+ return true;
97188+}
97189+
97190+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
97191+{
97192+ if (vma->vm_start < len)
97193+ return -ENOMEM;
97194+ if (!(vma->vm_flags & VM_GROWSDOWN))
97195+ return vma->vm_start - len;
97196+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97197+ return vma->vm_start - len - sysctl_heap_stack_gap;
97198+ return -ENOMEM;
97199+}
97200+
97201 /* Get an address range which is currently unmapped.
97202 * For shmat() with addr=0.
97203 *
97204@@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97205 if (flags & MAP_FIXED)
97206 return addr;
97207
97208+#ifdef CONFIG_PAX_RANDMMAP
97209+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97210+#endif
97211+
97212 if (addr) {
97213 addr = PAGE_ALIGN(addr);
97214- vma = find_vma(mm, addr);
97215- if (TASK_SIZE - len >= addr &&
97216- (!vma || addr + len <= vma->vm_start))
97217- return addr;
97218+ if (TASK_SIZE - len >= addr) {
97219+ vma = find_vma(mm, addr);
97220+ if (check_heap_stack_gap(vma, addr, len))
97221+ return addr;
97222+ }
97223 }
97224 if (len > mm->cached_hole_size) {
97225- start_addr = addr = mm->free_area_cache;
97226+ start_addr = addr = mm->free_area_cache;
97227 } else {
97228- start_addr = addr = TASK_UNMAPPED_BASE;
97229- mm->cached_hole_size = 0;
97230+ start_addr = addr = mm->mmap_base;
97231+ mm->cached_hole_size = 0;
97232 }
97233
97234 full_search:
97235@@ -1303,34 +1505,40 @@ full_search:
97236 * Start a new search - just in case we missed
97237 * some holes.
97238 */
97239- if (start_addr != TASK_UNMAPPED_BASE) {
97240- addr = TASK_UNMAPPED_BASE;
97241- start_addr = addr;
97242+ if (start_addr != mm->mmap_base) {
97243+ start_addr = addr = mm->mmap_base;
97244 mm->cached_hole_size = 0;
97245 goto full_search;
97246 }
97247 return -ENOMEM;
97248 }
97249- if (!vma || addr + len <= vma->vm_start) {
97250- /*
97251- * Remember the place where we stopped the search:
97252- */
97253- mm->free_area_cache = addr + len;
97254- return addr;
97255- }
97256+ if (check_heap_stack_gap(vma, addr, len))
97257+ break;
97258 if (addr + mm->cached_hole_size < vma->vm_start)
97259 mm->cached_hole_size = vma->vm_start - addr;
97260 addr = vma->vm_end;
97261 }
97262+
97263+ /*
97264+ * Remember the place where we stopped the search:
97265+ */
97266+ mm->free_area_cache = addr + len;
97267+ return addr;
97268 }
97269 #endif
97270
97271 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
97272 {
97273+
97274+#ifdef CONFIG_PAX_SEGMEXEC
97275+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97276+ return;
97277+#endif
97278+
97279 /*
97280 * Is this a new hole at the lowest possible address?
97281 */
97282- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
97283+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
97284 mm->free_area_cache = addr;
97285 mm->cached_hole_size = ~0UL;
97286 }
97287@@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97288 {
97289 struct vm_area_struct *vma;
97290 struct mm_struct *mm = current->mm;
97291- unsigned long addr = addr0;
97292+ unsigned long base = mm->mmap_base, addr = addr0;
97293
97294 /* requested length too big for entire address space */
97295 if (len > TASK_SIZE)
97296@@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97297 if (flags & MAP_FIXED)
97298 return addr;
97299
97300+#ifdef CONFIG_PAX_RANDMMAP
97301+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97302+#endif
97303+
97304 /* requesting a specific address */
97305 if (addr) {
97306 addr = PAGE_ALIGN(addr);
97307- vma = find_vma(mm, addr);
97308- if (TASK_SIZE - len >= addr &&
97309- (!vma || addr + len <= vma->vm_start))
97310- return addr;
97311+ if (TASK_SIZE - len >= addr) {
97312+ vma = find_vma(mm, addr);
97313+ if (check_heap_stack_gap(vma, addr, len))
97314+ return addr;
97315+ }
97316 }
97317
97318 /* check if free_area_cache is useful for us */
97319@@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97320 /* make sure it can fit in the remaining address space */
97321 if (addr > len) {
97322 vma = find_vma(mm, addr-len);
97323- if (!vma || addr <= vma->vm_start)
97324+ if (check_heap_stack_gap(vma, addr - len, len))
97325 /* remember the address as a hint for next time */
97326 return (mm->free_area_cache = addr-len);
97327 }
97328@@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97329 * return with success:
97330 */
97331 vma = find_vma(mm, addr);
97332- if (!vma || addr+len <= vma->vm_start)
97333+ if (check_heap_stack_gap(vma, addr, len))
97334 /* remember the address as a hint for next time */
97335 return (mm->free_area_cache = addr);
97336
97337@@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97338 mm->cached_hole_size = vma->vm_start - addr;
97339
97340 /* try just below the current vma->vm_start */
97341- addr = vma->vm_start-len;
97342- } while (len < vma->vm_start);
97343+ addr = skip_heap_stack_gap(vma, len);
97344+ } while (!IS_ERR_VALUE(addr));
97345
97346 bottomup:
97347 /*
97348@@ -1414,13 +1627,21 @@ bottomup:
97349 * can happen with large stack limits and large mmap()
97350 * allocations.
97351 */
97352+ mm->mmap_base = TASK_UNMAPPED_BASE;
97353+
97354+#ifdef CONFIG_PAX_RANDMMAP
97355+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97356+ mm->mmap_base += mm->delta_mmap;
97357+#endif
97358+
97359+ mm->free_area_cache = mm->mmap_base;
97360 mm->cached_hole_size = ~0UL;
97361- mm->free_area_cache = TASK_UNMAPPED_BASE;
97362 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
97363 /*
97364 * Restore the topdown base:
97365 */
97366- mm->free_area_cache = mm->mmap_base;
97367+ mm->mmap_base = base;
97368+ mm->free_area_cache = base;
97369 mm->cached_hole_size = ~0UL;
97370
97371 return addr;
97372@@ -1429,6 +1650,12 @@ bottomup:
97373
97374 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97375 {
97376+
97377+#ifdef CONFIG_PAX_SEGMEXEC
97378+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
97379+ return;
97380+#endif
97381+
97382 /*
97383 * Is this a new hole at the highest possible address?
97384 */
97385@@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
97386 mm->free_area_cache = addr;
97387
97388 /* dont allow allocations above current base */
97389- if (mm->free_area_cache > mm->mmap_base)
97390+ if (mm->free_area_cache > mm->mmap_base) {
97391 mm->free_area_cache = mm->mmap_base;
97392+ mm->cached_hole_size = ~0UL;
97393+ }
97394 }
97395
97396 unsigned long
97397@@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
97398
97399 EXPORT_SYMBOL(find_vma);
97400
97401-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
97402+/*
97403+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
97404+ */
97405 struct vm_area_struct *
97406 find_vma_prev(struct mm_struct *mm, unsigned long addr,
97407 struct vm_area_struct **pprev)
97408 {
97409- struct vm_area_struct *vma = NULL, *prev = NULL;
97410- struct rb_node *rb_node;
97411- if (!mm)
97412- goto out;
97413-
97414- /* Guard against addr being lower than the first VMA */
97415- vma = mm->mmap;
97416-
97417- /* Go through the RB tree quickly. */
97418- rb_node = mm->mm_rb.rb_node;
97419-
97420- while (rb_node) {
97421- struct vm_area_struct *vma_tmp;
97422- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97423-
97424- if (addr < vma_tmp->vm_end) {
97425- rb_node = rb_node->rb_left;
97426- } else {
97427- prev = vma_tmp;
97428- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
97429- break;
97430+ struct vm_area_struct *vma;
97431+
97432+ vma = find_vma(mm, addr);
97433+ if (vma) {
97434+ *pprev = vma->vm_prev;
97435+ } else {
97436+ struct rb_node *rb_node = mm->mm_rb.rb_node;
97437+ *pprev = NULL;
97438+ while (rb_node) {
97439+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
97440 rb_node = rb_node->rb_right;
97441 }
97442 }
97443+ return vma;
97444+}
97445+
97446+#ifdef CONFIG_PAX_SEGMEXEC
97447+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97448+{
97449+ struct vm_area_struct *vma_m;
97450
97451-out:
97452- *pprev = prev;
97453- return prev ? prev->vm_next : vma;
97454+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97455+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97456+ BUG_ON(vma->vm_mirror);
97457+ return NULL;
97458+ }
97459+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97460+ vma_m = vma->vm_mirror;
97461+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97462+ BUG_ON(vma->vm_file != vma_m->vm_file);
97463+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97464+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
97465+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
97466+ return vma_m;
97467 }
97468+#endif
97469
97470 /*
97471 * Verify that the stack growth is acceptable and
97472@@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97473 return -ENOMEM;
97474
97475 /* Stack limit test */
97476+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
97477 if (size > rlim[RLIMIT_STACK].rlim_cur)
97478 return -ENOMEM;
97479
97480@@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97481 unsigned long limit;
97482 locked = mm->locked_vm + grow;
97483 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
97484+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97485 if (locked > limit && !capable(CAP_IPC_LOCK))
97486 return -ENOMEM;
97487 }
97488@@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97489 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97490 * vma is the last one with address > vma->vm_end. Have to extend vma.
97491 */
97492+#ifndef CONFIG_IA64
97493+static
97494+#endif
97495 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97496 {
97497 int error;
97498+ bool locknext;
97499
97500 if (!(vma->vm_flags & VM_GROWSUP))
97501 return -EFAULT;
97502
97503+ /* Also guard against wrapping around to address 0. */
97504+ if (address < PAGE_ALIGN(address+1))
97505+ address = PAGE_ALIGN(address+1);
97506+ else
97507+ return -ENOMEM;
97508+
97509 /*
97510 * We must make sure the anon_vma is allocated
97511 * so that the anon_vma locking is not a noop.
97512 */
97513 if (unlikely(anon_vma_prepare(vma)))
97514 return -ENOMEM;
97515+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97516+ if (locknext && anon_vma_prepare(vma->vm_next))
97517+ return -ENOMEM;
97518 anon_vma_lock(vma);
97519+ if (locknext)
97520+ anon_vma_lock(vma->vm_next);
97521
97522 /*
97523 * vma->vm_start/vm_end cannot change under us because the caller
97524 * is required to hold the mmap_sem in read mode. We need the
97525- * anon_vma lock to serialize against concurrent expand_stacks.
97526- * Also guard against wrapping around to address 0.
97527+ * anon_vma locks to serialize against concurrent expand_stacks
97528+ * and expand_upwards.
97529 */
97530- if (address < PAGE_ALIGN(address+4))
97531- address = PAGE_ALIGN(address+4);
97532- else {
97533- anon_vma_unlock(vma);
97534- return -ENOMEM;
97535- }
97536 error = 0;
97537
97538 /* Somebody else might have raced and expanded it already */
97539- if (address > vma->vm_end) {
97540+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97541+ error = -ENOMEM;
97542+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97543 unsigned long size, grow;
97544
97545 size = address - vma->vm_start;
97546@@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97547 vma->vm_end = address;
97548 }
97549 }
97550+ if (locknext)
97551+ anon_vma_unlock(vma->vm_next);
97552 anon_vma_unlock(vma);
97553 return error;
97554 }
97555@@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
97556 unsigned long address)
97557 {
97558 int error;
97559+ bool lockprev = false;
97560+ struct vm_area_struct *prev;
97561
97562 /*
97563 * We must make sure the anon_vma is allocated
97564@@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
97565 if (error)
97566 return error;
97567
97568+ prev = vma->vm_prev;
97569+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97570+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97571+#endif
97572+ if (lockprev && anon_vma_prepare(prev))
97573+ return -ENOMEM;
97574+ if (lockprev)
97575+ anon_vma_lock(prev);
97576+
97577 anon_vma_lock(vma);
97578
97579 /*
97580@@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
97581 */
97582
97583 /* Somebody else might have raced and expanded it already */
97584- if (address < vma->vm_start) {
97585+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97586+ error = -ENOMEM;
97587+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97588 unsigned long size, grow;
97589
97590+#ifdef CONFIG_PAX_SEGMEXEC
97591+ struct vm_area_struct *vma_m;
97592+
97593+ vma_m = pax_find_mirror_vma(vma);
97594+#endif
97595+
97596 size = vma->vm_end - address;
97597 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97598
97599@@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
97600 if (!error) {
97601 vma->vm_start = address;
97602 vma->vm_pgoff -= grow;
97603+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97604+
97605+#ifdef CONFIG_PAX_SEGMEXEC
97606+ if (vma_m) {
97607+ vma_m->vm_start -= grow << PAGE_SHIFT;
97608+ vma_m->vm_pgoff -= grow;
97609+ }
97610+#endif
97611+
97612+
97613 }
97614 }
97615 }
97616 anon_vma_unlock(vma);
97617+ if (lockprev)
97618+ anon_vma_unlock(prev);
97619 return error;
97620 }
97621
97622@@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97623 do {
97624 long nrpages = vma_pages(vma);
97625
97626+#ifdef CONFIG_PAX_SEGMEXEC
97627+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97628+ vma = remove_vma(vma);
97629+ continue;
97630+ }
97631+#endif
97632+
97633 mm->total_vm -= nrpages;
97634 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97635 vma = remove_vma(vma);
97636@@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97637 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97638 vma->vm_prev = NULL;
97639 do {
97640+
97641+#ifdef CONFIG_PAX_SEGMEXEC
97642+ if (vma->vm_mirror) {
97643+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97644+ vma->vm_mirror->vm_mirror = NULL;
97645+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97646+ vma->vm_mirror = NULL;
97647+ }
97648+#endif
97649+
97650 rb_erase(&vma->vm_rb, &mm->mm_rb);
97651 mm->map_count--;
97652 tail_vma = vma;
97653@@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97654 struct mempolicy *pol;
97655 struct vm_area_struct *new;
97656
97657+#ifdef CONFIG_PAX_SEGMEXEC
97658+ struct vm_area_struct *vma_m, *new_m = NULL;
97659+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97660+#endif
97661+
97662 if (is_vm_hugetlb_page(vma) && (addr &
97663 ~(huge_page_mask(hstate_vma(vma)))))
97664 return -EINVAL;
97665
97666+#ifdef CONFIG_PAX_SEGMEXEC
97667+ vma_m = pax_find_mirror_vma(vma);
97668+
97669+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97670+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97671+ if (mm->map_count >= sysctl_max_map_count-1)
97672+ return -ENOMEM;
97673+ } else
97674+#endif
97675+
97676 if (mm->map_count >= sysctl_max_map_count)
97677 return -ENOMEM;
97678
97679@@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97680 if (!new)
97681 return -ENOMEM;
97682
97683+#ifdef CONFIG_PAX_SEGMEXEC
97684+ if (vma_m) {
97685+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97686+ if (!new_m) {
97687+ kmem_cache_free(vm_area_cachep, new);
97688+ return -ENOMEM;
97689+ }
97690+ }
97691+#endif
97692+
97693 /* most fields are the same, copy all, and then fixup */
97694 *new = *vma;
97695
97696@@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97697 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97698 }
97699
97700+#ifdef CONFIG_PAX_SEGMEXEC
97701+ if (vma_m) {
97702+ *new_m = *vma_m;
97703+ new_m->vm_mirror = new;
97704+ new->vm_mirror = new_m;
97705+
97706+ if (new_below)
97707+ new_m->vm_end = addr_m;
97708+ else {
97709+ new_m->vm_start = addr_m;
97710+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97711+ }
97712+ }
97713+#endif
97714+
97715 pol = mpol_dup(vma_policy(vma));
97716 if (IS_ERR(pol)) {
97717+
97718+#ifdef CONFIG_PAX_SEGMEXEC
97719+ if (new_m)
97720+ kmem_cache_free(vm_area_cachep, new_m);
97721+#endif
97722+
97723 kmem_cache_free(vm_area_cachep, new);
97724 return PTR_ERR(pol);
97725 }
97726@@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97727 else
97728 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97729
97730+#ifdef CONFIG_PAX_SEGMEXEC
97731+ if (vma_m) {
97732+ mpol_get(pol);
97733+ vma_set_policy(new_m, pol);
97734+
97735+ if (new_m->vm_file) {
97736+ get_file(new_m->vm_file);
97737+ if (vma_m->vm_flags & VM_EXECUTABLE)
97738+ added_exe_file_vma(mm);
97739+ }
97740+
97741+ if (new_m->vm_ops && new_m->vm_ops->open)
97742+ new_m->vm_ops->open(new_m);
97743+
97744+ if (new_below)
97745+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97746+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97747+ else
97748+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97749+ }
97750+#endif
97751+
97752 return 0;
97753 }
97754
97755@@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97756 * work. This now handles partial unmappings.
97757 * Jeremy Fitzhardinge <jeremy@goop.org>
97758 */
97759+#ifdef CONFIG_PAX_SEGMEXEC
97760 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97761 {
97762+ int ret = __do_munmap(mm, start, len);
97763+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97764+ return ret;
97765+
97766+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97767+}
97768+
97769+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97770+#else
97771+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97772+#endif
97773+{
97774 unsigned long end;
97775 struct vm_area_struct *vma, *prev, *last;
97776
97777+ /*
97778+ * mm->mmap_sem is required to protect against another thread
97779+ * changing the mappings in case we sleep.
97780+ */
97781+ verify_mm_writelocked(mm);
97782+
97783 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97784 return -EINVAL;
97785
97786@@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97787 /* Fix up all other VM information */
97788 remove_vma_list(mm, vma);
97789
97790+ track_exec_limit(mm, start, end, 0UL);
97791+
97792 return 0;
97793 }
97794
97795@@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97796
97797 profile_munmap(addr);
97798
97799+#ifdef CONFIG_PAX_SEGMEXEC
97800+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97801+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
97802+ return -EINVAL;
97803+#endif
97804+
97805 down_write(&mm->mmap_sem);
97806 ret = do_munmap(mm, addr, len);
97807 up_write(&mm->mmap_sem);
97808 return ret;
97809 }
97810
97811-static inline void verify_mm_writelocked(struct mm_struct *mm)
97812-{
97813-#ifdef CONFIG_DEBUG_VM
97814- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97815- WARN_ON(1);
97816- up_read(&mm->mmap_sem);
97817- }
97818-#endif
97819-}
97820-
97821 /*
97822 * this is really a simplified "do_mmap". it only handles
97823 * anonymous maps. eventually we may be able to do some
97824@@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97825 struct rb_node ** rb_link, * rb_parent;
97826 pgoff_t pgoff = addr >> PAGE_SHIFT;
97827 int error;
97828+ unsigned long charged;
97829
97830 len = PAGE_ALIGN(len);
97831 if (!len)
97832@@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97833
97834 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97835
97836+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97837+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97838+ flags &= ~VM_EXEC;
97839+
97840+#ifdef CONFIG_PAX_MPROTECT
97841+ if (mm->pax_flags & MF_PAX_MPROTECT)
97842+ flags &= ~VM_MAYEXEC;
97843+#endif
97844+
97845+ }
97846+#endif
97847+
97848 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97849 if (error & ~PAGE_MASK)
97850 return error;
97851
97852+ charged = len >> PAGE_SHIFT;
97853+
97854 /*
97855 * mlock MCL_FUTURE?
97856 */
97857 if (mm->def_flags & VM_LOCKED) {
97858 unsigned long locked, lock_limit;
97859- locked = len >> PAGE_SHIFT;
97860+ locked = charged;
97861 locked += mm->locked_vm;
97862 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97863 lock_limit >>= PAGE_SHIFT;
97864@@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97865 /*
97866 * Clear old maps. this also does some error checking for us
97867 */
97868- munmap_back:
97869 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97870 if (vma && vma->vm_start < addr + len) {
97871 if (do_munmap(mm, addr, len))
97872 return -ENOMEM;
97873- goto munmap_back;
97874+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97875+ BUG_ON(vma && vma->vm_start < addr + len);
97876 }
97877
97878 /* Check against address space limits *after* clearing old maps... */
97879- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97880+ if (!may_expand_vm(mm, charged))
97881 return -ENOMEM;
97882
97883 if (mm->map_count > sysctl_max_map_count)
97884 return -ENOMEM;
97885
97886- if (security_vm_enough_memory(len >> PAGE_SHIFT))
97887+ if (security_vm_enough_memory(charged))
97888 return -ENOMEM;
97889
97890 /* Can we just expand an old private anonymous mapping? */
97891@@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97892 */
97893 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97894 if (!vma) {
97895- vm_unacct_memory(len >> PAGE_SHIFT);
97896+ vm_unacct_memory(charged);
97897 return -ENOMEM;
97898 }
97899
97900@@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97901 vma->vm_page_prot = vm_get_page_prot(flags);
97902 vma_link(mm, vma, prev, rb_link, rb_parent);
97903 out:
97904- mm->total_vm += len >> PAGE_SHIFT;
97905+ mm->total_vm += charged;
97906 if (flags & VM_LOCKED) {
97907 if (!mlock_vma_pages_range(vma, addr, addr + len))
97908- mm->locked_vm += (len >> PAGE_SHIFT);
97909+ mm->locked_vm += charged;
97910 }
97911+ track_exec_limit(mm, addr, addr + len, flags);
97912 return addr;
97913 }
97914
97915@@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
97916 * Walk the list again, actually closing and freeing it,
97917 * with preemption enabled, without holding any MM locks.
97918 */
97919- while (vma)
97920+ while (vma) {
97921+ vma->vm_mirror = NULL;
97922 vma = remove_vma(vma);
97923+ }
97924
97925 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
97926 }
97927@@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
97928 struct vm_area_struct * __vma, * prev;
97929 struct rb_node ** rb_link, * rb_parent;
97930
97931+#ifdef CONFIG_PAX_SEGMEXEC
97932+ struct vm_area_struct *vma_m = NULL;
97933+#endif
97934+
97935 /*
97936 * The vm_pgoff of a purely anonymous vma should be irrelevant
97937 * until its first write fault, when page's anon_vma and index
97938@@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
97939 if ((vma->vm_flags & VM_ACCOUNT) &&
97940 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97941 return -ENOMEM;
97942+
97943+#ifdef CONFIG_PAX_SEGMEXEC
97944+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97945+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97946+ if (!vma_m)
97947+ return -ENOMEM;
97948+ }
97949+#endif
97950+
97951 vma_link(mm, vma, prev, rb_link, rb_parent);
97952+
97953+#ifdef CONFIG_PAX_SEGMEXEC
97954+ if (vma_m)
97955+ pax_mirror_vma(vma_m, vma);
97956+#endif
97957+
97958 return 0;
97959 }
97960
97961@@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97962 struct rb_node **rb_link, *rb_parent;
97963 struct mempolicy *pol;
97964
97965+ BUG_ON(vma->vm_mirror);
97966+
97967 /*
97968 * If anonymous vma has not yet been faulted, update new pgoff
97969 * to match new location, to increase its chance of merging.
97970@@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97971 return new_vma;
97972 }
97973
97974+#ifdef CONFIG_PAX_SEGMEXEC
97975+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
97976+{
97977+ struct vm_area_struct *prev_m;
97978+ struct rb_node **rb_link_m, *rb_parent_m;
97979+ struct mempolicy *pol_m;
97980+
97981+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
97982+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
97983+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
97984+ *vma_m = *vma;
97985+ pol_m = vma_policy(vma_m);
97986+ mpol_get(pol_m);
97987+ vma_set_policy(vma_m, pol_m);
97988+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
97989+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
97990+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
97991+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
97992+ if (vma_m->vm_file)
97993+ get_file(vma_m->vm_file);
97994+ if (vma_m->vm_ops && vma_m->vm_ops->open)
97995+ vma_m->vm_ops->open(vma_m);
97996+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
97997+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
97998+ vma_m->vm_mirror = vma;
97999+ vma->vm_mirror = vma_m;
98000+}
98001+#endif
98002+
98003 /*
98004 * Return true if the calling process may expand its vm space by the passed
98005 * number of pages
98006@@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98007 unsigned long lim;
98008
98009 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
98010-
98011+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98012 if (cur + npages > lim)
98013 return 0;
98014 return 1;
98015@@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
98016 vma->vm_start = addr;
98017 vma->vm_end = addr + len;
98018
98019+#ifdef CONFIG_PAX_MPROTECT
98020+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98021+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98022+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98023+ return -EPERM;
98024+ if (!(vm_flags & VM_EXEC))
98025+ vm_flags &= ~VM_MAYEXEC;
98026+#else
98027+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98028+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98029+#endif
98030+ else
98031+ vm_flags &= ~VM_MAYWRITE;
98032+ }
98033+#endif
98034+
98035 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
98036 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98037
98038diff --git a/mm/mprotect.c b/mm/mprotect.c
98039index 1737c7e..c7faeb4 100644
98040--- a/mm/mprotect.c
98041+++ b/mm/mprotect.c
98042@@ -24,10 +24,16 @@
98043 #include <linux/mmu_notifier.h>
98044 #include <linux/migrate.h>
98045 #include <linux/perf_event.h>
98046+
98047+#ifdef CONFIG_PAX_MPROTECT
98048+#include <linux/elf.h>
98049+#endif
98050+
98051 #include <asm/uaccess.h>
98052 #include <asm/pgtable.h>
98053 #include <asm/cacheflush.h>
98054 #include <asm/tlbflush.h>
98055+#include <asm/mmu_context.h>
98056
98057 #ifndef pgprot_modify
98058 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98059@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
98060 flush_tlb_range(vma, start, end);
98061 }
98062
98063+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98064+/* called while holding the mmap semaphor for writing except stack expansion */
98065+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98066+{
98067+ unsigned long oldlimit, newlimit = 0UL;
98068+
98069+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
98070+ return;
98071+
98072+ spin_lock(&mm->page_table_lock);
98073+ oldlimit = mm->context.user_cs_limit;
98074+ if ((prot & VM_EXEC) && oldlimit < end)
98075+ /* USER_CS limit moved up */
98076+ newlimit = end;
98077+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98078+ /* USER_CS limit moved down */
98079+ newlimit = start;
98080+
98081+ if (newlimit) {
98082+ mm->context.user_cs_limit = newlimit;
98083+
98084+#ifdef CONFIG_SMP
98085+ wmb();
98086+ cpus_clear(mm->context.cpu_user_cs_mask);
98087+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98088+#endif
98089+
98090+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98091+ }
98092+ spin_unlock(&mm->page_table_lock);
98093+ if (newlimit == end) {
98094+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98095+
98096+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98097+ if (is_vm_hugetlb_page(vma))
98098+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98099+ else
98100+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
98101+ }
98102+}
98103+#endif
98104+
98105 int
98106 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98107 unsigned long start, unsigned long end, unsigned long newflags)
98108@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98109 int error;
98110 int dirty_accountable = 0;
98111
98112+#ifdef CONFIG_PAX_SEGMEXEC
98113+ struct vm_area_struct *vma_m = NULL;
98114+ unsigned long start_m, end_m;
98115+
98116+ start_m = start + SEGMEXEC_TASK_SIZE;
98117+ end_m = end + SEGMEXEC_TASK_SIZE;
98118+#endif
98119+
98120 if (newflags == oldflags) {
98121 *pprev = vma;
98122 return 0;
98123 }
98124
98125+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98126+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98127+
98128+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98129+ return -ENOMEM;
98130+
98131+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98132+ return -ENOMEM;
98133+ }
98134+
98135 /*
98136 * If we make a private mapping writable we increase our commit;
98137 * but (without finer accounting) cannot reduce our commit if we
98138@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98139 }
98140 }
98141
98142+#ifdef CONFIG_PAX_SEGMEXEC
98143+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98144+ if (start != vma->vm_start) {
98145+ error = split_vma(mm, vma, start, 1);
98146+ if (error)
98147+ goto fail;
98148+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98149+ *pprev = (*pprev)->vm_next;
98150+ }
98151+
98152+ if (end != vma->vm_end) {
98153+ error = split_vma(mm, vma, end, 0);
98154+ if (error)
98155+ goto fail;
98156+ }
98157+
98158+ if (pax_find_mirror_vma(vma)) {
98159+ error = __do_munmap(mm, start_m, end_m - start_m);
98160+ if (error)
98161+ goto fail;
98162+ } else {
98163+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98164+ if (!vma_m) {
98165+ error = -ENOMEM;
98166+ goto fail;
98167+ }
98168+ vma->vm_flags = newflags;
98169+ pax_mirror_vma(vma_m, vma);
98170+ }
98171+ }
98172+#endif
98173+
98174 /*
98175 * First try to merge with previous and/or next vma.
98176 */
98177@@ -195,9 +293,21 @@ success:
98178 * vm_flags and vm_page_prot are protected by the mmap_sem
98179 * held in write mode.
98180 */
98181+
98182+#ifdef CONFIG_PAX_SEGMEXEC
98183+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98184+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98185+#endif
98186+
98187 vma->vm_flags = newflags;
98188+
98189+#ifdef CONFIG_PAX_MPROTECT
98190+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98191+ mm->binfmt->handle_mprotect(vma, newflags);
98192+#endif
98193+
98194 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
98195- vm_get_page_prot(newflags));
98196+ vm_get_page_prot(vma->vm_flags));
98197
98198 if (vma_wants_writenotify(vma)) {
98199 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
98200@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98201 end = start + len;
98202 if (end <= start)
98203 return -ENOMEM;
98204+
98205+#ifdef CONFIG_PAX_SEGMEXEC
98206+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98207+ if (end > SEGMEXEC_TASK_SIZE)
98208+ return -EINVAL;
98209+ } else
98210+#endif
98211+
98212+ if (end > TASK_SIZE)
98213+ return -EINVAL;
98214+
98215 if (!arch_validate_prot(prot))
98216 return -EINVAL;
98217
98218@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98219 /*
98220 * Does the application expect PROT_READ to imply PROT_EXEC:
98221 */
98222- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98223+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98224 prot |= PROT_EXEC;
98225
98226 vm_flags = calc_vm_prot_bits(prot);
98227@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98228 if (start > vma->vm_start)
98229 prev = vma;
98230
98231+#ifdef CONFIG_PAX_MPROTECT
98232+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98233+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98234+#endif
98235+
98236 for (nstart = start ; ; ) {
98237 unsigned long newflags;
98238
98239@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98240
98241 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98242 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98243+ if (prot & (PROT_WRITE | PROT_EXEC))
98244+ gr_log_rwxmprotect(vma->vm_file);
98245+
98246+ error = -EACCES;
98247+ goto out;
98248+ }
98249+
98250+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98251 error = -EACCES;
98252 goto out;
98253 }
98254@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98255 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98256 if (error)
98257 goto out;
98258+
98259+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98260+
98261 nstart = tmp;
98262
98263 if (nstart < prev->vm_end)
98264diff --git a/mm/mremap.c b/mm/mremap.c
98265index 3e98d79..1706cec 100644
98266--- a/mm/mremap.c
98267+++ b/mm/mremap.c
98268@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98269 continue;
98270 pte = ptep_clear_flush(vma, old_addr, old_pte);
98271 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98272+
98273+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98274+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98275+ pte = pte_exprotect(pte);
98276+#endif
98277+
98278 set_pte_at(mm, new_addr, new_pte, pte);
98279 }
98280
98281@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98282 if (is_vm_hugetlb_page(vma))
98283 goto Einval;
98284
98285+#ifdef CONFIG_PAX_SEGMEXEC
98286+ if (pax_find_mirror_vma(vma))
98287+ goto Einval;
98288+#endif
98289+
98290 /* We can't remap across vm area boundaries */
98291 if (old_len > vma->vm_end - addr)
98292 goto Efault;
98293@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
98294 unsigned long ret = -EINVAL;
98295 unsigned long charged = 0;
98296 unsigned long map_flags;
98297+ unsigned long pax_task_size = TASK_SIZE;
98298
98299 if (new_addr & ~PAGE_MASK)
98300 goto out;
98301
98302- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98303+#ifdef CONFIG_PAX_SEGMEXEC
98304+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98305+ pax_task_size = SEGMEXEC_TASK_SIZE;
98306+#endif
98307+
98308+ pax_task_size -= PAGE_SIZE;
98309+
98310+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98311 goto out;
98312
98313 /* Check if the location we're moving into overlaps the
98314 * old location at all, and fail if it does.
98315 */
98316- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98317- goto out;
98318-
98319- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98320+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98321 goto out;
98322
98323 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98324@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
98325 struct vm_area_struct *vma;
98326 unsigned long ret = -EINVAL;
98327 unsigned long charged = 0;
98328+ unsigned long pax_task_size = TASK_SIZE;
98329
98330 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98331 goto out;
98332@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
98333 if (!new_len)
98334 goto out;
98335
98336+#ifdef CONFIG_PAX_SEGMEXEC
98337+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98338+ pax_task_size = SEGMEXEC_TASK_SIZE;
98339+#endif
98340+
98341+ pax_task_size -= PAGE_SIZE;
98342+
98343+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98344+ old_len > pax_task_size || addr > pax_task_size-old_len)
98345+ goto out;
98346+
98347 if (flags & MREMAP_FIXED) {
98348 if (flags & MREMAP_MAYMOVE)
98349 ret = mremap_to(addr, old_len, new_addr, new_len);
98350@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
98351 addr + new_len);
98352 }
98353 ret = addr;
98354+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98355 goto out;
98356 }
98357 }
98358@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
98359 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
98360 if (ret)
98361 goto out;
98362+
98363+ map_flags = vma->vm_flags;
98364 ret = move_vma(vma, addr, old_len, new_len, new_addr);
98365+ if (!(ret & ~PAGE_MASK)) {
98366+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98367+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98368+ }
98369 }
98370 out:
98371 if (ret & ~PAGE_MASK)
98372diff --git a/mm/nommu.c b/mm/nommu.c
98373index 406e8d4..53970d3 100644
98374--- a/mm/nommu.c
98375+++ b/mm/nommu.c
98376@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
98377 int sysctl_overcommit_ratio = 50; /* default is 50% */
98378 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98379 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98380-int heap_stack_gap = 0;
98381
98382 atomic_long_t mmap_pages_allocated;
98383
98384@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98385 EXPORT_SYMBOL(find_vma);
98386
98387 /*
98388- * find a VMA
98389- * - we don't extend stack VMAs under NOMMU conditions
98390- */
98391-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98392-{
98393- return find_vma(mm, addr);
98394-}
98395-
98396-/*
98397 * expand a stack to a given address
98398 * - not supported under NOMMU conditions
98399 */
98400diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98401index 3ecab7e..594a471 100644
98402--- a/mm/page_alloc.c
98403+++ b/mm/page_alloc.c
98404@@ -289,7 +289,7 @@ out:
98405 * This usage means that zero-order pages may not be compound.
98406 */
98407
98408-static void free_compound_page(struct page *page)
98409+void free_compound_page(struct page *page)
98410 {
98411 __free_pages_ok(page, compound_order(page));
98412 }
98413@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98414 int bad = 0;
98415 int wasMlocked = __TestClearPageMlocked(page);
98416
98417+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98418+ unsigned long index = 1UL << order;
98419+#endif
98420+
98421 kmemcheck_free_shadow(page, order);
98422
98423 for (i = 0 ; i < (1 << order) ; ++i)
98424@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98425 debug_check_no_obj_freed(page_address(page),
98426 PAGE_SIZE << order);
98427 }
98428+
98429+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98430+ for (; index; --index)
98431+ sanitize_highpage(page + index - 1);
98432+#endif
98433+
98434 arch_free_page(page, order);
98435 kernel_map_pages(page, 1 << order, 0);
98436
98437@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
98438 arch_alloc_page(page, order);
98439 kernel_map_pages(page, 1 << order, 1);
98440
98441+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98442 if (gfp_flags & __GFP_ZERO)
98443 prep_zero_page(page, order, gfp_flags);
98444+#endif
98445
98446 if (order && (gfp_flags & __GFP_COMP))
98447 prep_compound_page(page, order);
98448@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
98449 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
98450 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
98451 }
98452+
98453+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98454+ sanitize_highpage(page);
98455+#endif
98456+
98457 arch_free_page(page, 0);
98458 kernel_map_pages(page, 1, 0);
98459
98460@@ -2179,6 +2196,8 @@ void show_free_areas(void)
98461 int cpu;
98462 struct zone *zone;
98463
98464+ pax_track_stack();
98465+
98466 for_each_populated_zone(zone) {
98467 show_node(zone);
98468 printk("%s per-cpu:\n", zone->name);
98469@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
98470 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
98471 }
98472 #else
98473-static void inline setup_usemap(struct pglist_data *pgdat,
98474+static inline void setup_usemap(struct pglist_data *pgdat,
98475 struct zone *zone, unsigned long zonesize) {}
98476 #endif /* CONFIG_SPARSEMEM */
98477
98478diff --git a/mm/percpu.c b/mm/percpu.c
98479index c90614a..5f7b7b8 100644
98480--- a/mm/percpu.c
98481+++ b/mm/percpu.c
98482@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98483 static unsigned int pcpu_high_unit_cpu __read_mostly;
98484
98485 /* the address of the first chunk which starts with the kernel static area */
98486-void *pcpu_base_addr __read_mostly;
98487+void *pcpu_base_addr __read_only;
98488 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98489
98490 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98491diff --git a/mm/rmap.c b/mm/rmap.c
98492index dd43373..d848cd7 100644
98493--- a/mm/rmap.c
98494+++ b/mm/rmap.c
98495@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98496 /* page_table_lock to protect against threads */
98497 spin_lock(&mm->page_table_lock);
98498 if (likely(!vma->anon_vma)) {
98499+
98500+#ifdef CONFIG_PAX_SEGMEXEC
98501+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98502+
98503+ if (vma_m) {
98504+ BUG_ON(vma_m->anon_vma);
98505+ vma_m->anon_vma = anon_vma;
98506+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
98507+ }
98508+#endif
98509+
98510 vma->anon_vma = anon_vma;
98511 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
98512 allocated = NULL;
98513diff --git a/mm/shmem.c b/mm/shmem.c
98514index 3e0005b..1d659a8 100644
98515--- a/mm/shmem.c
98516+++ b/mm/shmem.c
98517@@ -31,7 +31,7 @@
98518 #include <linux/swap.h>
98519 #include <linux/ima.h>
98520
98521-static struct vfsmount *shm_mnt;
98522+struct vfsmount *shm_mnt;
98523
98524 #ifdef CONFIG_SHMEM
98525 /*
98526@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
98527 goto unlock;
98528 }
98529 entry = shmem_swp_entry(info, index, NULL);
98530+ if (!entry)
98531+ goto unlock;
98532 if (entry->val) {
98533 /*
98534 * The more uptodate page coming down from a stacked
98535@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
98536 struct vm_area_struct pvma;
98537 struct page *page;
98538
98539+ pax_track_stack();
98540+
98541 spol = mpol_cond_copy(&mpol,
98542 mpol_shared_policy_lookup(&info->policy, idx));
98543
98544@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
98545
98546 info = SHMEM_I(inode);
98547 inode->i_size = len-1;
98548- if (len <= (char *)inode - (char *)info) {
98549+ if (len <= (char *)inode - (char *)info && len <= 64) {
98550 /* do it inline */
98551 memcpy(info, symname, len);
98552 inode->i_op = &shmem_symlink_inline_operations;
98553@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98554 int err = -ENOMEM;
98555
98556 /* Round up to L1_CACHE_BYTES to resist false sharing */
98557- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98558- L1_CACHE_BYTES), GFP_KERNEL);
98559+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98560 if (!sbinfo)
98561 return -ENOMEM;
98562
98563diff --git a/mm/slab.c b/mm/slab.c
98564index c8d466a..909e01e 100644
98565--- a/mm/slab.c
98566+++ b/mm/slab.c
98567@@ -174,7 +174,7 @@
98568
98569 /* Legal flag mask for kmem_cache_create(). */
98570 #if DEBUG
98571-# define CREATE_MASK (SLAB_RED_ZONE | \
98572+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
98573 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
98574 SLAB_CACHE_DMA | \
98575 SLAB_STORE_USER | \
98576@@ -182,7 +182,7 @@
98577 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98578 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
98579 #else
98580-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
98581+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
98582 SLAB_CACHE_DMA | \
98583 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
98584 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98585@@ -308,7 +308,7 @@ struct kmem_list3 {
98586 * Need this for bootstrapping a per node allocator.
98587 */
98588 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
98589-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
98590+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
98591 #define CACHE_CACHE 0
98592 #define SIZE_AC MAX_NUMNODES
98593 #define SIZE_L3 (2 * MAX_NUMNODES)
98594@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
98595 if ((x)->max_freeable < i) \
98596 (x)->max_freeable = i; \
98597 } while (0)
98598-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98599-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98600-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98601-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98602+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98603+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98604+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98605+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98606 #else
98607 #define STATS_INC_ACTIVE(x) do { } while (0)
98608 #define STATS_DEC_ACTIVE(x) do { } while (0)
98609@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
98610 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98611 */
98612 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98613- const struct slab *slab, void *obj)
98614+ const struct slab *slab, const void *obj)
98615 {
98616 u32 offset = (obj - slab->s_mem);
98617 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98618@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
98619 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
98620 sizes[INDEX_AC].cs_size,
98621 ARCH_KMALLOC_MINALIGN,
98622- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98623+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98624 NULL);
98625
98626 if (INDEX_AC != INDEX_L3) {
98627@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
98628 kmem_cache_create(names[INDEX_L3].name,
98629 sizes[INDEX_L3].cs_size,
98630 ARCH_KMALLOC_MINALIGN,
98631- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98632+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98633 NULL);
98634 }
98635
98636@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
98637 sizes->cs_cachep = kmem_cache_create(names->name,
98638 sizes->cs_size,
98639 ARCH_KMALLOC_MINALIGN,
98640- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98641+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98642 NULL);
98643 }
98644 #ifdef CONFIG_ZONE_DMA
98645@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
98646 }
98647 /* cpu stats */
98648 {
98649- unsigned long allochit = atomic_read(&cachep->allochit);
98650- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98651- unsigned long freehit = atomic_read(&cachep->freehit);
98652- unsigned long freemiss = atomic_read(&cachep->freemiss);
98653+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98654+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98655+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98656+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98657
98658 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98659 allochit, allocmiss, freehit, freemiss);
98660@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
98661
98662 static int __init slab_proc_init(void)
98663 {
98664- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
98665+ mode_t gr_mode = S_IRUGO;
98666+
98667+#ifdef CONFIG_GRKERNSEC_PROC_ADD
98668+ gr_mode = S_IRUSR;
98669+#endif
98670+
98671+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
98672 #ifdef CONFIG_DEBUG_SLAB_LEAK
98673- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98674+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
98675 #endif
98676 return 0;
98677 }
98678 module_init(slab_proc_init);
98679 #endif
98680
98681+void check_object_size(const void *ptr, unsigned long n, bool to)
98682+{
98683+
98684+#ifdef CONFIG_PAX_USERCOPY
98685+ struct page *page;
98686+ struct kmem_cache *cachep = NULL;
98687+ struct slab *slabp;
98688+ unsigned int objnr;
98689+ unsigned long offset;
98690+ const char *type;
98691+
98692+ if (!n)
98693+ return;
98694+
98695+ type = "<null>";
98696+ if (ZERO_OR_NULL_PTR(ptr))
98697+ goto report;
98698+
98699+ if (!virt_addr_valid(ptr))
98700+ return;
98701+
98702+ page = virt_to_head_page(ptr);
98703+
98704+ type = "<process stack>";
98705+ if (!PageSlab(page)) {
98706+ if (object_is_on_stack(ptr, n) == -1)
98707+ goto report;
98708+ return;
98709+ }
98710+
98711+ cachep = page_get_cache(page);
98712+ type = cachep->name;
98713+ if (!(cachep->flags & SLAB_USERCOPY))
98714+ goto report;
98715+
98716+ slabp = page_get_slab(page);
98717+ objnr = obj_to_index(cachep, slabp, ptr);
98718+ BUG_ON(objnr >= cachep->num);
98719+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
98720+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
98721+ return;
98722+
98723+report:
98724+ pax_report_usercopy(ptr, n, to, type);
98725+#endif
98726+
98727+}
98728+EXPORT_SYMBOL(check_object_size);
98729+
98730 /**
98731 * ksize - get the actual amount of memory allocated for a given object
98732 * @objp: Pointer to the object
98733diff --git a/mm/slob.c b/mm/slob.c
98734index 837ebd6..0bd23bc 100644
98735--- a/mm/slob.c
98736+++ b/mm/slob.c
98737@@ -29,7 +29,7 @@
98738 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
98739 * alloc_pages() directly, allocating compound pages so the page order
98740 * does not have to be separately tracked, and also stores the exact
98741- * allocation size in page->private so that it can be used to accurately
98742+ * allocation size in slob_page->size so that it can be used to accurately
98743 * provide ksize(). These objects are detected in kfree() because slob_page()
98744 * is false for them.
98745 *
98746@@ -58,6 +58,7 @@
98747 */
98748
98749 #include <linux/kernel.h>
98750+#include <linux/sched.h>
98751 #include <linux/slab.h>
98752 #include <linux/mm.h>
98753 #include <linux/swap.h> /* struct reclaim_state */
98754@@ -100,7 +101,8 @@ struct slob_page {
98755 unsigned long flags; /* mandatory */
98756 atomic_t _count; /* mandatory */
98757 slobidx_t units; /* free units left in page */
98758- unsigned long pad[2];
98759+ unsigned long pad[1];
98760+ unsigned long size; /* size when >=PAGE_SIZE */
98761 slob_t *free; /* first free slob_t in page */
98762 struct list_head list; /* linked list of free pages */
98763 };
98764@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
98765 */
98766 static inline int is_slob_page(struct slob_page *sp)
98767 {
98768- return PageSlab((struct page *)sp);
98769+ return PageSlab((struct page *)sp) && !sp->size;
98770 }
98771
98772 static inline void set_slob_page(struct slob_page *sp)
98773@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
98774
98775 static inline struct slob_page *slob_page(const void *addr)
98776 {
98777- return (struct slob_page *)virt_to_page(addr);
98778+ return (struct slob_page *)virt_to_head_page(addr);
98779 }
98780
98781 /*
98782@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
98783 /*
98784 * Return the size of a slob block.
98785 */
98786-static slobidx_t slob_units(slob_t *s)
98787+static slobidx_t slob_units(const slob_t *s)
98788 {
98789 if (s->units > 0)
98790 return s->units;
98791@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
98792 /*
98793 * Return the next free slob block pointer after this one.
98794 */
98795-static slob_t *slob_next(slob_t *s)
98796+static slob_t *slob_next(const slob_t *s)
98797 {
98798 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
98799 slobidx_t next;
98800@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
98801 /*
98802 * Returns true if s is the last free block in its page.
98803 */
98804-static int slob_last(slob_t *s)
98805+static int slob_last(const slob_t *s)
98806 {
98807 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
98808 }
98809@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
98810 if (!page)
98811 return NULL;
98812
98813+ set_slob_page(page);
98814 return page_address(page);
98815 }
98816
98817@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
98818 if (!b)
98819 return NULL;
98820 sp = slob_page(b);
98821- set_slob_page(sp);
98822
98823 spin_lock_irqsave(&slob_lock, flags);
98824 sp->units = SLOB_UNITS(PAGE_SIZE);
98825 sp->free = b;
98826+ sp->size = 0;
98827 INIT_LIST_HEAD(&sp->list);
98828 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
98829 set_slob_page_free(sp, slob_list);
98830@@ -475,10 +478,9 @@ out:
98831 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
98832 #endif
98833
98834-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98835+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
98836 {
98837- unsigned int *m;
98838- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98839+ slob_t *m;
98840 void *ret;
98841
98842 lockdep_trace_alloc(gfp);
98843@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98844
98845 if (!m)
98846 return NULL;
98847- *m = size;
98848+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
98849+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
98850+ m[0].units = size;
98851+ m[1].units = align;
98852 ret = (void *)m + align;
98853
98854 trace_kmalloc_node(_RET_IP_, ret,
98855@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98856
98857 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
98858 if (ret) {
98859- struct page *page;
98860- page = virt_to_page(ret);
98861- page->private = size;
98862+ struct slob_page *sp;
98863+ sp = slob_page(ret);
98864+ sp->size = size;
98865 }
98866
98867 trace_kmalloc_node(_RET_IP_, ret,
98868 size, PAGE_SIZE << order, gfp, node);
98869 }
98870
98871- kmemleak_alloc(ret, size, 1, gfp);
98872+ return ret;
98873+}
98874+
98875+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98876+{
98877+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98878+ void *ret = __kmalloc_node_align(size, gfp, node, align);
98879+
98880+ if (!ZERO_OR_NULL_PTR(ret))
98881+ kmemleak_alloc(ret, size, 1, gfp);
98882 return ret;
98883 }
98884 EXPORT_SYMBOL(__kmalloc_node);
98885@@ -528,13 +542,92 @@ void kfree(const void *block)
98886 sp = slob_page(block);
98887 if (is_slob_page(sp)) {
98888 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98889- unsigned int *m = (unsigned int *)(block - align);
98890- slob_free(m, *m + align);
98891- } else
98892+ slob_t *m = (slob_t *)(block - align);
98893+ slob_free(m, m[0].units + align);
98894+ } else {
98895+ clear_slob_page(sp);
98896+ free_slob_page(sp);
98897+ sp->size = 0;
98898 put_page(&sp->page);
98899+ }
98900 }
98901 EXPORT_SYMBOL(kfree);
98902
98903+void check_object_size(const void *ptr, unsigned long n, bool to)
98904+{
98905+
98906+#ifdef CONFIG_PAX_USERCOPY
98907+ struct slob_page *sp;
98908+ const slob_t *free;
98909+ const void *base;
98910+ unsigned long flags;
98911+ const char *type;
98912+
98913+ if (!n)
98914+ return;
98915+
98916+ type = "<null>";
98917+ if (ZERO_OR_NULL_PTR(ptr))
98918+ goto report;
98919+
98920+ if (!virt_addr_valid(ptr))
98921+ return;
98922+
98923+ type = "<process stack>";
98924+ sp = slob_page(ptr);
98925+ if (!PageSlab((struct page *)sp)) {
98926+ if (object_is_on_stack(ptr, n) == -1)
98927+ goto report;
98928+ return;
98929+ }
98930+
98931+ type = "<slob>";
98932+ if (sp->size) {
98933+ base = page_address(&sp->page);
98934+ if (base <= ptr && n <= sp->size - (ptr - base))
98935+ return;
98936+ goto report;
98937+ }
98938+
98939+ /* some tricky double walking to find the chunk */
98940+ spin_lock_irqsave(&slob_lock, flags);
98941+ base = (void *)((unsigned long)ptr & PAGE_MASK);
98942+ free = sp->free;
98943+
98944+ while (!slob_last(free) && (void *)free <= ptr) {
98945+ base = free + slob_units(free);
98946+ free = slob_next(free);
98947+ }
98948+
98949+ while (base < (void *)free) {
98950+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
98951+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
98952+ int offset;
98953+
98954+ if (ptr < base + align)
98955+ break;
98956+
98957+ offset = ptr - base - align;
98958+ if (offset >= m) {
98959+ base += size;
98960+ continue;
98961+ }
98962+
98963+ if (n > m - offset)
98964+ break;
98965+
98966+ spin_unlock_irqrestore(&slob_lock, flags);
98967+ return;
98968+ }
98969+
98970+ spin_unlock_irqrestore(&slob_lock, flags);
98971+report:
98972+ pax_report_usercopy(ptr, n, to, type);
98973+#endif
98974+
98975+}
98976+EXPORT_SYMBOL(check_object_size);
98977+
98978 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
98979 size_t ksize(const void *block)
98980 {
98981@@ -547,10 +640,10 @@ size_t ksize(const void *block)
98982 sp = slob_page(block);
98983 if (is_slob_page(sp)) {
98984 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98985- unsigned int *m = (unsigned int *)(block - align);
98986- return SLOB_UNITS(*m) * SLOB_UNIT;
98987+ slob_t *m = (slob_t *)(block - align);
98988+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
98989 } else
98990- return sp->page.private;
98991+ return sp->size;
98992 }
98993 EXPORT_SYMBOL(ksize);
98994
98995@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
98996 {
98997 struct kmem_cache *c;
98998
98999+#ifdef CONFIG_PAX_USERCOPY
99000+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
99001+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
99002+#else
99003 c = slob_alloc(sizeof(struct kmem_cache),
99004 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
99005+#endif
99006
99007 if (c) {
99008 c->name = name;
99009@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99010 {
99011 void *b;
99012
99013+#ifdef CONFIG_PAX_USERCOPY
99014+ b = __kmalloc_node_align(c->size, flags, node, c->align);
99015+#else
99016 if (c->size < PAGE_SIZE) {
99017 b = slob_alloc(c->size, flags, c->align, node);
99018 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99019 SLOB_UNITS(c->size) * SLOB_UNIT,
99020 flags, node);
99021 } else {
99022+ struct slob_page *sp;
99023+
99024 b = slob_new_pages(flags, get_order(c->size), node);
99025+ sp = slob_page(b);
99026+ sp->size = c->size;
99027 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
99028 PAGE_SIZE << get_order(c->size),
99029 flags, node);
99030 }
99031+#endif
99032
99033 if (c->ctor)
99034 c->ctor(b);
99035@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
99036
99037 static void __kmem_cache_free(void *b, int size)
99038 {
99039- if (size < PAGE_SIZE)
99040+ struct slob_page *sp = slob_page(b);
99041+
99042+ if (is_slob_page(sp))
99043 slob_free(b, size);
99044- else
99045+ else {
99046+ clear_slob_page(sp);
99047+ free_slob_page(sp);
99048+ sp->size = 0;
99049 slob_free_pages(b, get_order(size));
99050+ }
99051 }
99052
99053 static void kmem_rcu_free(struct rcu_head *head)
99054@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
99055
99056 void kmem_cache_free(struct kmem_cache *c, void *b)
99057 {
99058+ int size = c->size;
99059+
99060+#ifdef CONFIG_PAX_USERCOPY
99061+ if (size + c->align < PAGE_SIZE) {
99062+ size += c->align;
99063+ b -= c->align;
99064+ }
99065+#endif
99066+
99067 kmemleak_free_recursive(b, c->flags);
99068 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99069 struct slob_rcu *slob_rcu;
99070- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99071+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99072 INIT_RCU_HEAD(&slob_rcu->head);
99073- slob_rcu->size = c->size;
99074+ slob_rcu->size = size;
99075 call_rcu(&slob_rcu->head, kmem_rcu_free);
99076 } else {
99077- __kmem_cache_free(b, c->size);
99078+ __kmem_cache_free(b, size);
99079 }
99080
99081+#ifdef CONFIG_PAX_USERCOPY
99082+ trace_kfree(_RET_IP_, b);
99083+#else
99084 trace_kmem_cache_free(_RET_IP_, b);
99085+#endif
99086+
99087 }
99088 EXPORT_SYMBOL(kmem_cache_free);
99089
99090diff --git a/mm/slub.c b/mm/slub.c
99091index 4996fc7..87e01d0 100644
99092--- a/mm/slub.c
99093+++ b/mm/slub.c
99094@@ -201,7 +201,7 @@ struct track {
99095
99096 enum track_item { TRACK_ALLOC, TRACK_FREE };
99097
99098-#ifdef CONFIG_SLUB_DEBUG
99099+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99100 static int sysfs_slab_add(struct kmem_cache *);
99101 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99102 static void sysfs_slab_remove(struct kmem_cache *);
99103@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
99104 if (!t->addr)
99105 return;
99106
99107- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99108+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99109 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99110 }
99111
99112@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
99113
99114 page = virt_to_head_page(x);
99115
99116+ BUG_ON(!PageSlab(page));
99117+
99118 slab_free(s, page, x, _RET_IP_);
99119
99120 trace_kmem_cache_free(_RET_IP_, x);
99121@@ -1937,7 +1939,7 @@ static int slub_min_objects;
99122 * Merge control. If this is set then no merging of slab caches will occur.
99123 * (Could be removed. This was introduced to pacify the merge skeptics.)
99124 */
99125-static int slub_nomerge;
99126+static int slub_nomerge = 1;
99127
99128 /*
99129 * Calculate the order of allocation given an slab object size.
99130@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
99131 * list to avoid pounding the page allocator excessively.
99132 */
99133 set_min_partial(s, ilog2(s->size));
99134- s->refcount = 1;
99135+ atomic_set(&s->refcount, 1);
99136 #ifdef CONFIG_NUMA
99137 s->remote_node_defrag_ratio = 1000;
99138 #endif
99139@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
99140 void kmem_cache_destroy(struct kmem_cache *s)
99141 {
99142 down_write(&slub_lock);
99143- s->refcount--;
99144- if (!s->refcount) {
99145+ if (atomic_dec_and_test(&s->refcount)) {
99146 list_del(&s->list);
99147 up_write(&slub_lock);
99148 if (kmem_cache_close(s)) {
99149@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
99150 __setup("slub_nomerge", setup_slub_nomerge);
99151
99152 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
99153- const char *name, int size, gfp_t gfp_flags)
99154+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
99155 {
99156- unsigned int flags = 0;
99157-
99158 if (gfp_flags & SLUB_DMA)
99159- flags = SLAB_CACHE_DMA;
99160+ flags |= SLAB_CACHE_DMA;
99161
99162 /*
99163 * This function is called with IRQs disabled during early-boot on
99164@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99165 EXPORT_SYMBOL(__kmalloc_node);
99166 #endif
99167
99168+void check_object_size(const void *ptr, unsigned long n, bool to)
99169+{
99170+
99171+#ifdef CONFIG_PAX_USERCOPY
99172+ struct page *page;
99173+ struct kmem_cache *s = NULL;
99174+ unsigned long offset;
99175+ const char *type;
99176+
99177+ if (!n)
99178+ return;
99179+
99180+ type = "<null>";
99181+ if (ZERO_OR_NULL_PTR(ptr))
99182+ goto report;
99183+
99184+ if (!virt_addr_valid(ptr))
99185+ return;
99186+
99187+ page = get_object_page(ptr);
99188+
99189+ type = "<process stack>";
99190+ if (!page) {
99191+ if (object_is_on_stack(ptr, n) == -1)
99192+ goto report;
99193+ return;
99194+ }
99195+
99196+ s = page->slab;
99197+ type = s->name;
99198+ if (!(s->flags & SLAB_USERCOPY))
99199+ goto report;
99200+
99201+ offset = (ptr - page_address(page)) % s->size;
99202+ if (offset <= s->objsize && n <= s->objsize - offset)
99203+ return;
99204+
99205+report:
99206+ pax_report_usercopy(ptr, n, to, type);
99207+#endif
99208+
99209+}
99210+EXPORT_SYMBOL(check_object_size);
99211+
99212 size_t ksize(const void *object)
99213 {
99214 struct page *page;
99215@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
99216 * kmem_cache_open for slab_state == DOWN.
99217 */
99218 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
99219- sizeof(struct kmem_cache_node), GFP_NOWAIT);
99220- kmalloc_caches[0].refcount = -1;
99221+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
99222+ atomic_set(&kmalloc_caches[0].refcount, -1);
99223 caches++;
99224
99225 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
99226@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
99227 /* Caches that are not of the two-to-the-power-of size */
99228 if (KMALLOC_MIN_SIZE <= 32) {
99229 create_kmalloc_cache(&kmalloc_caches[1],
99230- "kmalloc-96", 96, GFP_NOWAIT);
99231+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
99232 caches++;
99233 }
99234 if (KMALLOC_MIN_SIZE <= 64) {
99235 create_kmalloc_cache(&kmalloc_caches[2],
99236- "kmalloc-192", 192, GFP_NOWAIT);
99237+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
99238 caches++;
99239 }
99240
99241 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
99242 create_kmalloc_cache(&kmalloc_caches[i],
99243- "kmalloc", 1 << i, GFP_NOWAIT);
99244+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
99245 caches++;
99246 }
99247
99248@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
99249 /*
99250 * We may have set a slab to be unmergeable during bootstrap.
99251 */
99252- if (s->refcount < 0)
99253+ if (atomic_read(&s->refcount) < 0)
99254 return 1;
99255
99256 return 0;
99257@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99258 if (s) {
99259 int cpu;
99260
99261- s->refcount++;
99262+ atomic_inc(&s->refcount);
99263 /*
99264 * Adjust the object sizes so that we clear
99265 * the complete object on kzalloc.
99266@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
99267
99268 if (sysfs_slab_alias(s, name)) {
99269 down_write(&slub_lock);
99270- s->refcount--;
99271+ atomic_dec(&s->refcount);
99272 up_write(&slub_lock);
99273 goto err;
99274 }
99275@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
99276
99277 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99278 {
99279- return sprintf(buf, "%d\n", s->refcount - 1);
99280+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
99281 }
99282 SLAB_ATTR_RO(aliases);
99283
99284@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
99285 kfree(s);
99286 }
99287
99288-static struct sysfs_ops slab_sysfs_ops = {
99289+static const struct sysfs_ops slab_sysfs_ops = {
99290 .show = slab_attr_show,
99291 .store = slab_attr_store,
99292 };
99293@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
99294 return 0;
99295 }
99296
99297-static struct kset_uevent_ops slab_uevent_ops = {
99298+static const struct kset_uevent_ops slab_uevent_ops = {
99299 .filter = uevent_filter,
99300 };
99301
99302@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
99303 return name;
99304 }
99305
99306+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99307 static int sysfs_slab_add(struct kmem_cache *s)
99308 {
99309 int err;
99310@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
99311 kobject_del(&s->kobj);
99312 kobject_put(&s->kobj);
99313 }
99314+#endif
99315
99316 /*
99317 * Need to buffer aliases during bootup until sysfs becomes
99318@@ -4632,6 +4677,7 @@ struct saved_alias {
99319
99320 static struct saved_alias *alias_list;
99321
99322+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99323 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99324 {
99325 struct saved_alias *al;
99326@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99327 alias_list = al;
99328 return 0;
99329 }
99330+#endif
99331
99332 static int __init slab_sysfs_init(void)
99333 {
99334@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
99335
99336 static int __init slab_proc_init(void)
99337 {
99338- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
99339+ mode_t gr_mode = S_IRUGO;
99340+
99341+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99342+ gr_mode = S_IRUSR;
99343+#endif
99344+
99345+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
99346 return 0;
99347 }
99348 module_init(slab_proc_init);
99349diff --git a/mm/swap.c b/mm/swap.c
99350index 308e57d..5de19c0 100644
99351--- a/mm/swap.c
99352+++ b/mm/swap.c
99353@@ -30,6 +30,7 @@
99354 #include <linux/notifier.h>
99355 #include <linux/backing-dev.h>
99356 #include <linux/memcontrol.h>
99357+#include <linux/hugetlb.h>
99358
99359 #include "internal.h"
99360
99361@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
99362 compound_page_dtor *dtor;
99363
99364 dtor = get_compound_page_dtor(page);
99365+ if (!PageHuge(page))
99366+ BUG_ON(dtor != free_compound_page);
99367 (*dtor)(page);
99368 }
99369 }
99370diff --git a/mm/util.c b/mm/util.c
99371index e48b493..24a601d 100644
99372--- a/mm/util.c
99373+++ b/mm/util.c
99374@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
99375 void arch_pick_mmap_layout(struct mm_struct *mm)
99376 {
99377 mm->mmap_base = TASK_UNMAPPED_BASE;
99378+
99379+#ifdef CONFIG_PAX_RANDMMAP
99380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
99381+ mm->mmap_base += mm->delta_mmap;
99382+#endif
99383+
99384 mm->get_unmapped_area = arch_get_unmapped_area;
99385 mm->unmap_area = arch_unmap_area;
99386 }
99387diff --git a/mm/vmalloc.c b/mm/vmalloc.c
99388index f34ffd0..90d7407 100644
99389--- a/mm/vmalloc.c
99390+++ b/mm/vmalloc.c
99391@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
99392
99393 pte = pte_offset_kernel(pmd, addr);
99394 do {
99395- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99396- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99397+
99398+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99399+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
99400+ BUG_ON(!pte_exec(*pte));
99401+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
99402+ continue;
99403+ }
99404+#endif
99405+
99406+ {
99407+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
99408+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
99409+ }
99410 } while (pte++, addr += PAGE_SIZE, addr != end);
99411 }
99412
99413@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99414 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
99415 {
99416 pte_t *pte;
99417+ int ret = -ENOMEM;
99418
99419 /*
99420 * nr is a running index into the array which helps higher level
99421@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
99422 pte = pte_alloc_kernel(pmd, addr);
99423 if (!pte)
99424 return -ENOMEM;
99425+
99426+ pax_open_kernel();
99427 do {
99428 struct page *page = pages[*nr];
99429
99430- if (WARN_ON(!pte_none(*pte)))
99431- return -EBUSY;
99432- if (WARN_ON(!page))
99433- return -ENOMEM;
99434+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99435+ if (!(pgprot_val(prot) & _PAGE_NX))
99436+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
99437+ else
99438+#endif
99439+
99440+ if (WARN_ON(!pte_none(*pte))) {
99441+ ret = -EBUSY;
99442+ goto out;
99443+ }
99444+ if (WARN_ON(!page)) {
99445+ ret = -ENOMEM;
99446+ goto out;
99447+ }
99448 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
99449 (*nr)++;
99450 } while (pte++, addr += PAGE_SIZE, addr != end);
99451- return 0;
99452+ ret = 0;
99453+out:
99454+ pax_close_kernel();
99455+ return ret;
99456 }
99457
99458 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
99459@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
99460 * and fall back on vmalloc() if that fails. Others
99461 * just put it in the vmalloc space.
99462 */
99463-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
99464+#ifdef CONFIG_MODULES
99465+#ifdef MODULES_VADDR
99466 unsigned long addr = (unsigned long)x;
99467 if (addr >= MODULES_VADDR && addr < MODULES_END)
99468 return 1;
99469 #endif
99470+
99471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
99472+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
99473+ return 1;
99474+#endif
99475+
99476+#endif
99477+
99478 return is_vmalloc_addr(x);
99479 }
99480
99481@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
99482
99483 if (!pgd_none(*pgd)) {
99484 pud_t *pud = pud_offset(pgd, addr);
99485+#ifdef CONFIG_X86
99486+ if (!pud_large(*pud))
99487+#endif
99488 if (!pud_none(*pud)) {
99489 pmd_t *pmd = pmd_offset(pud, addr);
99490+#ifdef CONFIG_X86
99491+ if (!pmd_large(*pmd))
99492+#endif
99493 if (!pmd_none(*pmd)) {
99494 pte_t *ptep, pte;
99495
99496@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
99497 struct rb_node *tmp;
99498
99499 while (*p) {
99500- struct vmap_area *tmp;
99501+ struct vmap_area *varea;
99502
99503 parent = *p;
99504- tmp = rb_entry(parent, struct vmap_area, rb_node);
99505- if (va->va_start < tmp->va_end)
99506+ varea = rb_entry(parent, struct vmap_area, rb_node);
99507+ if (va->va_start < varea->va_end)
99508 p = &(*p)->rb_left;
99509- else if (va->va_end > tmp->va_start)
99510+ else if (va->va_end > varea->va_start)
99511 p = &(*p)->rb_right;
99512 else
99513 BUG();
99514@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
99515 struct vm_struct *area;
99516
99517 BUG_ON(in_interrupt());
99518+
99519+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99520+ if (flags & VM_KERNEXEC) {
99521+ if (start != VMALLOC_START || end != VMALLOC_END)
99522+ return NULL;
99523+ start = (unsigned long)MODULES_EXEC_VADDR;
99524+ end = (unsigned long)MODULES_EXEC_END;
99525+ }
99526+#endif
99527+
99528 if (flags & VM_IOREMAP) {
99529 int bit = fls(size);
99530
99531@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
99532 if (count > totalram_pages)
99533 return NULL;
99534
99535+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99536+ if (!(pgprot_val(prot) & _PAGE_NX))
99537+ flags |= VM_KERNEXEC;
99538+#endif
99539+
99540 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
99541 __builtin_return_address(0));
99542 if (!area)
99543@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
99544 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
99545 return NULL;
99546
99547+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
99548+ if (!(pgprot_val(prot) & _PAGE_NX))
99549+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
99550+ VMALLOC_START, VMALLOC_END, node,
99551+ gfp_mask, caller);
99552+ else
99553+#endif
99554+
99555 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
99556 VMALLOC_START, VMALLOC_END, node,
99557 gfp_mask, caller);
99558@@ -1698,10 +1763,9 @@ EXPORT_SYMBOL(vmalloc_node);
99559 * For tight control over page level allocator and protection flags
99560 * use __vmalloc() instead.
99561 */
99562-
99563 void *vmalloc_exec(unsigned long size)
99564 {
99565- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
99566+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
99567 -1, __builtin_return_address(0));
99568 }
99569
99570@@ -1998,6 +2062,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
99571 unsigned long uaddr = vma->vm_start;
99572 unsigned long usize = vma->vm_end - vma->vm_start;
99573
99574+ BUG_ON(vma->vm_mirror);
99575+
99576 if ((PAGE_SIZE-1) & (unsigned long)addr)
99577 return -EINVAL;
99578
99579diff --git a/mm/vmstat.c b/mm/vmstat.c
99580index 42d76c6..5643dc4 100644
99581--- a/mm/vmstat.c
99582+++ b/mm/vmstat.c
99583@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
99584 *
99585 * vm_stat contains the global counters
99586 */
99587-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99588+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99589 EXPORT_SYMBOL(vm_stat);
99590
99591 #ifdef CONFIG_SMP
99592@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
99593 v = p->vm_stat_diff[i];
99594 p->vm_stat_diff[i] = 0;
99595 local_irq_restore(flags);
99596- atomic_long_add(v, &zone->vm_stat[i]);
99597+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
99598 global_diff[i] += v;
99599 #ifdef CONFIG_NUMA
99600 /* 3 seconds idle till flush */
99601@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
99602
99603 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
99604 if (global_diff[i])
99605- atomic_long_add(global_diff[i], &vm_stat[i]);
99606+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
99607 }
99608
99609 #endif
99610@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
99611 start_cpu_timer(cpu);
99612 #endif
99613 #ifdef CONFIG_PROC_FS
99614- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
99615- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
99616- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
99617- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
99618+ {
99619+ mode_t gr_mode = S_IRUGO;
99620+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99621+ gr_mode = S_IRUSR;
99622+#endif
99623+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
99624+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
99625+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
99626+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
99627+#else
99628+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
99629+#endif
99630+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
99631+ }
99632 #endif
99633 return 0;
99634 }
99635diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
99636index a29c5ab..6143f20 100644
99637--- a/net/8021q/vlan.c
99638+++ b/net/8021q/vlan.c
99639@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
99640 err = -EPERM;
99641 if (!capable(CAP_NET_ADMIN))
99642 break;
99643- if ((args.u.name_type >= 0) &&
99644- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
99645+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
99646 struct vlan_net *vn;
99647
99648 vn = net_generic(net, vlan_net_id);
99649diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
99650index a2d2984..f9eb711 100644
99651--- a/net/9p/trans_fd.c
99652+++ b/net/9p/trans_fd.c
99653@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
99654 oldfs = get_fs();
99655 set_fs(get_ds());
99656 /* The cast to a user pointer is valid due to the set_fs() */
99657- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
99658+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
99659 set_fs(oldfs);
99660
99661 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
99662diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
99663index 02cc7e7..4514f1b 100644
99664--- a/net/atm/atm_misc.c
99665+++ b/net/atm/atm_misc.c
99666@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
99667 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
99668 return 1;
99669 atm_return(vcc,truesize);
99670- atomic_inc(&vcc->stats->rx_drop);
99671+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99672 return 0;
99673 }
99674
99675@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
99676 }
99677 }
99678 atm_return(vcc,guess);
99679- atomic_inc(&vcc->stats->rx_drop);
99680+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99681 return NULL;
99682 }
99683
99684@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
99685
99686 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99687 {
99688-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99689+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99690 __SONET_ITEMS
99691 #undef __HANDLE_ITEM
99692 }
99693@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99694
99695 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99696 {
99697-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
99698+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
99699 __SONET_ITEMS
99700 #undef __HANDLE_ITEM
99701 }
99702diff --git a/net/atm/lec.h b/net/atm/lec.h
99703index 9d14d19..5c145f3 100644
99704--- a/net/atm/lec.h
99705+++ b/net/atm/lec.h
99706@@ -48,7 +48,7 @@ struct lane2_ops {
99707 const u8 *tlvs, u32 sizeoftlvs);
99708 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
99709 const u8 *tlvs, u32 sizeoftlvs);
99710-};
99711+} __no_const;
99712
99713 /*
99714 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
99715diff --git a/net/atm/mpc.h b/net/atm/mpc.h
99716index 0919a88..a23d54e 100644
99717--- a/net/atm/mpc.h
99718+++ b/net/atm/mpc.h
99719@@ -33,7 +33,7 @@ struct mpoa_client {
99720 struct mpc_parameters parameters; /* parameters for this client */
99721
99722 const struct net_device_ops *old_ops;
99723- struct net_device_ops new_ops;
99724+ net_device_ops_no_const new_ops;
99725 };
99726
99727
99728diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
99729index 4504a4b..1733f1e 100644
99730--- a/net/atm/mpoa_caches.c
99731+++ b/net/atm/mpoa_caches.c
99732@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
99733 struct timeval now;
99734 struct k_message msg;
99735
99736+ pax_track_stack();
99737+
99738 do_gettimeofday(&now);
99739
99740 write_lock_irq(&client->egress_lock);
99741diff --git a/net/atm/proc.c b/net/atm/proc.c
99742index ab8419a..aa91497 100644
99743--- a/net/atm/proc.c
99744+++ b/net/atm/proc.c
99745@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
99746 const struct k_atm_aal_stats *stats)
99747 {
99748 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
99749- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
99750- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
99751- atomic_read(&stats->rx_drop));
99752+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
99753+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
99754+ atomic_read_unchecked(&stats->rx_drop));
99755 }
99756
99757 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
99758@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
99759 {
99760 struct sock *sk = sk_atm(vcc);
99761
99762+#ifdef CONFIG_GRKERNSEC_HIDESYM
99763+ seq_printf(seq, "%p ", NULL);
99764+#else
99765 seq_printf(seq, "%p ", vcc);
99766+#endif
99767+
99768 if (!vcc->dev)
99769 seq_printf(seq, "Unassigned ");
99770 else
99771@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
99772 {
99773 if (!vcc->dev)
99774 seq_printf(seq, sizeof(void *) == 4 ?
99775+#ifdef CONFIG_GRKERNSEC_HIDESYM
99776+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
99777+#else
99778 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
99779+#endif
99780 else
99781 seq_printf(seq, "%3d %3d %5d ",
99782 vcc->dev->number, vcc->vpi, vcc->vci);
99783diff --git a/net/atm/resources.c b/net/atm/resources.c
99784index 56b7322..c48b84e 100644
99785--- a/net/atm/resources.c
99786+++ b/net/atm/resources.c
99787@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
99788 static void copy_aal_stats(struct k_atm_aal_stats *from,
99789 struct atm_aal_stats *to)
99790 {
99791-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99792+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99793 __AAL_STAT_ITEMS
99794 #undef __HANDLE_ITEM
99795 }
99796@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
99797 static void subtract_aal_stats(struct k_atm_aal_stats *from,
99798 struct atm_aal_stats *to)
99799 {
99800-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
99801+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
99802 __AAL_STAT_ITEMS
99803 #undef __HANDLE_ITEM
99804 }
99805diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
99806index 8567d47..bba2292 100644
99807--- a/net/bridge/br_private.h
99808+++ b/net/bridge/br_private.h
99809@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
99810
99811 #ifdef CONFIG_SYSFS
99812 /* br_sysfs_if.c */
99813-extern struct sysfs_ops brport_sysfs_ops;
99814+extern const struct sysfs_ops brport_sysfs_ops;
99815 extern int br_sysfs_addif(struct net_bridge_port *p);
99816
99817 /* br_sysfs_br.c */
99818diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
99819index 9a52ac5..c97538e 100644
99820--- a/net/bridge/br_stp_if.c
99821+++ b/net/bridge/br_stp_if.c
99822@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
99823 char *envp[] = { NULL };
99824
99825 if (br->stp_enabled == BR_USER_STP) {
99826- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
99827+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
99828 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
99829 br->dev->name, r);
99830
99831diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
99832index 820643a..ce77fb3 100644
99833--- a/net/bridge/br_sysfs_if.c
99834+++ b/net/bridge/br_sysfs_if.c
99835@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
99836 return ret;
99837 }
99838
99839-struct sysfs_ops brport_sysfs_ops = {
99840+const struct sysfs_ops brport_sysfs_ops = {
99841 .show = brport_show,
99842 .store = brport_store,
99843 };
99844diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
99845index d73d47f..72df42a 100644
99846--- a/net/bridge/netfilter/ebtables.c
99847+++ b/net/bridge/netfilter/ebtables.c
99848@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
99849 unsigned int entries_size, nentries;
99850 char *entries;
99851
99852+ pax_track_stack();
99853+
99854 if (cmd == EBT_SO_GET_ENTRIES) {
99855 entries_size = t->private->entries_size;
99856 nentries = t->private->nentries;
99857diff --git a/net/can/bcm.c b/net/can/bcm.c
99858index 2ffd2e0..72a7486 100644
99859--- a/net/can/bcm.c
99860+++ b/net/can/bcm.c
99861@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
99862 struct bcm_sock *bo = bcm_sk(sk);
99863 struct bcm_op *op;
99864
99865+#ifdef CONFIG_GRKERNSEC_HIDESYM
99866+ seq_printf(m, ">>> socket %p", NULL);
99867+ seq_printf(m, " / sk %p", NULL);
99868+ seq_printf(m, " / bo %p", NULL);
99869+#else
99870 seq_printf(m, ">>> socket %p", sk->sk_socket);
99871 seq_printf(m, " / sk %p", sk);
99872 seq_printf(m, " / bo %p", bo);
99873+#endif
99874 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
99875 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
99876 seq_printf(m, " <<<\n");
99877diff --git a/net/compat.c b/net/compat.c
99878index 9559afc..ccd74e1 100644
99879--- a/net/compat.c
99880+++ b/net/compat.c
99881@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
99882 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
99883 __get_user(kmsg->msg_flags, &umsg->msg_flags))
99884 return -EFAULT;
99885- kmsg->msg_name = compat_ptr(tmp1);
99886- kmsg->msg_iov = compat_ptr(tmp2);
99887- kmsg->msg_control = compat_ptr(tmp3);
99888+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
99889+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
99890+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
99891 return 0;
99892 }
99893
99894@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99895 kern_msg->msg_name = NULL;
99896
99897 tot_len = iov_from_user_compat_to_kern(kern_iov,
99898- (struct compat_iovec __user *)kern_msg->msg_iov,
99899+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
99900 kern_msg->msg_iovlen);
99901 if (tot_len >= 0)
99902 kern_msg->msg_iov = kern_iov;
99903@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99904
99905 #define CMSG_COMPAT_FIRSTHDR(msg) \
99906 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
99907- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
99908+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
99909 (struct compat_cmsghdr __user *)NULL)
99910
99911 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
99912 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
99913 (ucmlen) <= (unsigned long) \
99914 ((mhdr)->msg_controllen - \
99915- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
99916+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
99917
99918 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
99919 struct compat_cmsghdr __user *cmsg, int cmsg_len)
99920 {
99921 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
99922- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
99923+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
99924 msg->msg_controllen)
99925 return NULL;
99926 return (struct compat_cmsghdr __user *)ptr;
99927@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
99928 {
99929 struct compat_timeval ctv;
99930 struct compat_timespec cts[3];
99931- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
99932+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
99933 struct compat_cmsghdr cmhdr;
99934 int cmlen;
99935
99936@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
99937
99938 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
99939 {
99940- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
99941+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
99942 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
99943 int fdnum = scm->fp->count;
99944 struct file **fp = scm->fp->fp;
99945@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
99946 len = sizeof(ktime);
99947 old_fs = get_fs();
99948 set_fs(KERNEL_DS);
99949- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
99950+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
99951 set_fs(old_fs);
99952
99953 if (!err) {
99954@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
99955 case MCAST_JOIN_GROUP:
99956 case MCAST_LEAVE_GROUP:
99957 {
99958- struct compat_group_req __user *gr32 = (void *)optval;
99959+ struct compat_group_req __user *gr32 = (void __user *)optval;
99960 struct group_req __user *kgr =
99961 compat_alloc_user_space(sizeof(struct group_req));
99962 u32 interface;
99963@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
99964 case MCAST_BLOCK_SOURCE:
99965 case MCAST_UNBLOCK_SOURCE:
99966 {
99967- struct compat_group_source_req __user *gsr32 = (void *)optval;
99968+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
99969 struct group_source_req __user *kgsr = compat_alloc_user_space(
99970 sizeof(struct group_source_req));
99971 u32 interface;
99972@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
99973 }
99974 case MCAST_MSFILTER:
99975 {
99976- struct compat_group_filter __user *gf32 = (void *)optval;
99977+ struct compat_group_filter __user *gf32 = (void __user *)optval;
99978 struct group_filter __user *kgf;
99979 u32 interface, fmode, numsrc;
99980
99981diff --git a/net/core/dev.c b/net/core/dev.c
99982index 84a0705..575db4c 100644
99983--- a/net/core/dev.c
99984+++ b/net/core/dev.c
99985@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
99986 if (no_module && capable(CAP_NET_ADMIN))
99987 no_module = request_module("netdev-%s", name);
99988 if (no_module && capable(CAP_SYS_MODULE)) {
99989+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99990+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
99991+#else
99992 if (!request_module("%s", name))
99993 pr_err("Loading kernel module for a network device "
99994 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
99995 "instead\n", name);
99996+#endif
99997 }
99998 }
99999 EXPORT_SYMBOL(dev_load);
100000@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
100001
100002 struct dev_gso_cb {
100003 void (*destructor)(struct sk_buff *skb);
100004-};
100005+} __no_const;
100006
100007 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
100008
100009@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
100010 }
100011 EXPORT_SYMBOL(netif_rx_ni);
100012
100013-static void net_tx_action(struct softirq_action *h)
100014+static void net_tx_action(void)
100015 {
100016 struct softnet_data *sd = &__get_cpu_var(softnet_data);
100017
100018@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
100019 EXPORT_SYMBOL(netif_napi_del);
100020
100021
100022-static void net_rx_action(struct softirq_action *h)
100023+static void net_rx_action(void)
100024 {
100025 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
100026 unsigned long time_limit = jiffies + 2;
100027diff --git a/net/core/flow.c b/net/core/flow.c
100028index 9601587..8c4824e 100644
100029--- a/net/core/flow.c
100030+++ b/net/core/flow.c
100031@@ -35,11 +35,11 @@ struct flow_cache_entry {
100032 atomic_t *object_ref;
100033 };
100034
100035-atomic_t flow_cache_genid = ATOMIC_INIT(0);
100036+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
100037
100038 static u32 flow_hash_shift;
100039 #define flow_hash_size (1 << flow_hash_shift)
100040-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
100041+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
100042
100043 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
100044
100045@@ -52,7 +52,7 @@ struct flow_percpu_info {
100046 u32 hash_rnd;
100047 int count;
100048 };
100049-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
100050+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
100051
100052 #define flow_hash_rnd_recalc(cpu) \
100053 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
100054@@ -69,7 +69,7 @@ struct flow_flush_info {
100055 atomic_t cpuleft;
100056 struct completion completion;
100057 };
100058-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
100059+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
100060
100061 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
100062
100063@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
100064 if (fle->family == family &&
100065 fle->dir == dir &&
100066 flow_key_compare(key, &fle->key) == 0) {
100067- if (fle->genid == atomic_read(&flow_cache_genid)) {
100068+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
100069 void *ret = fle->object;
100070
100071 if (ret)
100072@@ -228,7 +228,7 @@ nocache:
100073 err = resolver(net, key, family, dir, &obj, &obj_ref);
100074
100075 if (fle && !err) {
100076- fle->genid = atomic_read(&flow_cache_genid);
100077+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
100078
100079 if (fle->object)
100080 atomic_dec(fle->object_ref);
100081@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
100082
100083 fle = flow_table(cpu)[i];
100084 for (; fle; fle = fle->next) {
100085- unsigned genid = atomic_read(&flow_cache_genid);
100086+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
100087
100088 if (!fle->object || fle->genid == genid)
100089 continue;
100090diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
100091index d4fd895..ac9b1e6 100644
100092--- a/net/core/rtnetlink.c
100093+++ b/net/core/rtnetlink.c
100094@@ -57,7 +57,7 @@ struct rtnl_link
100095 {
100096 rtnl_doit_func doit;
100097 rtnl_dumpit_func dumpit;
100098-};
100099+} __no_const;
100100
100101 static DEFINE_MUTEX(rtnl_mutex);
100102
100103diff --git a/net/core/scm.c b/net/core/scm.c
100104index d98eafc..1a190a9 100644
100105--- a/net/core/scm.c
100106+++ b/net/core/scm.c
100107@@ -191,7 +191,7 @@ error:
100108 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100109 {
100110 struct cmsghdr __user *cm
100111- = (__force struct cmsghdr __user *)msg->msg_control;
100112+ = (struct cmsghdr __force_user *)msg->msg_control;
100113 struct cmsghdr cmhdr;
100114 int cmlen = CMSG_LEN(len);
100115 int err;
100116@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
100117 err = -EFAULT;
100118 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
100119 goto out;
100120- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
100121+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
100122 goto out;
100123 cmlen = CMSG_SPACE(len);
100124 if (msg->msg_controllen < cmlen)
100125@@ -229,7 +229,7 @@ out:
100126 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100127 {
100128 struct cmsghdr __user *cm
100129- = (__force struct cmsghdr __user*)msg->msg_control;
100130+ = (struct cmsghdr __force_user *)msg->msg_control;
100131
100132 int fdmax = 0;
100133 int fdnum = scm->fp->count;
100134@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
100135 if (fdnum < fdmax)
100136 fdmax = fdnum;
100137
100138- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
100139+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
100140 i++, cmfptr++)
100141 {
100142 int new_fd;
100143diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
100144index 45329d7..626aaa6 100644
100145--- a/net/core/secure_seq.c
100146+++ b/net/core/secure_seq.c
100147@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
100148 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
100149
100150 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100151- __be16 dport)
100152+ __be16 dport)
100153 {
100154 u32 secret[MD5_MESSAGE_BYTES / 4];
100155 u32 hash[MD5_DIGEST_WORDS];
100156@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
100157 secret[i] = net_secret[i];
100158
100159 md5_transform(hash, secret);
100160-
100161 return hash[0];
100162 }
100163 #endif
100164diff --git a/net/core/skbuff.c b/net/core/skbuff.c
100165index 025f924..70a71c4 100644
100166--- a/net/core/skbuff.c
100167+++ b/net/core/skbuff.c
100168@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
100169 struct sk_buff *frag_iter;
100170 struct sock *sk = skb->sk;
100171
100172+ pax_track_stack();
100173+
100174 /*
100175 * __skb_splice_bits() only fails if the output has no room left,
100176 * so no point in going over the frag_list for the error case.
100177diff --git a/net/core/sock.c b/net/core/sock.c
100178index 6605e75..3acebda 100644
100179--- a/net/core/sock.c
100180+++ b/net/core/sock.c
100181@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
100182 break;
100183
100184 case SO_PEERCRED:
100185+ {
100186+ struct ucred peercred;
100187 if (len > sizeof(sk->sk_peercred))
100188 len = sizeof(sk->sk_peercred);
100189- if (copy_to_user(optval, &sk->sk_peercred, len))
100190+ peercred = sk->sk_peercred;
100191+ if (copy_to_user(optval, &peercred, len))
100192 return -EFAULT;
100193 goto lenout;
100194+ }
100195
100196 case SO_PEERNAME:
100197 {
100198@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
100199 */
100200 smp_wmb();
100201 atomic_set(&sk->sk_refcnt, 1);
100202- atomic_set(&sk->sk_drops, 0);
100203+ atomic_set_unchecked(&sk->sk_drops, 0);
100204 }
100205 EXPORT_SYMBOL(sock_init_data);
100206
100207diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
100208index 2036568..c55883d 100644
100209--- a/net/decnet/sysctl_net_decnet.c
100210+++ b/net/decnet/sysctl_net_decnet.c
100211@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
100212
100213 if (len > *lenp) len = *lenp;
100214
100215- if (copy_to_user(buffer, addr, len))
100216+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
100217 return -EFAULT;
100218
100219 *lenp = len;
100220@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
100221
100222 if (len > *lenp) len = *lenp;
100223
100224- if (copy_to_user(buffer, devname, len))
100225+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
100226 return -EFAULT;
100227
100228 *lenp = len;
100229diff --git a/net/econet/Kconfig b/net/econet/Kconfig
100230index 39a2d29..f39c0fe 100644
100231--- a/net/econet/Kconfig
100232+++ b/net/econet/Kconfig
100233@@ -4,7 +4,7 @@
100234
100235 config ECONET
100236 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
100237- depends on EXPERIMENTAL && INET
100238+ depends on EXPERIMENTAL && INET && BROKEN
100239 ---help---
100240 Econet is a fairly old and slow networking protocol mainly used by
100241 Acorn computers to access file and print servers. It uses native
100242diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
100243index a413b1b..380849c 100644
100244--- a/net/ieee802154/dgram.c
100245+++ b/net/ieee802154/dgram.c
100246@@ -318,7 +318,7 @@ out:
100247 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
100248 {
100249 if (sock_queue_rcv_skb(sk, skb) < 0) {
100250- atomic_inc(&sk->sk_drops);
100251+ atomic_inc_unchecked(&sk->sk_drops);
100252 kfree_skb(skb);
100253 return NET_RX_DROP;
100254 }
100255diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
100256index 30e74ee..bfc6ee0 100644
100257--- a/net/ieee802154/raw.c
100258+++ b/net/ieee802154/raw.c
100259@@ -206,7 +206,7 @@ out:
100260 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
100261 {
100262 if (sock_queue_rcv_skb(sk, skb) < 0) {
100263- atomic_inc(&sk->sk_drops);
100264+ atomic_inc_unchecked(&sk->sk_drops);
100265 kfree_skb(skb);
100266 return NET_RX_DROP;
100267 }
100268diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
100269index dba56d2..acee5d6 100644
100270--- a/net/ipv4/inet_diag.c
100271+++ b/net/ipv4/inet_diag.c
100272@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
100273 r->idiag_retrans = 0;
100274
100275 r->id.idiag_if = sk->sk_bound_dev_if;
100276+#ifdef CONFIG_GRKERNSEC_HIDESYM
100277+ r->id.idiag_cookie[0] = 0;
100278+ r->id.idiag_cookie[1] = 0;
100279+#else
100280 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
100281 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
100282+#endif
100283
100284 r->id.idiag_sport = inet->sport;
100285 r->id.idiag_dport = inet->dport;
100286@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
100287 r->idiag_family = tw->tw_family;
100288 r->idiag_retrans = 0;
100289 r->id.idiag_if = tw->tw_bound_dev_if;
100290+
100291+#ifdef CONFIG_GRKERNSEC_HIDESYM
100292+ r->id.idiag_cookie[0] = 0;
100293+ r->id.idiag_cookie[1] = 0;
100294+#else
100295 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
100296 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
100297+#endif
100298+
100299 r->id.idiag_sport = tw->tw_sport;
100300 r->id.idiag_dport = tw->tw_dport;
100301 r->id.idiag_src[0] = tw->tw_rcv_saddr;
100302@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
100303 if (sk == NULL)
100304 goto unlock;
100305
100306+#ifndef CONFIG_GRKERNSEC_HIDESYM
100307 err = -ESTALE;
100308 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
100309 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
100310 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
100311 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
100312 goto out;
100313+#endif
100314
100315 err = -ENOMEM;
100316 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
100317@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
100318 r->idiag_retrans = req->retrans;
100319
100320 r->id.idiag_if = sk->sk_bound_dev_if;
100321+
100322+#ifdef CONFIG_GRKERNSEC_HIDESYM
100323+ r->id.idiag_cookie[0] = 0;
100324+ r->id.idiag_cookie[1] = 0;
100325+#else
100326 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
100327 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
100328+#endif
100329
100330 tmo = req->expires - jiffies;
100331 if (tmo < 0)
100332diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
100333index d717267..56de7e7 100644
100334--- a/net/ipv4/inet_hashtables.c
100335+++ b/net/ipv4/inet_hashtables.c
100336@@ -18,12 +18,15 @@
100337 #include <linux/sched.h>
100338 #include <linux/slab.h>
100339 #include <linux/wait.h>
100340+#include <linux/security.h>
100341
100342 #include <net/inet_connection_sock.h>
100343 #include <net/inet_hashtables.h>
100344 #include <net/secure_seq.h>
100345 #include <net/ip.h>
100346
100347+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
100348+
100349 /*
100350 * Allocate and initialize a new local port bind bucket.
100351 * The bindhash mutex for snum's hash chain must be held here.
100352@@ -491,6 +494,8 @@ ok:
100353 }
100354 spin_unlock(&head->lock);
100355
100356+ gr_update_task_in_ip_table(current, inet_sk(sk));
100357+
100358 if (tw) {
100359 inet_twsk_deschedule(tw, death_row);
100360 inet_twsk_put(tw);
100361diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
100362index 13b229f..6956484 100644
100363--- a/net/ipv4/inetpeer.c
100364+++ b/net/ipv4/inetpeer.c
100365@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100366 struct inet_peer *p, *n;
100367 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
100368
100369+ pax_track_stack();
100370+
100371 /* Look up for the address quickly. */
100372 read_lock_bh(&peer_pool_lock);
100373 p = lookup(daddr, NULL);
100374@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
100375 return NULL;
100376 n->v4daddr = daddr;
100377 atomic_set(&n->refcnt, 1);
100378- atomic_set(&n->rid, 0);
100379+ atomic_set_unchecked(&n->rid, 0);
100380 n->ip_id_count = secure_ip_id(daddr);
100381 n->tcp_ts_stamp = 0;
100382
100383diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
100384index d3fe10b..feeafc9 100644
100385--- a/net/ipv4/ip_fragment.c
100386+++ b/net/ipv4/ip_fragment.c
100387@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
100388 return 0;
100389
100390 start = qp->rid;
100391- end = atomic_inc_return(&peer->rid);
100392+ end = atomic_inc_return_unchecked(&peer->rid);
100393 qp->rid = end;
100394
100395 rc = qp->q.fragments && (end - start) > max;
100396diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
100397index e982b5c..f079d75 100644
100398--- a/net/ipv4/ip_sockglue.c
100399+++ b/net/ipv4/ip_sockglue.c
100400@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100401 int val;
100402 int len;
100403
100404+ pax_track_stack();
100405+
100406 if (level != SOL_IP)
100407 return -EOPNOTSUPP;
100408
100409@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
100410 if (sk->sk_type != SOCK_STREAM)
100411 return -ENOPROTOOPT;
100412
100413- msg.msg_control = optval;
100414+ msg.msg_control = (void __force_kernel *)optval;
100415 msg.msg_controllen = len;
100416 msg.msg_flags = 0;
100417
100418diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
100419index f8d04c2..c1188f2 100644
100420--- a/net/ipv4/ipconfig.c
100421+++ b/net/ipv4/ipconfig.c
100422@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
100423
100424 mm_segment_t oldfs = get_fs();
100425 set_fs(get_ds());
100426- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100427+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100428 set_fs(oldfs);
100429 return res;
100430 }
100431@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
100432
100433 mm_segment_t oldfs = get_fs();
100434 set_fs(get_ds());
100435- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
100436+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
100437 set_fs(oldfs);
100438 return res;
100439 }
100440@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
100441
100442 mm_segment_t oldfs = get_fs();
100443 set_fs(get_ds());
100444- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
100445+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
100446 set_fs(oldfs);
100447 return res;
100448 }
100449diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
100450index c8b0cc3..05e4007 100644
100451--- a/net/ipv4/netfilter/arp_tables.c
100452+++ b/net/ipv4/netfilter/arp_tables.c
100453@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100454 private = &tmp;
100455 }
100456 #endif
100457+ memset(&info, 0, sizeof(info));
100458 info.valid_hooks = t->valid_hooks;
100459 memcpy(info.hook_entry, private->hook_entry,
100460 sizeof(info.hook_entry));
100461@@ -1003,6 +1004,11 @@ static int __do_replace(struct net *net, const char *name,
100462 unsigned int valid_hooks,
100463 struct xt_table_info *newinfo,
100464 unsigned int num_counters,
100465+ void __user *counters_ptr) __size_overflow(5);
100466+static int __do_replace(struct net *net, const char *name,
100467+ unsigned int valid_hooks,
100468+ struct xt_table_info *newinfo,
100469+ unsigned int num_counters,
100470 void __user *counters_ptr)
100471 {
100472 int ret;
100473@@ -1135,6 +1141,8 @@ add_counter_to_entry(struct arpt_entry *e,
100474 }
100475
100476 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100477+ int compat) __size_overflow(3);
100478+static int do_add_counters(struct net *net, void __user *user, unsigned int len,
100479 int compat)
100480 {
100481 unsigned int i, curcpu;
100482diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
100483index c156db2..e772975 100644
100484--- a/net/ipv4/netfilter/ip_queue.c
100485+++ b/net/ipv4/netfilter/ip_queue.c
100486@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
100487
100488 if (v->data_len < sizeof(*user_iph))
100489 return 0;
100490+ if (v->data_len > 65535)
100491+ return -EMSGSIZE;
100492+
100493 diff = v->data_len - e->skb->len;
100494 if (diff < 0) {
100495 if (pskb_trim(e->skb, v->data_len))
100496@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
100497 static inline void
100498 __ipq_rcv_skb(struct sk_buff *skb)
100499 {
100500- int status, type, pid, flags, nlmsglen, skblen;
100501+ int status, type, pid, flags;
100502+ unsigned int nlmsglen, skblen;
100503 struct nlmsghdr *nlh;
100504
100505 skblen = skb->len;
100506diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
100507index 0606db1..918b88a 100644
100508--- a/net/ipv4/netfilter/ip_tables.c
100509+++ b/net/ipv4/netfilter/ip_tables.c
100510@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100511 private = &tmp;
100512 }
100513 #endif
100514+ memset(&info, 0, sizeof(info));
100515 info.valid_hooks = t->valid_hooks;
100516 memcpy(info.hook_entry, private->hook_entry,
100517 sizeof(info.hook_entry));
100518@@ -1208,6 +1209,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
100519 static int
100520 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100521 struct xt_table_info *newinfo, unsigned int num_counters,
100522+ void __user *counters_ptr) __size_overflow(5);
100523+static int
100524+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
100525+ struct xt_table_info *newinfo, unsigned int num_counters,
100526 void __user *counters_ptr)
100527 {
100528 int ret;
100529@@ -1339,6 +1344,8 @@ add_counter_to_entry(struct ipt_entry *e,
100530 }
100531
100532 static int
100533+do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) __size_overflow(3);
100534+static int
100535 do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
100536 {
100537 unsigned int i, curcpu;
100538diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100539index d9521f6..127fa44 100644
100540--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
100541+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
100542@@ -436,6 +436,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
100543 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100544 unsigned char *eoc,
100545 unsigned long **oid,
100546+ unsigned int *len) __size_overflow(2);
100547+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
100548+ unsigned char *eoc,
100549+ unsigned long **oid,
100550 unsigned int *len)
100551 {
100552 unsigned long subid;
100553diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
100554index ab996f9..3da5f96 100644
100555--- a/net/ipv4/raw.c
100556+++ b/net/ipv4/raw.c
100557@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100558 /* Charge it to the socket. */
100559
100560 if (sock_queue_rcv_skb(sk, skb) < 0) {
100561- atomic_inc(&sk->sk_drops);
100562+ atomic_inc_unchecked(&sk->sk_drops);
100563 kfree_skb(skb);
100564 return NET_RX_DROP;
100565 }
100566@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100567 int raw_rcv(struct sock *sk, struct sk_buff *skb)
100568 {
100569 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
100570- atomic_inc(&sk->sk_drops);
100571+ atomic_inc_unchecked(&sk->sk_drops);
100572 kfree_skb(skb);
100573 return NET_RX_DROP;
100574 }
100575@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
100576
100577 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
100578 {
100579+ struct icmp_filter filter;
100580+
100581+ if (optlen < 0)
100582+ return -EINVAL;
100583 if (optlen > sizeof(struct icmp_filter))
100584 optlen = sizeof(struct icmp_filter);
100585- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
100586+ if (copy_from_user(&filter, optval, optlen))
100587 return -EFAULT;
100588+ raw_sk(sk)->filter = filter;
100589+
100590 return 0;
100591 }
100592
100593 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
100594 {
100595 int len, ret = -EFAULT;
100596+ struct icmp_filter filter;
100597
100598 if (get_user(len, optlen))
100599 goto out;
100600@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
100601 if (len > sizeof(struct icmp_filter))
100602 len = sizeof(struct icmp_filter);
100603 ret = -EFAULT;
100604- if (put_user(len, optlen) ||
100605- copy_to_user(optval, &raw_sk(sk)->filter, len))
100606+ filter = raw_sk(sk)->filter;
100607+ if (put_user(len, optlen) || len > sizeof filter ||
100608+ copy_to_user(optval, &filter, len))
100609 goto out;
100610 ret = 0;
100611 out: return ret;
100612@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
100613 sk_wmem_alloc_get(sp),
100614 sk_rmem_alloc_get(sp),
100615 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100616- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
100617+ atomic_read(&sp->sk_refcnt),
100618+#ifdef CONFIG_GRKERNSEC_HIDESYM
100619+ NULL,
100620+#else
100621+ sp,
100622+#endif
100623+ atomic_read_unchecked(&sp->sk_drops));
100624 }
100625
100626 static int raw_seq_show(struct seq_file *seq, void *v)
100627diff --git a/net/ipv4/route.c b/net/ipv4/route.c
100628index 58f141b..b759702 100644
100629--- a/net/ipv4/route.c
100630+++ b/net/ipv4/route.c
100631@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
100632
100633 static inline int rt_genid(struct net *net)
100634 {
100635- return atomic_read(&net->ipv4.rt_genid);
100636+ return atomic_read_unchecked(&net->ipv4.rt_genid);
100637 }
100638
100639 #ifdef CONFIG_PROC_FS
100640@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
100641 unsigned char shuffle;
100642
100643 get_random_bytes(&shuffle, sizeof(shuffle));
100644- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
100645+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
100646 }
100647
100648 /*
100649@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
100650
100651 static __net_init int rt_secret_timer_init(struct net *net)
100652 {
100653- atomic_set(&net->ipv4.rt_genid,
100654+ atomic_set_unchecked(&net->ipv4.rt_genid,
100655 (int) ((num_physpages ^ (num_physpages>>8)) ^
100656 (jiffies ^ (jiffies >> 7))));
100657
100658diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
100659index f095659..adc892a 100644
100660--- a/net/ipv4/tcp.c
100661+++ b/net/ipv4/tcp.c
100662@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
100663 int val;
100664 int err = 0;
100665
100666+ pax_track_stack();
100667+
100668 /* This is a string value all the others are int's */
100669 if (optname == TCP_CONGESTION) {
100670 char name[TCP_CA_NAME_MAX];
100671@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
100672 struct tcp_sock *tp = tcp_sk(sk);
100673 int val, len;
100674
100675+ pax_track_stack();
100676+
100677 if (get_user(len, optlen))
100678 return -EFAULT;
100679
100680diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
100681index 6fc7961..33bad4a 100644
100682--- a/net/ipv4/tcp_ipv4.c
100683+++ b/net/ipv4/tcp_ipv4.c
100684@@ -85,6 +85,9 @@
100685 int sysctl_tcp_tw_reuse __read_mostly;
100686 int sysctl_tcp_low_latency __read_mostly;
100687
100688+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100689+extern int grsec_enable_blackhole;
100690+#endif
100691
100692 #ifdef CONFIG_TCP_MD5SIG
100693 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
100694@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
100695 return 0;
100696
100697 reset:
100698+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100699+ if (!grsec_enable_blackhole)
100700+#endif
100701 tcp_v4_send_reset(rsk, skb);
100702 discard:
100703 kfree_skb(skb);
100704@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
100705 TCP_SKB_CB(skb)->sacked = 0;
100706
100707 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100708- if (!sk)
100709+ if (!sk) {
100710+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100711+ ret = 1;
100712+#endif
100713 goto no_tcp_socket;
100714+ }
100715
100716 process:
100717- if (sk->sk_state == TCP_TIME_WAIT)
100718+ if (sk->sk_state == TCP_TIME_WAIT) {
100719+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100720+ ret = 2;
100721+#endif
100722 goto do_time_wait;
100723+ }
100724
100725 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
100726 goto discard_and_relse;
100727@@ -1651,6 +1665,10 @@ no_tcp_socket:
100728 bad_packet:
100729 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100730 } else {
100731+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100732+ if (!grsec_enable_blackhole || (ret == 1 &&
100733+ (skb->dev->flags & IFF_LOOPBACK)))
100734+#endif
100735 tcp_v4_send_reset(NULL, skb);
100736 }
100737
100738@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
100739 0, /* non standard timer */
100740 0, /* open_requests have no inode */
100741 atomic_read(&sk->sk_refcnt),
100742+#ifdef CONFIG_GRKERNSEC_HIDESYM
100743+ NULL,
100744+#else
100745 req,
100746+#endif
100747 len);
100748 }
100749
100750@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
100751 sock_i_uid(sk),
100752 icsk->icsk_probes_out,
100753 sock_i_ino(sk),
100754- atomic_read(&sk->sk_refcnt), sk,
100755+ atomic_read(&sk->sk_refcnt),
100756+#ifdef CONFIG_GRKERNSEC_HIDESYM
100757+ NULL,
100758+#else
100759+ sk,
100760+#endif
100761 jiffies_to_clock_t(icsk->icsk_rto),
100762 jiffies_to_clock_t(icsk->icsk_ack.ato),
100763 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
100764@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
100765 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
100766 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
100767 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
100768- atomic_read(&tw->tw_refcnt), tw, len);
100769+ atomic_read(&tw->tw_refcnt),
100770+#ifdef CONFIG_GRKERNSEC_HIDESYM
100771+ NULL,
100772+#else
100773+ tw,
100774+#endif
100775+ len);
100776 }
100777
100778 #define TMPSZ 150
100779diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
100780index 4c03598..e09a8e8 100644
100781--- a/net/ipv4/tcp_minisocks.c
100782+++ b/net/ipv4/tcp_minisocks.c
100783@@ -26,6 +26,10 @@
100784 #include <net/inet_common.h>
100785 #include <net/xfrm.h>
100786
100787+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100788+extern int grsec_enable_blackhole;
100789+#endif
100790+
100791 #ifdef CONFIG_SYSCTL
100792 #define SYNC_INIT 0 /* let the user enable it */
100793 #else
100794@@ -672,6 +676,10 @@ listen_overflow:
100795
100796 embryonic_reset:
100797 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
100798+
100799+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100800+ if (!grsec_enable_blackhole)
100801+#endif
100802 if (!(flg & TCP_FLAG_RST))
100803 req->rsk_ops->send_reset(sk, skb);
100804
100805diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
100806index af83bdf..ec91cb2 100644
100807--- a/net/ipv4/tcp_output.c
100808+++ b/net/ipv4/tcp_output.c
100809@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
100810 __u8 *md5_hash_location;
100811 int mss;
100812
100813+ pax_track_stack();
100814+
100815 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
100816 if (skb == NULL)
100817 return NULL;
100818diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
100819index 59f5b5e..193860f 100644
100820--- a/net/ipv4/tcp_probe.c
100821+++ b/net/ipv4/tcp_probe.c
100822@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
100823 if (cnt + width >= len)
100824 break;
100825
100826- if (copy_to_user(buf + cnt, tbuf, width))
100827+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
100828 return -EFAULT;
100829 cnt += width;
100830 }
100831diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
100832index 57d5501..a9ed13a 100644
100833--- a/net/ipv4/tcp_timer.c
100834+++ b/net/ipv4/tcp_timer.c
100835@@ -21,6 +21,10 @@
100836 #include <linux/module.h>
100837 #include <net/tcp.h>
100838
100839+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100840+extern int grsec_lastack_retries;
100841+#endif
100842+
100843 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
100844 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
100845 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
100846@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
100847 }
100848 }
100849
100850+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100851+ if ((sk->sk_state == TCP_LAST_ACK) &&
100852+ (grsec_lastack_retries > 0) &&
100853+ (grsec_lastack_retries < retry_until))
100854+ retry_until = grsec_lastack_retries;
100855+#endif
100856+
100857 if (retransmits_timed_out(sk, retry_until)) {
100858 /* Has it gone just too far? */
100859 tcp_write_err(sk);
100860diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
100861index 8e28770..72105c8 100644
100862--- a/net/ipv4/udp.c
100863+++ b/net/ipv4/udp.c
100864@@ -86,6 +86,7 @@
100865 #include <linux/types.h>
100866 #include <linux/fcntl.h>
100867 #include <linux/module.h>
100868+#include <linux/security.h>
100869 #include <linux/socket.h>
100870 #include <linux/sockios.h>
100871 #include <linux/igmp.h>
100872@@ -106,6 +107,10 @@
100873 #include <net/xfrm.h>
100874 #include "udp_impl.h"
100875
100876+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100877+extern int grsec_enable_blackhole;
100878+#endif
100879+
100880 struct udp_table udp_table;
100881 EXPORT_SYMBOL(udp_table);
100882
100883@@ -371,6 +376,9 @@ found:
100884 return s;
100885 }
100886
100887+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
100888+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
100889+
100890 /*
100891 * This routine is called by the ICMP module when it gets some
100892 * sort of error condition. If err < 0 then the socket should
100893@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
100894 dport = usin->sin_port;
100895 if (dport == 0)
100896 return -EINVAL;
100897+
100898+ err = gr_search_udp_sendmsg(sk, usin);
100899+ if (err)
100900+ return err;
100901 } else {
100902 if (sk->sk_state != TCP_ESTABLISHED)
100903 return -EDESTADDRREQ;
100904+
100905+ err = gr_search_udp_sendmsg(sk, NULL);
100906+ if (err)
100907+ return err;
100908+
100909 daddr = inet->daddr;
100910 dport = inet->dport;
100911 /* Open fast path for connected socket.
100912@@ -945,6 +962,10 @@ try_again:
100913 if (!skb)
100914 goto out;
100915
100916+ err = gr_search_udp_recvmsg(sk, skb);
100917+ if (err)
100918+ goto out_free;
100919+
100920 ulen = skb->len - sizeof(struct udphdr);
100921 copied = len;
100922 if (copied > ulen)
100923@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
100924 if (rc == -ENOMEM) {
100925 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
100926 is_udplite);
100927- atomic_inc(&sk->sk_drops);
100928+ atomic_inc_unchecked(&sk->sk_drops);
100929 }
100930 goto drop;
100931 }
100932@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
100933 goto csum_error;
100934
100935 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
100936+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100937+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
100938+#endif
100939 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
100940
100941 /*
100942@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
100943 sk_wmem_alloc_get(sp),
100944 sk_rmem_alloc_get(sp),
100945 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100946- atomic_read(&sp->sk_refcnt), sp,
100947- atomic_read(&sp->sk_drops), len);
100948+ atomic_read(&sp->sk_refcnt),
100949+#ifdef CONFIG_GRKERNSEC_HIDESYM
100950+ NULL,
100951+#else
100952+ sp,
100953+#endif
100954+ atomic_read_unchecked(&sp->sk_drops), len);
100955 }
100956
100957 int udp4_seq_show(struct seq_file *seq, void *v)
100958diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
100959index 8ac3d09..fc58c5f 100644
100960--- a/net/ipv6/addrconf.c
100961+++ b/net/ipv6/addrconf.c
100962@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
100963 p.iph.ihl = 5;
100964 p.iph.protocol = IPPROTO_IPV6;
100965 p.iph.ttl = 64;
100966- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
100967+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
100968
100969 if (ops->ndo_do_ioctl) {
100970 mm_segment_t oldfs = get_fs();
100971diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
100972index cc4797d..7cfdfcc 100644
100973--- a/net/ipv6/inet6_connection_sock.c
100974+++ b/net/ipv6/inet6_connection_sock.c
100975@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
100976 #ifdef CONFIG_XFRM
100977 {
100978 struct rt6_info *rt = (struct rt6_info *)dst;
100979- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
100980+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
100981 }
100982 #endif
100983 }
100984@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
100985 #ifdef CONFIG_XFRM
100986 if (dst) {
100987 struct rt6_info *rt = (struct rt6_info *)dst;
100988- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
100989+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
100990 sk->sk_dst_cache = NULL;
100991 dst_release(dst);
100992 dst = NULL;
100993diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
100994index 093e9b2..f72cddb 100644
100995--- a/net/ipv6/inet6_hashtables.c
100996+++ b/net/ipv6/inet6_hashtables.c
100997@@ -119,7 +119,7 @@ out:
100998 }
100999 EXPORT_SYMBOL(__inet6_lookup_established);
101000
101001-static int inline compute_score(struct sock *sk, struct net *net,
101002+static inline int compute_score(struct sock *sk, struct net *net,
101003 const unsigned short hnum,
101004 const struct in6_addr *daddr,
101005 const int dif)
101006diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
101007index 4f7aaf6..f7acf45 100644
101008--- a/net/ipv6/ipv6_sockglue.c
101009+++ b/net/ipv6/ipv6_sockglue.c
101010@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
101011 int val, valbool;
101012 int retv = -ENOPROTOOPT;
101013
101014+ pax_track_stack();
101015+
101016 if (optval == NULL)
101017 val=0;
101018 else {
101019@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101020 int len;
101021 int val;
101022
101023+ pax_track_stack();
101024+
101025 if (ip6_mroute_opt(optname))
101026 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
101027
101028@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
101029 if (sk->sk_type != SOCK_STREAM)
101030 return -ENOPROTOOPT;
101031
101032- msg.msg_control = optval;
101033+ msg.msg_control = (void __force_kernel *)optval;
101034 msg.msg_controllen = len;
101035 msg.msg_flags = 0;
101036
101037diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
101038index 1cf3f0c..1d4376f 100644
101039--- a/net/ipv6/netfilter/ip6_queue.c
101040+++ b/net/ipv6/netfilter/ip6_queue.c
101041@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
101042
101043 if (v->data_len < sizeof(*user_iph))
101044 return 0;
101045+ if (v->data_len > 65535)
101046+ return -EMSGSIZE;
101047+
101048 diff = v->data_len - e->skb->len;
101049 if (diff < 0) {
101050 if (pskb_trim(e->skb, v->data_len))
101051@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
101052 static inline void
101053 __ipq_rcv_skb(struct sk_buff *skb)
101054 {
101055- int status, type, pid, flags, nlmsglen, skblen;
101056+ int status, type, pid, flags;
101057+ unsigned int nlmsglen, skblen;
101058 struct nlmsghdr *nlh;
101059
101060 skblen = skb->len;
101061diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
101062index 78b5a36..2b9bb06 100644
101063--- a/net/ipv6/netfilter/ip6_tables.c
101064+++ b/net/ipv6/netfilter/ip6_tables.c
101065@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
101066 private = &tmp;
101067 }
101068 #endif
101069+ memset(&info, 0, sizeof(info));
101070 info.valid_hooks = t->valid_hooks;
101071 memcpy(info.hook_entry, private->hook_entry,
101072 sizeof(info.hook_entry));
101073@@ -1240,6 +1241,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
101074 static int
101075 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101076 struct xt_table_info *newinfo, unsigned int num_counters,
101077+ void __user *counters_ptr) __size_overflow(5);
101078+static int
101079+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
101080+ struct xt_table_info *newinfo, unsigned int num_counters,
101081 void __user *counters_ptr)
101082 {
101083 int ret;
101084@@ -1373,6 +1378,9 @@ add_counter_to_entry(struct ip6t_entry *e,
101085
101086 static int
101087 do_add_counters(struct net *net, void __user *user, unsigned int len,
101088+ int compat) __size_overflow(3);
101089+static int
101090+do_add_counters(struct net *net, void __user *user, unsigned int len,
101091 int compat)
101092 {
101093 unsigned int i, curcpu;
101094diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
101095index 4f24570..b813b34 100644
101096--- a/net/ipv6/raw.c
101097+++ b/net/ipv6/raw.c
101098@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
101099 {
101100 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
101101 skb_checksum_complete(skb)) {
101102- atomic_inc(&sk->sk_drops);
101103+ atomic_inc_unchecked(&sk->sk_drops);
101104 kfree_skb(skb);
101105 return NET_RX_DROP;
101106 }
101107
101108 /* Charge it to the socket. */
101109 if (sock_queue_rcv_skb(sk,skb)<0) {
101110- atomic_inc(&sk->sk_drops);
101111+ atomic_inc_unchecked(&sk->sk_drops);
101112 kfree_skb(skb);
101113 return NET_RX_DROP;
101114 }
101115@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101116 struct raw6_sock *rp = raw6_sk(sk);
101117
101118 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
101119- atomic_inc(&sk->sk_drops);
101120+ atomic_inc_unchecked(&sk->sk_drops);
101121 kfree_skb(skb);
101122 return NET_RX_DROP;
101123 }
101124@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
101125
101126 if (inet->hdrincl) {
101127 if (skb_checksum_complete(skb)) {
101128- atomic_inc(&sk->sk_drops);
101129+ atomic_inc_unchecked(&sk->sk_drops);
101130 kfree_skb(skb);
101131 return NET_RX_DROP;
101132 }
101133@@ -518,7 +518,7 @@ csum_copy_err:
101134 as some normal condition.
101135 */
101136 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
101137- atomic_inc(&sk->sk_drops);
101138+ atomic_inc_unchecked(&sk->sk_drops);
101139 goto out;
101140 }
101141
101142@@ -600,7 +600,7 @@ out:
101143 return err;
101144 }
101145
101146-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
101147+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
101148 struct flowi *fl, struct rt6_info *rt,
101149 unsigned int flags)
101150 {
101151@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
101152 u16 proto;
101153 int err;
101154
101155+ pax_track_stack();
101156+
101157 /* Rough check on arithmetic overflow,
101158 better check is made in ip6_append_data().
101159 */
101160@@ -916,12 +918,17 @@ do_confirm:
101161 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
101162 char __user *optval, int optlen)
101163 {
101164+ struct icmp6_filter filter;
101165+
101166 switch (optname) {
101167 case ICMPV6_FILTER:
101168+ if (optlen < 0)
101169+ return -EINVAL;
101170 if (optlen > sizeof(struct icmp6_filter))
101171 optlen = sizeof(struct icmp6_filter);
101172- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
101173+ if (copy_from_user(&filter, optval, optlen))
101174 return -EFAULT;
101175+ raw6_sk(sk)->filter = filter;
101176 return 0;
101177 default:
101178 return -ENOPROTOOPT;
101179@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101180 char __user *optval, int __user *optlen)
101181 {
101182 int len;
101183+ struct icmp6_filter filter;
101184
101185 switch (optname) {
101186 case ICMPV6_FILTER:
101187@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
101188 len = sizeof(struct icmp6_filter);
101189 if (put_user(len, optlen))
101190 return -EFAULT;
101191- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
101192+ filter = raw6_sk(sk)->filter;
101193+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
101194 return -EFAULT;
101195 return 0;
101196 default:
101197@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
101198 0, 0L, 0,
101199 sock_i_uid(sp), 0,
101200 sock_i_ino(sp),
101201- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
101202+ atomic_read(&sp->sk_refcnt),
101203+#ifdef CONFIG_GRKERNSEC_HIDESYM
101204+ NULL,
101205+#else
101206+ sp,
101207+#endif
101208+ atomic_read_unchecked(&sp->sk_drops));
101209 }
101210
101211 static int raw6_seq_show(struct seq_file *seq, void *v)
101212diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
101213index faae6df..d4430c1 100644
101214--- a/net/ipv6/tcp_ipv6.c
101215+++ b/net/ipv6/tcp_ipv6.c
101216@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
101217 }
101218 #endif
101219
101220+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101221+extern int grsec_enable_blackhole;
101222+#endif
101223+
101224 static void tcp_v6_hash(struct sock *sk)
101225 {
101226 if (sk->sk_state != TCP_CLOSE) {
101227@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
101228 return 0;
101229
101230 reset:
101231+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101232+ if (!grsec_enable_blackhole)
101233+#endif
101234 tcp_v6_send_reset(sk, skb);
101235 discard:
101236 if (opt_skb)
101237@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
101238 TCP_SKB_CB(skb)->sacked = 0;
101239
101240 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
101241- if (!sk)
101242+ if (!sk) {
101243+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101244+ ret = 1;
101245+#endif
101246 goto no_tcp_socket;
101247+ }
101248
101249 process:
101250- if (sk->sk_state == TCP_TIME_WAIT)
101251+ if (sk->sk_state == TCP_TIME_WAIT) {
101252+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101253+ ret = 2;
101254+#endif
101255 goto do_time_wait;
101256+ }
101257
101258 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
101259 goto discard_and_relse;
101260@@ -1701,6 +1716,10 @@ no_tcp_socket:
101261 bad_packet:
101262 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
101263 } else {
101264+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101265+ if (!grsec_enable_blackhole || (ret == 1 &&
101266+ (skb->dev->flags & IFF_LOOPBACK)))
101267+#endif
101268 tcp_v6_send_reset(NULL, skb);
101269 }
101270
101271@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
101272 uid,
101273 0, /* non standard timer */
101274 0, /* open_requests have no inode */
101275- 0, req);
101276+ 0,
101277+#ifdef CONFIG_GRKERNSEC_HIDESYM
101278+ NULL
101279+#else
101280+ req
101281+#endif
101282+ );
101283 }
101284
101285 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101286@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
101287 sock_i_uid(sp),
101288 icsk->icsk_probes_out,
101289 sock_i_ino(sp),
101290- atomic_read(&sp->sk_refcnt), sp,
101291+ atomic_read(&sp->sk_refcnt),
101292+#ifdef CONFIG_GRKERNSEC_HIDESYM
101293+ NULL,
101294+#else
101295+ sp,
101296+#endif
101297 jiffies_to_clock_t(icsk->icsk_rto),
101298 jiffies_to_clock_t(icsk->icsk_ack.ato),
101299 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
101300@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
101301 dest->s6_addr32[2], dest->s6_addr32[3], destp,
101302 tw->tw_substate, 0, 0,
101303 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
101304- atomic_read(&tw->tw_refcnt), tw);
101305+ atomic_read(&tw->tw_refcnt),
101306+#ifdef CONFIG_GRKERNSEC_HIDESYM
101307+ NULL
101308+#else
101309+ tw
101310+#endif
101311+ );
101312 }
101313
101314 static int tcp6_seq_show(struct seq_file *seq, void *v)
101315diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
101316index 9cc6289..052c521 100644
101317--- a/net/ipv6/udp.c
101318+++ b/net/ipv6/udp.c
101319@@ -49,6 +49,10 @@
101320 #include <linux/seq_file.h>
101321 #include "udp_impl.h"
101322
101323+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101324+extern int grsec_enable_blackhole;
101325+#endif
101326+
101327 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
101328 {
101329 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
101330@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
101331 if (rc == -ENOMEM) {
101332 UDP6_INC_STATS_BH(sock_net(sk),
101333 UDP_MIB_RCVBUFERRORS, is_udplite);
101334- atomic_inc(&sk->sk_drops);
101335+ atomic_inc_unchecked(&sk->sk_drops);
101336 }
101337 goto drop;
101338 }
101339@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
101340 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
101341 proto == IPPROTO_UDPLITE);
101342
101343+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
101344+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
101345+#endif
101346 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
101347
101348 kfree_skb(skb);
101349@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
101350 0, 0L, 0,
101351 sock_i_uid(sp), 0,
101352 sock_i_ino(sp),
101353- atomic_read(&sp->sk_refcnt), sp,
101354- atomic_read(&sp->sk_drops));
101355+ atomic_read(&sp->sk_refcnt),
101356+#ifdef CONFIG_GRKERNSEC_HIDESYM
101357+ NULL,
101358+#else
101359+ sp,
101360+#endif
101361+ atomic_read_unchecked(&sp->sk_drops));
101362 }
101363
101364 int udp6_seq_show(struct seq_file *seq, void *v)
101365diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
101366index 48bb1e3..5980e6e 100644
101367--- a/net/ipv6/xfrm6_tunnel.c
101368+++ b/net/ipv6/xfrm6_tunnel.c
101369@@ -258,7 +258,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
101370 __be32 spi;
101371
101372 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
101373- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
101374+ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
101375 }
101376
101377 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
101378diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
101379index 811984d..11f59b7 100644
101380--- a/net/irda/ircomm/ircomm_tty.c
101381+++ b/net/irda/ircomm/ircomm_tty.c
101382@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101383 add_wait_queue(&self->open_wait, &wait);
101384
101385 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
101386- __FILE__,__LINE__, tty->driver->name, self->open_count );
101387+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101388
101389 /* As far as I can see, we protect open_count - Jean II */
101390 spin_lock_irqsave(&self->spinlock, flags);
101391 if (!tty_hung_up_p(filp)) {
101392 extra_count = 1;
101393- self->open_count--;
101394+ local_dec(&self->open_count);
101395 }
101396 spin_unlock_irqrestore(&self->spinlock, flags);
101397- self->blocked_open++;
101398+ local_inc(&self->blocked_open);
101399
101400 while (1) {
101401 if (tty->termios->c_cflag & CBAUD) {
101402@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101403 }
101404
101405 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
101406- __FILE__,__LINE__, tty->driver->name, self->open_count );
101407+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
101408
101409 schedule();
101410 }
101411@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101412 if (extra_count) {
101413 /* ++ is not atomic, so this should be protected - Jean II */
101414 spin_lock_irqsave(&self->spinlock, flags);
101415- self->open_count++;
101416+ local_inc(&self->open_count);
101417 spin_unlock_irqrestore(&self->spinlock, flags);
101418 }
101419- self->blocked_open--;
101420+ local_dec(&self->blocked_open);
101421
101422 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
101423- __FILE__,__LINE__, tty->driver->name, self->open_count);
101424+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
101425
101426 if (!retval)
101427 self->flags |= ASYNC_NORMAL_ACTIVE;
101428@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
101429 }
101430 /* ++ is not atomic, so this should be protected - Jean II */
101431 spin_lock_irqsave(&self->spinlock, flags);
101432- self->open_count++;
101433+ local_inc(&self->open_count);
101434
101435 tty->driver_data = self;
101436 self->tty = tty;
101437 spin_unlock_irqrestore(&self->spinlock, flags);
101438
101439 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
101440- self->line, self->open_count);
101441+ self->line, local_read(&self->open_count));
101442
101443 /* Not really used by us, but lets do it anyway */
101444 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
101445@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101446 return;
101447 }
101448
101449- if ((tty->count == 1) && (self->open_count != 1)) {
101450+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
101451 /*
101452 * Uh, oh. tty->count is 1, which means that the tty
101453 * structure will be freed. state->count should always
101454@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101455 */
101456 IRDA_DEBUG(0, "%s(), bad serial port count; "
101457 "tty->count is 1, state->count is %d\n", __func__ ,
101458- self->open_count);
101459- self->open_count = 1;
101460+ local_read(&self->open_count));
101461+ local_set(&self->open_count, 1);
101462 }
101463
101464- if (--self->open_count < 0) {
101465+ if (local_dec_return(&self->open_count) < 0) {
101466 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
101467- __func__, self->line, self->open_count);
101468- self->open_count = 0;
101469+ __func__, self->line, local_read(&self->open_count));
101470+ local_set(&self->open_count, 0);
101471 }
101472- if (self->open_count) {
101473+ if (local_read(&self->open_count)) {
101474 spin_unlock_irqrestore(&self->spinlock, flags);
101475
101476 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
101477@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
101478 tty->closing = 0;
101479 self->tty = NULL;
101480
101481- if (self->blocked_open) {
101482+ if (local_read(&self->blocked_open)) {
101483 if (self->close_delay)
101484 schedule_timeout_interruptible(self->close_delay);
101485 wake_up_interruptible(&self->open_wait);
101486@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
101487 spin_lock_irqsave(&self->spinlock, flags);
101488 self->flags &= ~ASYNC_NORMAL_ACTIVE;
101489 self->tty = NULL;
101490- self->open_count = 0;
101491+ local_set(&self->open_count, 0);
101492 spin_unlock_irqrestore(&self->spinlock, flags);
101493
101494 wake_up_interruptible(&self->open_wait);
101495@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
101496 seq_putc(m, '\n');
101497
101498 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
101499- seq_printf(m, "Open count: %d\n", self->open_count);
101500+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
101501 seq_printf(m, "Max data size: %d\n", self->max_data_size);
101502 seq_printf(m, "Max header size: %d\n", self->max_header_size);
101503
101504diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
101505index bada1b9..f325943 100644
101506--- a/net/iucv/af_iucv.c
101507+++ b/net/iucv/af_iucv.c
101508@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
101509
101510 write_lock_bh(&iucv_sk_list.lock);
101511
101512- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
101513+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101514 while (__iucv_get_sock_by_name(name)) {
101515 sprintf(name, "%08x",
101516- atomic_inc_return(&iucv_sk_list.autobind_name));
101517+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101518 }
101519
101520 write_unlock_bh(&iucv_sk_list.lock);
101521diff --git a/net/key/af_key.c b/net/key/af_key.c
101522index 4e98193..439b449 100644
101523--- a/net/key/af_key.c
101524+++ b/net/key/af_key.c
101525@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
101526 struct xfrm_migrate m[XFRM_MAX_DEPTH];
101527 struct xfrm_kmaddress k;
101528
101529+ pax_track_stack();
101530+
101531 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
101532 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
101533 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
101534@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
101535 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
101536 else
101537 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
101538+#ifdef CONFIG_GRKERNSEC_HIDESYM
101539+ NULL,
101540+#else
101541 s,
101542+#endif
101543 atomic_read(&s->sk_refcnt),
101544 sk_rmem_alloc_get(s),
101545 sk_wmem_alloc_get(s),
101546diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
101547index bda96d1..c038b72 100644
101548--- a/net/lapb/lapb_iface.c
101549+++ b/net/lapb/lapb_iface.c
101550@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
101551 goto out;
101552
101553 lapb->dev = dev;
101554- lapb->callbacks = *callbacks;
101555+ lapb->callbacks = callbacks;
101556
101557 __lapb_insert_cb(lapb);
101558
101559@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
101560
101561 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
101562 {
101563- if (lapb->callbacks.connect_confirmation)
101564- lapb->callbacks.connect_confirmation(lapb->dev, reason);
101565+ if (lapb->callbacks->connect_confirmation)
101566+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
101567 }
101568
101569 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
101570 {
101571- if (lapb->callbacks.connect_indication)
101572- lapb->callbacks.connect_indication(lapb->dev, reason);
101573+ if (lapb->callbacks->connect_indication)
101574+ lapb->callbacks->connect_indication(lapb->dev, reason);
101575 }
101576
101577 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
101578 {
101579- if (lapb->callbacks.disconnect_confirmation)
101580- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
101581+ if (lapb->callbacks->disconnect_confirmation)
101582+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
101583 }
101584
101585 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
101586 {
101587- if (lapb->callbacks.disconnect_indication)
101588- lapb->callbacks.disconnect_indication(lapb->dev, reason);
101589+ if (lapb->callbacks->disconnect_indication)
101590+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
101591 }
101592
101593 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
101594 {
101595- if (lapb->callbacks.data_indication)
101596- return lapb->callbacks.data_indication(lapb->dev, skb);
101597+ if (lapb->callbacks->data_indication)
101598+ return lapb->callbacks->data_indication(lapb->dev, skb);
101599
101600 kfree_skb(skb);
101601 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
101602@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
101603 {
101604 int used = 0;
101605
101606- if (lapb->callbacks.data_transmit) {
101607- lapb->callbacks.data_transmit(lapb->dev, skb);
101608+ if (lapb->callbacks->data_transmit) {
101609+ lapb->callbacks->data_transmit(lapb->dev, skb);
101610 used = 1;
101611 }
101612
101613diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
101614index fe2d3f8..e57f683 100644
101615--- a/net/mac80211/cfg.c
101616+++ b/net/mac80211/cfg.c
101617@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
101618 return err;
101619 }
101620
101621-struct cfg80211_ops mac80211_config_ops = {
101622+const struct cfg80211_ops mac80211_config_ops = {
101623 .add_virtual_intf = ieee80211_add_iface,
101624 .del_virtual_intf = ieee80211_del_iface,
101625 .change_virtual_intf = ieee80211_change_iface,
101626diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
101627index 7d7879f..2d51f62 100644
101628--- a/net/mac80211/cfg.h
101629+++ b/net/mac80211/cfg.h
101630@@ -4,6 +4,6 @@
101631 #ifndef __CFG_H
101632 #define __CFG_H
101633
101634-extern struct cfg80211_ops mac80211_config_ops;
101635+extern const struct cfg80211_ops mac80211_config_ops;
101636
101637 #endif /* __CFG_H */
101638diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
101639index 99c7525..9cb4937 100644
101640--- a/net/mac80211/debugfs_key.c
101641+++ b/net/mac80211/debugfs_key.c
101642@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
101643 size_t count, loff_t *ppos)
101644 {
101645 struct ieee80211_key *key = file->private_data;
101646- int i, res, bufsize = 2 * key->conf.keylen + 2;
101647+ int i, bufsize = 2 * key->conf.keylen + 2;
101648 char *buf = kmalloc(bufsize, GFP_KERNEL);
101649 char *p = buf;
101650+ ssize_t res;
101651+
101652+ if (buf == NULL)
101653+ return -ENOMEM;
101654
101655 for (i = 0; i < key->conf.keylen; i++)
101656 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
101657diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
101658index 33a2e89..08650c8 100644
101659--- a/net/mac80211/debugfs_sta.c
101660+++ b/net/mac80211/debugfs_sta.c
101661@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
101662 int i;
101663 struct sta_info *sta = file->private_data;
101664
101665+ pax_track_stack();
101666+
101667 spin_lock_bh(&sta->lock);
101668 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
101669 sta->ampdu_mlme.dialog_token_allocator + 1);
101670diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
101671index ca62bfe..6657a03 100644
101672--- a/net/mac80211/ieee80211_i.h
101673+++ b/net/mac80211/ieee80211_i.h
101674@@ -25,6 +25,7 @@
101675 #include <linux/etherdevice.h>
101676 #include <net/cfg80211.h>
101677 #include <net/mac80211.h>
101678+#include <asm/local.h>
101679 #include "key.h"
101680 #include "sta_info.h"
101681
101682@@ -635,7 +636,7 @@ struct ieee80211_local {
101683 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
101684 spinlock_t queue_stop_reason_lock;
101685
101686- int open_count;
101687+ local_t open_count;
101688 int monitors, cooked_mntrs;
101689 /* number of interfaces with corresponding FIF_ flags */
101690 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
101691diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
101692index 079c500..eb3c6d4 100644
101693--- a/net/mac80211/iface.c
101694+++ b/net/mac80211/iface.c
101695@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
101696 break;
101697 }
101698
101699- if (local->open_count == 0) {
101700+ if (local_read(&local->open_count) == 0) {
101701 res = drv_start(local);
101702 if (res)
101703 goto err_del_bss;
101704@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
101705 * Validate the MAC address for this device.
101706 */
101707 if (!is_valid_ether_addr(dev->dev_addr)) {
101708- if (!local->open_count)
101709+ if (!local_read(&local->open_count))
101710 drv_stop(local);
101711 return -EADDRNOTAVAIL;
101712 }
101713@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
101714
101715 hw_reconf_flags |= __ieee80211_recalc_idle(local);
101716
101717- local->open_count++;
101718+ local_inc(&local->open_count);
101719 if (hw_reconf_flags) {
101720 ieee80211_hw_config(local, hw_reconf_flags);
101721 /*
101722@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
101723 err_del_interface:
101724 drv_remove_interface(local, &conf);
101725 err_stop:
101726- if (!local->open_count)
101727+ if (!local_read(&local->open_count))
101728 drv_stop(local);
101729 err_del_bss:
101730 sdata->bss = NULL;
101731@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
101732 WARN_ON(!list_empty(&sdata->u.ap.vlans));
101733 }
101734
101735- local->open_count--;
101736+ local_dec(&local->open_count);
101737
101738 switch (sdata->vif.type) {
101739 case NL80211_IFTYPE_AP_VLAN:
101740@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
101741
101742 ieee80211_recalc_ps(local, -1);
101743
101744- if (local->open_count == 0) {
101745+ if (local_read(&local->open_count) == 0) {
101746 ieee80211_clear_tx_pending(local);
101747 ieee80211_stop_device(local);
101748
101749diff --git a/net/mac80211/main.c b/net/mac80211/main.c
101750index 2dfe176..74e4388 100644
101751--- a/net/mac80211/main.c
101752+++ b/net/mac80211/main.c
101753@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
101754 local->hw.conf.power_level = power;
101755 }
101756
101757- if (changed && local->open_count) {
101758+ if (changed && local_read(&local->open_count)) {
101759 ret = drv_config(local, changed);
101760 /*
101761 * Goal:
101762diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
101763index e67eea7..fcc227e 100644
101764--- a/net/mac80211/mlme.c
101765+++ b/net/mac80211/mlme.c
101766@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
101767 bool have_higher_than_11mbit = false, newsta = false;
101768 u16 ap_ht_cap_flags;
101769
101770+ pax_track_stack();
101771+
101772 /*
101773 * AssocResp and ReassocResp have identical structure, so process both
101774 * of them in this function.
101775diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
101776index e535f1c..4d733d1 100644
101777--- a/net/mac80211/pm.c
101778+++ b/net/mac80211/pm.c
101779@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
101780 }
101781
101782 /* stop hardware - this must stop RX */
101783- if (local->open_count)
101784+ if (local_read(&local->open_count))
101785 ieee80211_stop_device(local);
101786
101787 local->suspended = true;
101788diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
101789index b33efc4..0a2efb6 100644
101790--- a/net/mac80211/rate.c
101791+++ b/net/mac80211/rate.c
101792@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
101793 struct rate_control_ref *ref, *old;
101794
101795 ASSERT_RTNL();
101796- if (local->open_count)
101797+ if (local_read(&local->open_count))
101798 return -EBUSY;
101799
101800 ref = rate_control_alloc(name, local);
101801diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
101802index b1d7904..57e4da7 100644
101803--- a/net/mac80211/tx.c
101804+++ b/net/mac80211/tx.c
101805@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
101806 return cpu_to_le16(dur);
101807 }
101808
101809-static int inline is_ieee80211_device(struct ieee80211_local *local,
101810+static inline int is_ieee80211_device(struct ieee80211_local *local,
101811 struct net_device *dev)
101812 {
101813 return local == wdev_priv(dev->ieee80211_ptr);
101814diff --git a/net/mac80211/util.c b/net/mac80211/util.c
101815index 31b1085..48fb26d 100644
101816--- a/net/mac80211/util.c
101817+++ b/net/mac80211/util.c
101818@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
101819 local->resuming = true;
101820
101821 /* restart hardware */
101822- if (local->open_count) {
101823+ if (local_read(&local->open_count)) {
101824 /*
101825 * Upon resume hardware can sometimes be goofy due to
101826 * various platform / driver / bus issues, so restarting
101827diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
101828index 634d14a..b35a608 100644
101829--- a/net/netfilter/Kconfig
101830+++ b/net/netfilter/Kconfig
101831@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
101832
101833 To compile it as a module, choose M here. If unsure, say N.
101834
101835+config NETFILTER_XT_MATCH_GRADM
101836+ tristate '"gradm" match support'
101837+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
101838+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
101839+ ---help---
101840+ The gradm match allows to match on grsecurity RBAC being enabled.
101841+ It is useful when iptables rules are applied early on bootup to
101842+ prevent connections to the machine (except from a trusted host)
101843+ while the RBAC system is disabled.
101844+
101845 config NETFILTER_XT_MATCH_HASHLIMIT
101846 tristate '"hashlimit" match support'
101847 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
101848diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
101849index 49f62ee..a17b2c6 100644
101850--- a/net/netfilter/Makefile
101851+++ b/net/netfilter/Makefile
101852@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
101853 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
101854 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
101855 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
101856+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
101857 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
101858 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
101859 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
101860diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
101861index 3c7e427..724043c 100644
101862--- a/net/netfilter/ipvs/ip_vs_app.c
101863+++ b/net/netfilter/ipvs/ip_vs_app.c
101864@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
101865 .open = ip_vs_app_open,
101866 .read = seq_read,
101867 .llseek = seq_lseek,
101868- .release = seq_release,
101869+ .release = seq_release_net,
101870 };
101871 #endif
101872
101873diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
101874index 95682e5..457dbac 100644
101875--- a/net/netfilter/ipvs/ip_vs_conn.c
101876+++ b/net/netfilter/ipvs/ip_vs_conn.c
101877@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
101878 /* if the connection is not template and is created
101879 * by sync, preserve the activity flag.
101880 */
101881- cp->flags |= atomic_read(&dest->conn_flags) &
101882+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
101883 (~IP_VS_CONN_F_INACTIVE);
101884 else
101885- cp->flags |= atomic_read(&dest->conn_flags);
101886+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
101887 cp->dest = dest;
101888
101889 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
101890@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
101891 atomic_set(&cp->refcnt, 1);
101892
101893 atomic_set(&cp->n_control, 0);
101894- atomic_set(&cp->in_pkts, 0);
101895+ atomic_set_unchecked(&cp->in_pkts, 0);
101896
101897 atomic_inc(&ip_vs_conn_count);
101898 if (flags & IP_VS_CONN_F_NO_CPORT)
101899@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
101900 .open = ip_vs_conn_open,
101901 .read = seq_read,
101902 .llseek = seq_lseek,
101903- .release = seq_release,
101904+ .release = seq_release_net,
101905 };
101906
101907 static const char *ip_vs_origin_name(unsigned flags)
101908@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
101909 .open = ip_vs_conn_sync_open,
101910 .read = seq_read,
101911 .llseek = seq_lseek,
101912- .release = seq_release,
101913+ .release = seq_release_net,
101914 };
101915
101916 #endif
101917@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
101918
101919 /* Don't drop the entry if its number of incoming packets is not
101920 located in [0, 8] */
101921- i = atomic_read(&cp->in_pkts);
101922+ i = atomic_read_unchecked(&cp->in_pkts);
101923 if (i > 8 || i < 0) return 0;
101924
101925 if (!todrop_rate[i]) return 0;
101926diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
101927index b95699f..5fee919 100644
101928--- a/net/netfilter/ipvs/ip_vs_core.c
101929+++ b/net/netfilter/ipvs/ip_vs_core.c
101930@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
101931 ret = cp->packet_xmit(skb, cp, pp);
101932 /* do not touch skb anymore */
101933
101934- atomic_inc(&cp->in_pkts);
101935+ atomic_inc_unchecked(&cp->in_pkts);
101936 ip_vs_conn_put(cp);
101937 return ret;
101938 }
101939@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
101940 * Sync connection if it is about to close to
101941 * encorage the standby servers to update the connections timeout
101942 */
101943- pkts = atomic_add_return(1, &cp->in_pkts);
101944+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
101945 if (af == AF_INET &&
101946 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
101947 (((cp->protocol != IPPROTO_TCP ||
101948diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
101949index 02b2610..2d89424 100644
101950--- a/net/netfilter/ipvs/ip_vs_ctl.c
101951+++ b/net/netfilter/ipvs/ip_vs_ctl.c
101952@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
101953 ip_vs_rs_hash(dest);
101954 write_unlock_bh(&__ip_vs_rs_lock);
101955 }
101956- atomic_set(&dest->conn_flags, conn_flags);
101957+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
101958
101959 /* bind the service */
101960 if (!dest->svc) {
101961@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
101962 " %-7s %-6d %-10d %-10d\n",
101963 &dest->addr.in6,
101964 ntohs(dest->port),
101965- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
101966+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
101967 atomic_read(&dest->weight),
101968 atomic_read(&dest->activeconns),
101969 atomic_read(&dest->inactconns));
101970@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
101971 "%-7s %-6d %-10d %-10d\n",
101972 ntohl(dest->addr.ip),
101973 ntohs(dest->port),
101974- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
101975+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
101976 atomic_read(&dest->weight),
101977 atomic_read(&dest->activeconns),
101978 atomic_read(&dest->inactconns));
101979@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
101980 .open = ip_vs_info_open,
101981 .read = seq_read,
101982 .llseek = seq_lseek,
101983- .release = seq_release_private,
101984+ .release = seq_release_net,
101985 };
101986
101987 #endif
101988@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
101989 .open = ip_vs_stats_seq_open,
101990 .read = seq_read,
101991 .llseek = seq_lseek,
101992- .release = single_release,
101993+ .release = single_release_net,
101994 };
101995
101996 #endif
101997@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
101998
101999 entry.addr = dest->addr.ip;
102000 entry.port = dest->port;
102001- entry.conn_flags = atomic_read(&dest->conn_flags);
102002+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
102003 entry.weight = atomic_read(&dest->weight);
102004 entry.u_threshold = dest->u_threshold;
102005 entry.l_threshold = dest->l_threshold;
102006@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102007 unsigned char arg[128];
102008 int ret = 0;
102009
102010+ pax_track_stack();
102011+
102012 if (!capable(CAP_NET_ADMIN))
102013 return -EPERM;
102014
102015@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
102016 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
102017
102018 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
102019- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102020+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
102021 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
102022 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
102023 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
102024diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
102025index e177f0d..55e8581 100644
102026--- a/net/netfilter/ipvs/ip_vs_sync.c
102027+++ b/net/netfilter/ipvs/ip_vs_sync.c
102028@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
102029
102030 if (opt)
102031 memcpy(&cp->in_seq, opt, sizeof(*opt));
102032- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102033+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
102034 cp->state = state;
102035 cp->old_state = cp->state;
102036 /*
102037diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
102038index 30b3189..e2e4b55 100644
102039--- a/net/netfilter/ipvs/ip_vs_xmit.c
102040+++ b/net/netfilter/ipvs/ip_vs_xmit.c
102041@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
102042 else
102043 rc = NF_ACCEPT;
102044 /* do not touch skb anymore */
102045- atomic_inc(&cp->in_pkts);
102046+ atomic_inc_unchecked(&cp->in_pkts);
102047 goto out;
102048 }
102049
102050@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
102051 else
102052 rc = NF_ACCEPT;
102053 /* do not touch skb anymore */
102054- atomic_inc(&cp->in_pkts);
102055+ atomic_inc_unchecked(&cp->in_pkts);
102056 goto out;
102057 }
102058
102059diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
102060index d521718..d0fd7a1 100644
102061--- a/net/netfilter/nf_conntrack_netlink.c
102062+++ b/net/netfilter/nf_conntrack_netlink.c
102063@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
102064 static int
102065 ctnetlink_parse_tuple(const struct nlattr * const cda[],
102066 struct nf_conntrack_tuple *tuple,
102067- enum ctattr_tuple type, u_int8_t l3num)
102068+ enum ctattr_type type, u_int8_t l3num)
102069 {
102070 struct nlattr *tb[CTA_TUPLE_MAX+1];
102071 int err;
102072diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
102073index f900dc3..5e45346 100644
102074--- a/net/netfilter/nfnetlink_log.c
102075+++ b/net/netfilter/nfnetlink_log.c
102076@@ -68,7 +68,7 @@ struct nfulnl_instance {
102077 };
102078
102079 static DEFINE_RWLOCK(instances_lock);
102080-static atomic_t global_seq;
102081+static atomic_unchecked_t global_seq;
102082
102083 #define INSTANCE_BUCKETS 16
102084 static struct hlist_head instance_table[INSTANCE_BUCKETS];
102085@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
102086 /* global sequence number */
102087 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
102088 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
102089- htonl(atomic_inc_return(&global_seq)));
102090+ htonl(atomic_inc_return_unchecked(&global_seq)));
102091
102092 if (data_len) {
102093 struct nlattr *nla;
102094diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
102095new file mode 100644
102096index 0000000..b1bac76
102097--- /dev/null
102098+++ b/net/netfilter/xt_gradm.c
102099@@ -0,0 +1,51 @@
102100+/*
102101+ * gradm match for netfilter
102102